Closed
Commits (32)
- 6c6cade  migrate lora pipeline tests to pytest (sayakpaul, Oct 3, 2025)
- 9e92f6b  up (sayakpaul, Oct 3, 2025)
- d61bb38  up (sayakpaul, Oct 3, 2025)
- 7b4bcce  up (sayakpaul, Oct 3, 2025)
- ec866f5  tempfile is now a fixture. (sayakpaul, Oct 3, 2025)
- 949cc1c  up (sayakpaul, Oct 3, 2025)
- cba8259  up (sayakpaul, Oct 3, 2025)
- 610842a  up (sayakpaul, Oct 3, 2025)
- 565d674  change flux lora integration tests to use pytest (sayakpaul, Oct 3, 2025)
- 1737b71  up (sayakpaul, Oct 3, 2025)
- c4bcf72  up (sayakpaul, Oct 3, 2025)
- dae161e  up (sayakpaul, Oct 3, 2025)
- bdc9537  more fixtures. (sayakpaul, Oct 3, 2025)
- 128535c  up (sayakpaul, Oct 3, 2025)
- f8f2789  up (sayakpaul, Oct 3, 2025)
- 23e5559  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Oct 3, 2025)
- 4f5e9a6  up (sayakpaul, Oct 3, 2025)
- 0d3da48  up (sayakpaul, Oct 3, 2025)
- 4ae5772  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Oct 17, 2025)
- 4561c06  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Oct 17, 2025)
- 757bbf7  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Oct 24, 2025)
- 2e42205  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Nov 6, 2025)
- eece712  up (sayakpaul, Nov 6, 2025)
- 9201505  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Nov 6, 2025)
- 11b80d0  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Nov 10, 2025)
- 9c3bed1  up (sayakpaul, Nov 20, 2025)
- 3fb66f2  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Nov 20, 2025)
- 1b6cdea  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Dec 3, 2025)
- f3593a8  up (sayakpaul, Dec 3, 2025)
- f956ba0  resolve conflicts. (sayakpaul, Dec 4, 2025)
- db25095  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Dec 10, 2025)
- 0e49bcb  Merge branch 'main' into migrate-lora-pytest (sayakpaul, Dec 15, 2025)
26 changes: 11 additions & 15 deletions tests/lora/test_lora_layers_auraflow.py
@@ -13,16 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import sys
-import unittest
 
+import pytest
 import torch
 from transformers import AutoTokenizer, UMT5EncoderModel
 
-from diffusers import (
-    AuraFlowPipeline,
-    AuraFlowTransformer2DModel,
-    FlowMatchEulerDiscreteScheduler,
-)
+from diffusers import AuraFlowPipeline, AuraFlowTransformer2DModel, FlowMatchEulerDiscreteScheduler
 
 from ..testing_utils import (
     floats_tensor,
@@ -40,7 +36,7 @@
 
 
 @require_peft_backend
-class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class TestAuraFlowLoRA(PeftLoraLoaderMixinTests):
     pipeline_class = AuraFlowPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
     scheduler_kwargs = {}
@@ -103,34 +99,34 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass
 
-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass
 
-    @unittest.skip("Not supported in AuraFlow.")
+    @pytest.mark.skip("Not supported in AuraFlow.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_partial_text_lora(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_fused(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in AuraFlow.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
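
Note: the AuraFlow diff above captures the core of the migration. The test class drops its unittest.TestCase base (pytest collects any class whose name starts with "Test"), and unittest.skip decorators become pytest.mark.skip. A minimal, self-contained sketch of that pattern; the class and test names here are hypothetical stand-ins, not code from this PR:

import pytest


class TestExampleLoRA:  # hypothetical stand-in for TestAuraFlowLoRA; pytest collects Test*-named classes without TestCase inheritance
    @pytest.mark.skip(reason="Not supported in this pipeline.")
    def test_unsupported_feature(self):
        # Never runs; reported as skipped with the reason above.
        pass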
46 changes: 25 additions & 21 deletions tests/lora/test_lora_layers_cogvideox.py
@@ -13,10 +13,9 @@
 # limitations under the License.
 
 import sys
-import unittest
 
+import pytest
 import torch
-from parameterized import parameterized
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import (
@@ -39,7 +38,7 @@
 
 
 @require_peft_backend
-class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class TestCogVideoXLoRA(PeftLoraLoaderMixinTests):
     pipeline_class = CogVideoXPipeline
     scheduler_cls = CogVideoXDPMScheduler
     scheduler_kwargs = {"timestep_spacing": "trailing"}
@@ -119,54 +118,59 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3, pipe=pipe)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
-    def test_lora_scale_kwargs_match_fusion(self):
-        super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)
+    def test_lora_scale_kwargs_match_fusion(self, base_pipe_output):
+        super().test_lora_scale_kwargs_match_fusion(
+            base_pipe_output=base_pipe_output, expected_atol=9e-3, expected_rtol=9e-3
+        )
 
-    @parameterized.expand([("block_level", True), ("leaf_level", False)])
+    @pytest.mark.parametrize(
+        "offload_type, use_stream",
+        [("block_level", True), ("leaf_level", False)],
+    )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname, pipe):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname, pipe)
 
-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass
 
-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass
 
-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_partial_text_lora(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_and_scale(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_fused(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
+    @pytest.mark.skip("Text encoder LoRA is not supported in CogVideoX.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
 
-    @unittest.skip("Not supported in CogVideoX.")
+    @pytest.mark.skip("Not supported in CogVideoX.")
     def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
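
Note: beyond the skips, the CogVideoX diff swaps parameterized.expand for pytest.mark.parametrize and threads pytest fixtures (pipe, base_pipe_output, tmpdirname) through the overridden test signatures. A self-contained sketch of the parametrize swap; the test function and its body are hypothetical, only the decorator arguments mirror the diff:

import pytest


@pytest.mark.parametrize(
    "offload_type, use_stream",
    [("block_level", True), ("leaf_level", False)],
)
def test_offload_combinations(offload_type, use_stream):
    # pytest generates one test per tuple, e.g. test_offload_combinations[block_level-True],
    # whereas parameterized.expand passed each tuple positionally to a unittest method.
    assert offload_type in {"block_level", "leaf_level"}
    assert isinstance(use_stream, bool)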
66 changes: 20 additions & 46 deletions tests/lora/test_lora_layers_cogview4.py
@@ -13,12 +13,9 @@
 # limitations under the License.
 
 import sys
-import tempfile
-import unittest
 
-import numpy as np
+import pytest
 import torch
-from parameterized import parameterized
 from transformers import AutoTokenizer, GlmModel
 
 from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
@@ -28,7 +25,6 @@
     require_peft_backend,
     require_torch_accelerator,
     skip_mps,
-    torch_device,
 )
 
 
@@ -47,7 +43,7 @@ def from_pretrained(*args, **kwargs):
 
 @require_peft_backend
 @skip_mps
-class CogView4LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class TestCogView4LoRA(PeftLoraLoaderMixinTests):
     pipeline_class = CogView4Pipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
     scheduler_kwargs = {}
@@ -113,72 +109,50 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self, pipe):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    def test_simple_inference_with_text_denoiser_lora_unfused(self, pipe):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(pipe=pipe, expected_atol=9e-3)
 
-    def test_simple_inference_save_pretrained(self):
-        """
-        Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
-        """
-        components, _, _ = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-        _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            pipe.save_pretrained(tmpdirname)
-
-            pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
-            pipe_from_pretrained.to(torch_device)
-
-            images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]
-
-        self.assertTrue(
-            np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
-            "Loading from saved checkpoints should give same results.",
-        )

[Inline review comment, sayakpaul (Member, Author), on the removed test above: "It's not testing any LoRA-related stuff. So, let's remove."]
 
-    @parameterized.expand([("block_level", True), ("leaf_level", False)])
+    @pytest.mark.parametrize(
+        "offload_type, use_stream",
+        [("block_level", True), ("leaf_level", False)],
+    )
     @require_torch_accelerator
-    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
+    def test_group_offloading_inference_denoiser(self, offload_type, use_stream, tmpdirname, pipe):
         # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
-        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
+        super()._test_group_offloading_inference_denoiser(offload_type, use_stream, tmpdirname, pipe)
 
@unittest.skip("Not supported in CogView4.")
@pytest.mark.skip("Not supported in CogView4.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in CogView4.")
@pytest.mark.skip("Not supported in CogView4.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in CogView4.")
@pytest.mark.skip("Not supported in CogView4.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
@pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
@pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
@pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
@pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
@pytest.mark.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_save_load(self):
pass
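
Note: the CogView4 file also shows why the save_pretrained test and the tempfile/torch_device/numpy imports could go; temporary directories and pipeline construction now arrive as pytest fixtures (tmpdirname, pipe), whose real definitions live in the shared PeftLoraLoaderMixinTests/conftest code that this diff does not show. A self-contained approximation of that fixture pattern, with DummyPipeline and test_uses_fixtures as hypothetical stand-ins:

import pytest


class DummyPipeline:
    # Hypothetical stand-in for a pipeline assembled from dummy components.
    def __call__(self, prompt):
        return [f"image for {prompt!r}"]


@pytest.fixture
def tmpdirname(tmp_path):
    # Expose pytest's built-in tmp_path under the name the migrated tests request;
    # this replaces the explicit tempfile.TemporaryDirectory() blocks.
    return str(tmp_path)


@pytest.fixture
def pipe():
    # Assumption: the real fixture builds the pipeline class under test from dummy components.
    return DummyPipeline()


def test_uses_fixtures(pipe, tmpdirname):
    # Fixtures are injected by name, so tests no longer construct these by hand.
    assert pipe("a prompt") and tmpdirname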