Created
December 20, 2025 05:37
-
-
Save GOROman/b31e9c85f2f4d9ca3cdc1bbc5982ae29 to your computer and use it in GitHub Desktop.
Qwen-Image-Layered
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import torch | |
| import torch.nn.functional as F | |
| _orig_sdp = F.scaled_dot_product_attention | |
| def _safe_sdp(*args, **kwargs): | |
| # diffusers 側が query/key/value で渡してくるケースに対応 | |
| if len(args) >= 3: | |
| q, k, v = args[:3] | |
| rest_args = args[3:] | |
| else: | |
| # どっちの名前でも拾う | |
| q = kwargs.pop("q", kwargs.pop("query", None)) | |
| k = kwargs.pop("k", kwargs.pop("key", None)) | |
| v = kwargs.pop("v", kwargs.pop("value", None)) | |
| if q is None or k is None or v is None: | |
| raise TypeError("_safe_sdp: missing q/k/v (or query/key/value)") | |
| rest_args = () | |
| try: | |
| return _orig_sdp(q, k, v, *rest_args, **kwargs) | |
| except TypeError as e: | |
| # enable_gqa が未対応の torch でも落ちないように保険 | |
| if "enable_gqa" in kwargs: | |
| kwargs.pop("enable_gqa", None) | |
| return _orig_sdp(q, k, v, *rest_args, **kwargs) | |
| raise | |
| F.scaled_dot_product_attention = _safe_sdp | |
from diffusers import QwenImageLayeredPipeline
import torch
from PIL import Image

# Load the layered-decomposition pipeline and run it on GPU in bfloat16.
pipeline = QwenImageLayeredPipeline.from_pretrained("Qwen/Qwen-Image-Layered")
pipeline = pipeline.to("cuda", torch.bfloat16)
pipeline.set_progress_bar_config(disable=None)

# The pipeline expects an RGBA input image.
source_image = Image.open("test.png").convert("RGBA")

with torch.inference_mode():
    result = pipeline(
        image=source_image,
        generator=torch.Generator(device='cuda').manual_seed(777),
        true_cfg_scale=4.0,
        negative_prompt=" ",
        num_inference_steps=50,
        num_images_per_prompt=1,
        layers=4,
        # Resolution bucket is (640, 1024); 640 is recommended for this version.
        resolution=640,
        cfg_normalize=True,   # enable CFG normalization
        use_en_prompt=True,   # auto-caption language when no caption is given
    )

# result.images[0] holds the decomposed layers for the first sample;
# write each layer out as 0.png, 1.png, ...
layers = result.images[0]
for index, layer in enumerate(layers):
    layer.save(f"{index}.png")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment