HAL1993 committed
Commit fde586d · verified · 1 Parent(s): 1dda342

Update app.py

Files changed (1)
  1. app.py +280 -138
app.py CHANGED
@@ -1,14 +1,20 @@
 import os
-import gradio as gr
 import numpy as np
-import spaces
 import torch
-import random
-from PIL import Image
 from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes

 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",
@@ -24,6 +30,7 @@ colors.steel_blue = colors.Color(
     c950="#1E3450",
 )

 class SteelBlueTheme(Soft):
     def __init__(
         self,
@@ -33,10 +40,14 @@ class SteelBlueTheme(Soft):
         neutral_hue: colors.Color | str = colors.slate,
         text_size: sizes.Size | str = sizes.text_lg,
         font: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
         ),
         font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
         ),
     ):
         super().__init__(
@@ -75,88 +86,153 @@ class SteelBlueTheme(Soft):
             block_label_background_fill="*primary_200",
         )

 steel_blue_theme = SteelBlueTheme()

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
-print("torch.__version__ =", torch.__version__)
-print("torch.version.cuda =", torch.version.cuda)
-print("cuda available:", torch.cuda.is_available())
-print("cuda device count:", torch.cuda.device_count())
 if torch.cuda.is_available():
-    print("current device:", torch.cuda.current_device())
-    print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
-
 print("Using device:", device)

 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

 dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"

 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=QwenImageTransformer2DModel.from_pretrained(
-        "linoyts/Qwen-Image-Edit-Rapid-AIO", # [transformer weights extracted from: Phr00t/Qwen-Image-Edit-Rapid-AIO]
-        subfolder='transformer',
         torch_dtype=dtype,
-        device_map='cuda'
     ),
-    torch_dtype=dtype
 ).to(device)

-pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
-                       weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
-                       adapter_name="anime")
-pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles",
-                       weight_name="镜头转换.safetensors",
-                       adapter_name="multiple-angles")
-pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration",
-                       weight_name="移除光影.safetensors",
-                       adapter_name="light-restoration")
-pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
-                       weight_name="Qwen-Edit-Relight.safetensors",
-                       adapter_name="relight")
-pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multi-Angle-Lighting",
-                       weight_name="多角度灯光-251116.safetensors",
-                       adapter_name="multi-angle-lighting")
-pipe.load_lora_weights("tlennon-ie/qwen-edit-skin",
-                       weight_name="qwen-edit-skin_1.1_000002750.safetensors",
-                       adapter_name="edit-skin")
-pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509",
-                       weight_name="next-scene_lora-v2-3000.safetensors",
-                       adapter_name="next-scene")
-pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA",
-                       weight_name="qwen-edit-enhance_64-v3_000001000.safetensors",
-                       adapter_name="upscale-image")

 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-MAX_SEED = np.iinfo(np.int32).max

-def update_dimensions_on_upload(image):
-    if image is None:
-        return 1024, 1024
-
-    original_width, original_height = image.size
-
-    if original_width > original_height:
-        new_width = 1024
-        aspect_ratio = original_height / original_width
-        new_height = int(new_width * aspect_ratio)
-    else:
-        new_height = 1024
-        aspect_ratio = original_width / original_height
-        new_width = int(new_height * aspect_ratio)
-
-    # Ensure dimensions are multiples of 8
-    new_width = (new_width // 8) * 8
-    new_height = (new_height // 8) * 8
-
-    return new_width, new_height

 @spaces.GPU(duration=30)
 def infer(
@@ -167,131 +243,197 @@ def infer(
     randomize_seed,
     guidance_scale,
     steps,
-    progress=gr.Progress(track_tqdm=True)
 ):
     if input_image is None:
         raise gr.Error("Please upload an image to edit.")

-    if lora_adapter == "Photo-to-Anime":
-        pipe.set_adapters(["anime"], adapter_weights=[1.0])
-    elif lora_adapter == "Multiple-Angles":
-        pipe.set_adapters(["multiple-angles"], adapter_weights=[1.0])
-    elif lora_adapter == "Light-Restoration":
-        pipe.set_adapters(["light-restoration"], adapter_weights=[1.0])
-    elif lora_adapter == "Relight":
-        pipe.set_adapters(["relight"], adapter_weights=[1.0])
-    elif lora_adapter == "Multi-Angle-Lighting":
-        pipe.set_adapters(["multi-angle-lighting"], adapter_weights=[1.0])
-    elif lora_adapter == "Edit-Skin":
-        pipe.set_adapters(["edit-skin"], adapter_weights=[1.0])
-    elif lora_adapter == "Next-Scene":
-        pipe.set_adapters(["next-scene"], adapter_weights=[1.0])
-    elif lora_adapter == "Upscale-Image":
-        pipe.set_adapters(["upscale-image"], adapter_weights=[1.0])
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
     generator = torch.Generator(device=device).manual_seed(seed)
-    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

-    original_image = input_image.convert("RGB")
-
-    # Use the new function to update dimensions
-    width, height = update_dimensions_on_upload(original_image)

     result = pipe(
-        image=original_image,
         prompt=prompt,
         negative_prompt=negative_prompt,
-        height=height,
-        width=width,
         num_inference_steps=steps,
         generator=generator,
         true_cfg_scale=guidance_scale,
     ).images[0]

-    return result, seed

 @spaces.GPU(duration=30)
 def infer_example(input_image, prompt, lora_adapter):
-    input_pil = input_image.convert("RGB")
-    guidance_scale = 1.0
-    steps = 4
-    result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
-    return result, seed
-
-
-css="""
-#col-container {
-    margin: 0 auto;
-    max-width: 960px;
-}
 #main-title h1 {font-size: 2.1em !important;}
 """

 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast**", elem_id="main-title")
-        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) model.")

         with gr.Row(equal_height=True):
             with gr.Column():
-                input_image = gr.Image(label="Upload Image", type="pil", height=290)
-
-                prompt = gr.Text(
                     label="Edit Prompt",
-                    show_label=True,
-                    placeholder="e.g., transform into anime..",
                 )
-
                 run_button = gr.Button("Edit Image", variant="primary")

             with gr.Column():
-                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=353)
-
-                with gr.Row():
-                    lora_adapter = gr.Dropdown(
-                        label="Choose Editing Style",
-                        choices=["Photo-to-Anime", "Multiple-Angles", "Light-Restoration", "Multi-Angle-Lighting", "Upscale-Image", "Relight", "Next-Scene", "Edit-Skin"],
-                        value="Photo-to-Anime"
                     )
-                with gr.Accordion("Advanced Settings", open=False, visible=False):
-                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
-                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
-
         gr.Examples(
             examples=[
                 ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
                 ["examples/5.jpg", "Remove shadows and relight the image using soft lighting.", "Light-Restoration"],
-                ["examples/4.jpg", "Use a subtle golden-hour filter with smooth light diffusion.", "Relight"],
                 ["examples/2.jpeg", "Rotate the camera 45 degrees to the left.", "Multiple-Angles"],
                 ["examples/7.jpg", "Light source from the Right Rear", "Multi-Angle-Lighting"],
                 ["examples/10.jpeg", "Upscale the image.", "Upscale-Image"],
                 ["examples/7.jpg", "Light source from the Below", "Multi-Angle-Lighting"],
-                ["examples/2.jpeg", "Switch the camera to a top-down right corner view.", "Multiple-Angles"],
                 ["examples/9.jpg", "The camera moves slightly forward as sunlight breaks through the clouds, casting a soft glow around the character's silhouette in the mist. Realistic cinematic style, atmospheric depth.", "Next-Scene"],
                 ["examples/8.jpg", "Make the subjects skin details more prominent and natural.", "Edit-Skin"],
-                ["examples/6.jpg", "Switch the camera to a bottom-up view.", "Multiple-Angles"],
                 ["examples/6.jpg", "Rotate the camera 180 degrees upside down.", "Multiple-Angles"],
                 ["examples/4.jpg", "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
-                ["examples/4.jpg", "Switch the camera to a top-down view.", "Multiple-Angles"],
-                ["examples/4.jpg", "Switch the camera to a wide-angle lens.", "Multiple-Angles"],
             ],
             inputs=[input_image, prompt, lora_adapter],
             outputs=[output_image, seed],
             fn=infer_example,
             cache_examples=False,
-            label="Examples"
         )

-        run_button.click(
-            fn=infer,
-            inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
-            outputs=[output_image, seed]
-        )

 if __name__ == "__main__":
-    demo.queue(max_size=30).launch(css=css, theme=steel_blue_theme, mcp_server=True, ssr_mode=False, show_error=True)
+# --------------------------------------------------------------
+# Qwen‑Image‑Edit‑2509 LoRA Demo – fixed‑aspect‑ratio version
+# --------------------------------------------------------------
 import os
+import random
 import numpy as np
 import torch
+import gradio as gr
+import spaces
+from PIL import Image, ImageOps
 from typing import Iterable
+
+# -------------------- THEME ---------------------------------
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes

+# add a custom colour
 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",

     c950="#1E3450",
 )

+
 class SteelBlueTheme(Soft):
     def __init__(
         self,

         neutral_hue: colors.Color | str = colors.slate,
         text_size: sizes.Size | str = sizes.text_lg,
         font: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("Outfit"),
+            "Arial",
+            "sans-serif",
         ),
         font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("IBM Plex Mono"),
+            "ui-monospace",
+            "monospace",
         ),
     ):
         super().__init__(

             block_label_background_fill="*primary_200",
         )

+
 steel_blue_theme = SteelBlueTheme()

+# --------------------------------------------------------------
+# Device & diagnostics
+# --------------------------------------------------------------
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print("CUDA_VISIBLE_DEVICES =", os.getenv("CUDA_VISIBLE_DEVICES"))
+print("torch.__version__ =", torch.__version__)
+print("torch.version.cuda =", torch.version.cuda)
+print("cuda available? :", torch.cuda.is_available())
+print("cuda device count :", torch.cuda.device_count())
 if torch.cuda.is_available():
+    print("current device :", torch.cuda.current_device())
+    print("device name :", torch.cuda.get_device_name(torch.cuda.current_device()))
 print("Using device:", device)

+# --------------------------------------------------------------
+# Load the Qwen‑Image‑Edit model + LoRA adapters
+# --------------------------------------------------------------
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

 dtype = torch.bfloat16

 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=QwenImageTransformer2DModel.from_pretrained(
+        "linoyts/Qwen-Image-Edit-Rapid-AIO",
+        subfolder="transformer",
         torch_dtype=dtype,
+        device_map="cuda",
     ),
+    torch_dtype=dtype,
 ).to(device)

+# ---- LoRA adapters -------------------------------------------------
+pipe.load_lora_weights(
+    "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
+    weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
+    adapter_name="anime",
+)
+pipe.load_lora_weights(
+    "dx8152/Qwen-Edit-2509-Multiple-angles",
+    weight_name="镜头转换.safetensors",
+    adapter_name="multiple-angles",
+)
+pipe.load_lora_weights(
+    "dx8152/Qwen-Image-Edit-2509-Light_restoration",
+    weight_name="移除光影.safetensors",
+    adapter_name="light-restoration",
+)
+pipe.load_lora_weights(
+    "dx8152/Qwen-Image-Edit-2509-Relight",
+    weight_name="Qwen-Edit-Relight.safetensors",
+    adapter_name="relight",
+)
+pipe.load_lora_weights(
+    "dx8152/Qwen-Edit-2509-Multi-Angle-Lighting",
+    weight_name="多角度灯光-251116.safetensors",
+    adapter_name="multi-angle-lighting",
+)
+pipe.load_lora_weights(
+    "tlennon-ie/qwen-edit-skin",
+    weight_name="qwen-edit-skin_1.1_000002750.safetensors",
+    adapter_name="edit-skin",
+)
+pipe.load_lora_weights(
+    "lovis93/next-scene-qwen-image-lora-2509",
+    weight_name="next-scene_lora-v2-3000.safetensors",
+    adapter_name="next-scene",
+)
+pipe.load_lora_weights(
+    "vafipas663/Qwen-Edit-2509-Upscale-LoRA",
+    weight_name="qwen-edit-enhance_64-v3_000001000.safetensors",
+    adapter_name="upscale-image",
+)

 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

+# --------------------------------------------------------------
+# Helper – keep aspect ratio, pad to a size accepted by the model
+# --------------------------------------------------------------
+MAX_SIDE = 1024       # longest side we allow (model limit)
+DIVISIBLE_BY = 8      # all dimensions must be a multiple of 8
+
+def _make_multiple(x: int, base: int = DIVISIBLE_BY) -> int:
+    """Round *down* to the nearest multiple of `base`."""
+    return (x // base) * base
+
+def prepare_image_for_pipe(pil_img: Image.Image):
+    """
+    1. Resize the longer side to ``MAX_SIDE`` while preserving aspect‑ratio.
+    2. Pad the resized image (using black pixels) so both dimensions are
+       multiples of 8 – exactly what the diffusion model expects.
+    3. Return the padded image *and* the box that later lets us crop the
+       generation back to the original aspect‑ratio.
+    """
+    # ---- 1️⃣ Resize keeping aspect ratio ---------------------------------
+    w, h = pil_img.size
+    if max(w, h) > MAX_SIDE:
+        if w >= h:              # wide image
+            new_w = MAX_SIDE
+            new_h = int(h * MAX_SIDE / w)
+        else:                   # tall image
+            new_h = MAX_SIDE
+            new_w = int(w * MAX_SIDE / h)
+    else:                       # already small enough
+        new_w, new_h = w, h
+
+    resized = pil_img.resize((new_w, new_h), Image.LANCZOS)
+
+    # ---- 2️⃣ Pad to multiples of 8 ----------------------------------------
+    pad_w = _make_multiple(new_w)
+    pad_h = _make_multiple(new_h)
+
+    # ``ImageOps.pad`` does the padding *and* keeps the image centred.
+    padded = ImageOps.pad(resized, (pad_w, pad_h), method=Image.LANCZOS, color=(0, 0, 0))
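+    # Because pad_w and pad_h are rounded *down*, the target canvas is never larger
+    # than the resized image, so ImageOps.pad scales it to fit and fills any remainder with black.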
+
+    # The region of the original (resized) picture inside the padded canvas.
+    left = (pad_w - new_w) // 2
+    top = (pad_h - new_h) // 2
+    crop_box = (left, top, left + new_w, top + new_h)
+
+    return padded, crop_box, (new_w, new_h)   # padded img, where to cut, size after resize
+
+
+def crop_back_to_original(gen_img: Image.Image, crop_box, final_size):
+    """
+    ``gen_img`` is the image produced by the pipeline (same size as the padded
+    input). We first crop to the region that corresponds to the *resized*
+    original picture and then upscale/downscale back to the exact dimensions
+    the user uploaded.
+    """
+    # 1️⃣ Crop to the region that contains the content
+    cropped = gen_img.crop(crop_box)
+
+    # 2️⃣ Resize back to the *original* dimensions (the size the user gave us)
+    return cropped.resize(final_size, Image.LANCZOS)
+
+
+# --------------------------------------------------------------
+# Inference function (GPU‑bound)
+# --------------------------------------------------------------
+MAX_SEED = np.iinfo(np.int32).max

 @spaces.GPU(duration=30)
 def infer(

     randomize_seed,
     guidance_scale,
     steps,
+    progress=gr.Progress(track_tqdm=True),
 ):
+    """Run a single edit – returns the edited image *with the same aspect‑ratio*."""
     if input_image is None:
         raise gr.Error("Please upload an image to edit.")

+    # ---------- LoRA selection ----------
+    adapter_map = {
+        "Photo-to-Anime": ["anime"],
+        "Multiple-Angles": ["multiple-angles"],
+        "Light-Restoration": ["light-restoration"],
+        "Relight": ["relight"],
+        "Multi-Angle-Lighting": ["multi-angle-lighting"],
+        "Edit-Skin": ["edit-skin"],
+        "Next-Scene": ["next-scene"],
+        "Upscale-Image": ["upscale-image"],
+    }
+    pipe.set_adapters(adapter_map.get(lora_adapter, []), adapter_weights=[1.0])
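+    # Each dropdown label maps onto one of the adapter names registered above;
+    # an unrecognised label simply falls back to an empty selection.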
+
+    # ---------- Seed ----------
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)

+    # ---------- Prompt ----------
+    negative_prompt = (
+        "worst quality, low quality, bad anatomy, bad hands, text, error, "
+        "missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, "
+        "signature, watermark, username, blurry"
+    )
+
+    # ---------- Image preparation ----------
+    original = input_image.convert("RGB")
+    padded, crop_box, resized_sz = prepare_image_for_pipe(original)
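+    # resized_sz is not used below; the final output is resized back to the user's original size instead.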

+    # ---------- Diffusion ----------
     result = pipe(
+        image=padded,
         prompt=prompt,
         negative_prompt=negative_prompt,
+        height=padded.height,
+        width=padded.width,
         num_inference_steps=steps,
         generator=generator,
         true_cfg_scale=guidance_scale,
     ).images[0]

+    # ---------- Crop / resize back ----------
+    final = crop_back_to_original(result, crop_box, original.size)
+
+    return final, seed
+

+# --------------------------------------------------------------
+# Example helper (uses the same logic but with deterministic settings)
+# --------------------------------------------------------------
 @spaces.GPU(duration=30)
 def infer_example(input_image, prompt, lora_adapter):
+    """Runs a quick example – 4 steps, guidance 1.0, random seed."""
+    return infer(
+        input_image,
+        prompt,
+        lora_adapter,
+        seed=0,
+        randomize_seed=True,
+        guidance_scale=1.0,
+        steps=4,
+    )
+
+
+# --------------------------------------------------------------
+# UI
+# --------------------------------------------------------------
+css = """
+#col-container {margin: 0 auto; max-width: 960px;}
 #main-title h1 {font-size: 2.1em !important;}
 """

 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
+        gr.Markdown("# **Qwen-Image-Edit-2509 LoRAs – Fixed Aspect Ratio**",
+                    elem_id="main-title")
+        gr.Markdown(
+            "Edit images with a variety of LoRA adapters while preserving the "
+            "original aspect‑ratio (no unexpected cropping)."
+        )

         with gr.Row(equal_height=True):
+            # ---------- left column ----------
             with gr.Column():
+                input_image = gr.Image(
+                    label="Upload Image",
+                    type="pil",
+                    height=290,
+                )
+                prompt = gr.Textbox(
                     label="Edit Prompt",
+                    placeholder="e.g. transform into anime…",
                 )
                 run_button = gr.Button("Edit Image", variant="primary")

+            # ---------- right column ----------
             with gr.Column():
+                output_image = gr.Image(
+                    label="Output Image",
+                    interactive=False,
+                    format="png",
+                    height=353,
+                )
+                lora_adapter = gr.Dropdown(
+                    label="Choose Editing Style",
+                    choices=[
+                        "Photo-to-Anime", "Multiple-Angles", "Light-Restoration",
+                        "Multi-Angle-Lighting", "Upscale-Image", "Relight",
+                        "Next-Scene", "Edit-Skin",
+                    ],
+                    value="Photo-to-Anime",
+                )
+                with gr.Accordion("Advanced Settings", open=False):
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=0,
+                    )
+                    randomize_seed = gr.Checkbox(
+                        label="Randomize Seed",
+                        value=True,
+                    )
+                    guidance_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=1.0,
+                        maximum=10.0,
+                        step=0.1,
+                        value=1.0,
                     )
+                    steps = gr.Slider(
+                        label="Inference Steps",
+                        minimum=1,
+                        maximum=50,
+                        step=1,
+                        value=4,
+                    )
+
+        # ---------- examples ----------
         gr.Examples(
             examples=[
                 ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
                 ["examples/5.jpg", "Remove shadows and relight the image using soft lighting.", "Light-Restoration"],
+                ["examples/4.jpg", "Use a subtle golden-hour filter with smooth light diffusion.", "Relight"],
                 ["examples/2.jpeg", "Rotate the camera 45 degrees to the left.", "Multiple-Angles"],
                 ["examples/7.jpg", "Light source from the Right Rear", "Multi-Angle-Lighting"],
                 ["examples/10.jpeg", "Upscale the image.", "Upscale-Image"],
                 ["examples/7.jpg", "Light source from the Below", "Multi-Angle-Lighting"],
+                ["examples/2.jpeg", "Switch the camera to a top-down right corner view.", "Multiple-Angles"],
                 ["examples/9.jpg", "The camera moves slightly forward as sunlight breaks through the clouds, casting a soft glow around the character's silhouette in the mist. Realistic cinematic style, atmospheric depth.", "Next-Scene"],
                 ["examples/8.jpg", "Make the subjects skin details more prominent and natural.", "Edit-Skin"],
+                ["examples/6.jpg", "Switch the camera to a bottom-up view.", "Multiple-Angles"],
                 ["examples/6.jpg", "Rotate the camera 180 degrees upside down.", "Multiple-Angles"],
                 ["examples/4.jpg", "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
+                ["examples/4.jpg", "Switch the camera to a top-down view.", "Multiple-Angles"],
+                ["examples/4.jpg", "Switch the camera to a wide-angle lens.", "Multiple-Angles"],
             ],
             inputs=[input_image, prompt, lora_adapter],
             outputs=[output_image, seed],
             fn=infer_example,
             cache_examples=False,
+            label="Examples",
         )

+        # ---------- button action ----------
+        run_button.click(
+            fn=infer,
+            inputs=[
+                input_image,
+                prompt,
+                lora_adapter,
+                seed,
+                randomize_seed,
+                guidance_scale,
+                steps,
+            ],
+            outputs=[output_image, seed],
+        )

 if __name__ == "__main__":
+    demo.queue(max_size=30).launch(
+        css=css,
+        theme=steel_blue_theme,
+        mcp_server=True,
+        ssr_mode=False,
+        show_error=True,
+    )