strength
app.py
CHANGED
@@ -147,6 +147,7 @@ def process_canny(input_image, prompt, a_prompt, n_prompt, num_samples, image_re
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -205,7 +206,7 @@ def process_hed(input_image, prompt, a_prompt, n_prompt, num_samples, image_reso
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -262,7 +263,8 @@ def process_depth(input_image, prompt, a_prompt, n_prompt, num_samples, image_re
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -320,7 +322,8 @@ def process_normal(input_image, prompt, a_prompt, n_prompt, num_samples, image_r
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -377,6 +380,8 @@ def process_pose(input_image, prompt, a_prompt, n_prompt, num_samples, image_res
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -433,6 +438,8 @@ def process_seg(input_image, prompt, a_prompt, n_prompt, num_samples, image_reso
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -604,7 +611,8 @@ def process_bbox(input_image, prompt, a_prompt, n_prompt, num_samples, image_res
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -662,6 +670,8 @@ def process_outpainting(input_image, prompt, a_prompt, n_prompt, num_samples, im
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -739,6 +749,8 @@ def process_sketch(input_image, prompt, a_prompt, n_prompt, num_samples, image_r
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -797,7 +809,8 @@ def process_colorization(input_image, prompt, a_prompt, n_prompt, num_samples, i
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -854,7 +867,8 @@ def process_deblur(input_image, prompt, a_prompt, n_prompt, num_samples, image_r
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
@@ -910,7 +924,8 @@ def process_inpainting(input_image, prompt, a_prompt, n_prompt, num_samples, ima
 
     if config.save_memory:
         model.low_vram_shift(is_diffusing=True)
-
+    model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+        [strength] * 13)
     samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                  shape, cond, verbose=False, eta=eta,
                                                  unconditional_guidance_scale=scale,
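
Every hunk makes the same change: right after the low-VRAM shift and before the DDIM sampling call, it assigns model.control_scales from the strength slider, using an exponentially decaying schedule when guess_mode is enabled and a flat schedule otherwise. The following standalone sketch is not part of app.py; the helper name and printed values are illustrative only, showing the 13 per-block scales the added line produces.

# Illustrative sketch (not from app.py) of the scale schedule the diff assigns
# to model.control_scales for the 13 ControlNet control outputs.
def control_scales(strength, guess_mode):
    if guess_mode:
        # Exponential decay: shallow control blocks (small i) are damped,
        # while the deepest block (i == 12) receives the full `strength`.
        return [strength * (0.825 ** float(12 - i)) for i in range(13)]
    # Normal mode: all 13 control outputs are scaled equally.
    return [strength] * 13

print(control_scales(1.0, guess_mode=False))  # [1.0, 1.0, ..., 1.0]
print(control_scales(1.0, guess_mode=True))   # roughly [0.10, 0.12, ..., 0.83, 1.0]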