Spaces: Running on Zero
Commit c1b91a1
1 Parent(s): 5492618
modified: src/flux/util.py

Files changed:
- app.py +1 -1
- src/flux/__pycache__/__init__.cpython-310.pyc +0 -0
- src/flux/__pycache__/_version.cpython-310.pyc +0 -0
- src/flux/__pycache__/math.cpython-310.pyc +0 -0
- src/flux/__pycache__/model.cpython-310.pyc +0 -0
- src/flux/__pycache__/sampling.cpython-310.pyc +0 -0
- src/flux/__pycache__/util.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/autoencoder.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/conditioner.cpython-310.pyc +0 -0
- src/flux/modules/__pycache__/layers.cpython-310.pyc +0 -0
- src/flux/modules/conditioner.py +3 -3
- src/flux/util.py +2 -2
app.py
CHANGED
@@ -341,7 +341,7 @@ class FluxEditor:
         self.model = self.model.to(self.device)
 
         self.model = self.model.to(self.device)
-        print("model has been moved to {self.device}")
+        print(f"model has been moved to {self.device}")
 
         #----------------------------- 1 Inverting current image -------------------------------------#
         denoise_strategies = ['fireflow', 'rf', 'rf_solver', 'midpoint', 'rf_inversion', 'multi_turn_consistent']
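Note on this hunk: the only functional change is the added f-string prefix. Without it, Python prints the literal text {self.device} rather than the actual device. A minimal standalone illustration (the device value here is just an example, not taken from the app):

device = "cuda"
print("model has been moved to {device}")   # prints the braces literally
print(f"model has been moved to {device}")  # interpolates -> "model has been moved to cuda"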
src/flux/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ

src/flux/__pycache__/_version.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ

src/flux/__pycache__/math.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ

src/flux/__pycache__/model.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ

src/flux/__pycache__/sampling.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ

src/flux/__pycache__/util.cpython-310.pyc
CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ

src/flux/modules/__pycache__/autoencoder.cpython-310.pyc
CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ

src/flux/modules/__pycache__/conditioner.cpython-310.pyc
CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ

src/flux/modules/__pycache__/layers.cpython-310.pyc
CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
src/flux/modules/conditioner.py
CHANGED
@@ -12,8 +12,8 @@ class HFEmbedder(nn.Module):
         self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
 
         if self.is_clip:
-            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length, truncation=True)
-
+            #self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length, truncation=True)
+            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained("/home/user/app/models/tokenizer", max_length=max_length, truncation=True)
             self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
 
             # --- DEBUG info ---
@@ -75,7 +75,7 @@ class HFEmbedder(nn.Module):
 
         # Stricter assertion
         assert input_ids.shape == (len(text), self.max_length), f"Input IDs shape {input_ids.shape} does not match expected ({len(text)}, {self.max_length})"
-        print(f"Input IDs:\n{input_ids}")
+        #print(f"Input IDs:\n{input_ids}")
 
         # --- Key debug: check input ID range ---
         min_id, max_id = input_ids.min().item(), input_ids.max().item()
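For context, this hunk stops loading the CLIP tokenizer from the hub checkpoint passed in via version and instead reads it from a local directory inside the Space. A hypothetical one-off script that could populate that directory, assuming the base checkpoint is openai/clip-vit-base-patch32 (taken from the util.py hunk below):

from transformers import CLIPTokenizer

# Download the tokenizer once and save it to the local path now used in conditioner.py
tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tok.save_pretrained("/home/user/app/models/tokenizer")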
src/flux/util.py
CHANGED
@@ -136,8 +136,8 @@ def load_t5(device: str | torch.device = "cuda", max_length: int = 77) -> HFEmbe
 
 
 def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
-    return HFEmbedder("openai/clip-vit-base-patch32", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
-
+    #return HFEmbedder("openai/clip-vit-base-patch32", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
+    return HFEmbedder("/home/user/app/models/text_encoder", max_length=77, is_clip=True, torch_dtype=torch.bfloat16).to(device)
 
 
 def load_ae(name: str, device: str | torch.device = "cuda", hf_download: bool = True) -> AutoEncoder:
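Likewise, load_clip() now points HFEmbedder at a local text-encoder directory instead of the hub. A hypothetical companion step to cache the weights there, mirroring the commented-out checkpoint above (paths and dtype are assumptions taken from this diff):

import torch
from transformers import CLIPTextModel

# Cache the CLIP text encoder locally so load_clip() can read it from the new path
model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32", torch_dtype=torch.bfloat16)
model.save_pretrained("/home/user/app/models/text_encoder")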