Update app.py

app.py CHANGED
@@ -7,7 +7,6 @@ import os
 # Force Dynamo off at import-time of torch, pytorch3d, etc.
 os.environ["TORCHDYNAMO_DISABLE"] = "1"
 
-from gradio_litmodel3d import LitModel3D
 import subprocess
 import tempfile
 import uuid
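A note on the surviving context here: TORCHDYNAMO_DISABLE is read when torch first initializes Dynamo, so the assignment only has an effect because it precedes the first `import torch` in app.py. A minimal sketch of the required ordering:

    import os
    os.environ["TORCHDYNAMO_DISABLE"] = "1"  # must be set before torch is first imported

    import torch  # Dynamo/torch.compile machinery now stays disabled process-wide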
@@ -64,6 +63,7 @@ install_cuda_toolkit()
 
 from omegaconf import OmegaConf
 from pixel3dmm.network_inference import normals_n_uvs
+from pixel3dmm.run_facer_segmentation import segment
 
 DEVICE = "cuda"
 
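The newly imported segment() is later handed a facer detector/parser pair (see the preprocess_image hunk below). For orientation, this is roughly how facer's documentation composes those two models; the file name is a placeholder, not something from this Space:

    import torch
    import facer

    device = "cuda"
    # read_hwc loads an H x W x C uint8 image; hwc2bchw reshapes it to 1 x 3 x H x W
    image = facer.hwc2bchw(facer.read_hwc("face.jpg")).to(device)

    face_detector = facer.face_detector("retinaface/mobilenet", device=device)
    face_parser = facer.face_parser("farl/celebm/448", device=device)

    with torch.inference_mode():
        faces = face_detector(image)       # boxes and landmarks per detected face
        faces = face_parser(image, faces)  # adds per-face segmentation logits

    seg_probs = faces["seg"]["logits"].softmax(dim=1)  # n_faces x n_classes x H x W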
@@ -92,7 +92,6 @@ def reset_all():
         None,  # uv_img
         None,  # track_img
         "Time to Generate!",  # status
-        {},  # state
         gr.update(interactive=True),  # preprocess_btn
         gr.update(interactive=True),  # normals_btn
         gr.update(interactive=True),  # uv_map_btn
@@ -111,15 +110,27 @@ def preprocess_image(image_array, session_id):
     img = Image.fromarray(image_array)
     saved_image_path = os.path.join(os.environ["PIXEL3DMM_PREPROCESSED_DATA"], session_id, f"{session_id}.png")
     img.save(saved_image_path)
-
-
-
+
+    import facer
+
+    if "face_detector" not in _model_cache:
+
+        device = 'cuda'
+
+        # This call downloads/loads the RetinaFace Mobilenet weights
+        face_detector = facer.face_detector('retinaface/mobilenet', device=device)
+
+        # This call downloads/loads the FARL parsing model (celeba mask model)
+        face_parser = facer.face_parser('farl/celebm/448', device=device)
+
+        _model_cache['face_detector'] = face_detector
+        _model_cache['face_parser'] = face_parser
+
+    subprocess.run([
         "python", "scripts/run_preprocessing.py", "--video_or_images_path", saved_image_path
     ], check=True, capture_output=True, text=True)
-
-
-        shutil.rmtree(base_dir, ignore_errors=True)
-        return err, None, gr.update(interactive=True), gr.update(interactive=True)
+
+    segment(f'{session_id}', _model_cache['face_detector'], _model_cache['face_parser'])
 
     crop_dir = os.path.join(os.environ["PIXEL3DMM_PREPROCESSED_DATA"], session_id, "cropped")
     image = first_image_from_dir(crop_dir)
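The hunk above leans on a module-level _model_cache dict so the two facer models are downloaded and moved to the GPU once, then reused across requests. Its definition is not part of this diff; a minimal sketch of the lazy-load pattern, assuming app.py defines the dict at module scope:

    import facer

    _model_cache = {}  # assumed: defined once at module scope in app.py

    def get_face_models(device="cuda"):
        # Build detector and parser on first use; later calls hit the cache.
        if "face_detector" not in _model_cache:
            _model_cache["face_detector"] = facer.face_detector("retinaface/mobilenet", device=device)
            _model_cache["face_parser"] = facer.face_parser("farl/celebm/448", device=device)
        return _model_cache["face_detector"], _model_cache["face_parser"]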
@@ -246,7 +257,7 @@ def generate_results_and_mesh(image, session_id=None):
     # Step 1
     status1, crop_img, _, _ = preprocess_image(image, session_id)
     if "❌" in status1:
-
+        return status1, None, None, None, None, None
     # Step 2
     status2, normals_img, _, _ = step2_normals(session_id)
     # Step 3
@@ -348,4 +359,4 @@ with gr.Blocks(css=css) as demo:
 demo.queue(default_concurrency_limit=1,  # ≤ 1 worker per event
            max_size=20)                  # optional: allow 20 waiting jobs
 
-demo.launch()
+demo.launch(share=True)
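On the final hunk: default_concurrency_limit=1 lets each event handler run at most once at a time, which serializes GPU-bound work; max_size=20 turns away new requests once 20 are already waiting; and share=True asks Gradio to tunnel the local server to a temporary public *.gradio.live URL (on Hugging Face Spaces itself, Gradio warns that share is unsupported and serves the Space URL instead). Restated with the semantics spelled out:

    demo.queue(
        default_concurrency_limit=1,  # one running worker per event listener
        max_size=20,                  # queue holds at most 20 waiting jobs
    )
    demo.launch(share=True)           # requests a temporary public share link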