Update app.py
app.py CHANGED

@@ -37,24 +37,21 @@ class ValidImgDetector:
         mode: str,
         predictor: Predictor,
     ) -> np.ndarray:
-        # input is RGB image, output must be RGB too
         predictor.detector.detector_kwargs["conf"] = score_threshold
         predictor.detector.detector_kwargs["iou"] = iou_threshold
+
         if mode == "Use persons and faces":
             use_persons = True
             disable_faces = False
-
         elif mode == "Use persons only":
             use_persons = True
             disable_faces = True
-
         elif mode == "Use faces only":
             use_persons = False
             disable_faces = False
 
         predictor.age_gender_model.meta.use_persons = use_persons
         predictor.age_gender_model.meta.disable_faces = disable_faces
-        # image = image[:, :, ::-1] # RGB -> BGR
         detected_objects, out_im = predictor.recognize(image)
         has_child, has_female, has_male = False, False, False
         if len(detected_objects.ages) > 0:
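
The first hunk is cleanup: it drops two stale colour-space comments and stray blank lines around the `_detect` mode branching, which copies the UI thresholds into `detector_kwargs` and maps the mode string onto the `use_persons`/`disable_faces` pair. One caveat: an unrecognised mode string would leave both variables unbound and raise a `NameError` at the assignments below the chain. A table-driven sketch that avoids this (the `_MODES` dict, `resolve_mode`, and its fallback are assumptions, not part of this commit):

```python
# Hypothetical refactor sketch, not part of the commit: map each
# UI mode string to (use_persons, disable_faces) in one lookup.
_MODES = {
    "Use persons and faces": (True, False),
    "Use persons only": (True, True),
    "Use faces only": (False, False),
}

def resolve_mode(mode: str) -> tuple[bool, bool]:
    # Unknown strings fall back to "persons and faces".
    return _MODES.get(mode, (True, False))
```
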
@@ -64,24 +61,41 @@ class ValidImgDetector:
 
         return out_im[:, :, ::-1], has_child, has_female, has_male
 
-def …
-    …
-    …
+    def valid_video(self, video_path):
+        cap = cv2.VideoCapture(video_path)
+        results = []
 
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
 
-    …
-    …
+            # Process each frame
+            out_frame, has_child, has_female, has_male = self._detect(frame, 0.4, 0.7, "Use persons and faces", self.predictor)
+            results.append((out_frame, has_child, has_female, has_male))
+
+        cap.release()
+        return results
+
+def infer(video_path: str):
+    if is_url(video_path):
         if os.path.exists(TMP_DIR):
             shutil.rmtree(TMP_DIR)
 
-        …
+        video_path = download_file(video_path, f"{TMP_DIR}/download.mp4")
 
     detector = ValidImgDetector()
-    if not …
-        return None, None, None, "Please input the …
-    …
-    …
-    …
+    if not video_path or not os.path.exists(video_path):
+        return None, None, None, "Please input the video correctly"
+
+    results = detector.valid_video(video_path)
+
+    # Process results to only return first frame's result as an example
+    if results:
+        first_frame_result = results[0]
+        return first_frame_result
+    else:
+        return None, None, None, "No frames detected in video."
 
 if __name__ == "__main__":
     with gr.Blocks() as iface:
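
The second hunk carries the substance of the commit: the new `valid_video` method decodes the clip frame by frame with OpenCV and runs `_detect` on each frame with hard-coded thresholds (conf 0.4, IoU 0.7) in "Use persons and faces" mode, and the rewritten `infer` downloads URL inputs to `TMP_DIR`, validates the path, then returns only the first frame's result. Two caveats worth flagging: `valid_video` reads `self.predictor`, which must be initialised in the `ValidImgDetector` constructor (not shown); and `cv2.VideoCapture.read()` yields BGR frames, while the comment removed in the first hunk suggests `_detect` expects RGB input, so a colour conversion may be needed. Discarding every frame after the first also wastes the full-video pass. A sketch of both fixes, assuming the same `(out_frame, has_child, has_female, has_male)` tuples (`to_rgb`, `aggregate_results`, and the `step` sampling are illustrative, not in the commit):

```python
import cv2

def to_rgb(frame):
    # cv2.VideoCapture yields BGR; convert before an RGB-only detector.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

def aggregate_results(results, step=10):
    # Hypothetical helper: OR the per-frame flags over every
    # `step`-th frame and keep the first annotated frame.
    has_child = has_female = has_male = False
    first_frame = None
    for out_frame, child, female, male in results[::step]:
        if first_frame is None:
            first_frame = out_frame
        has_child = has_child or child
        has_female = has_female or female
        has_male = has_male or male
    return first_frame, has_child, has_female, has_male
```
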
@@ -89,22 +103,7 @@ if __name__ == "__main__":
         with gr.Tab("Upload Mode"):
             gr.Interface(
                 fn=infer,
-                inputs=gr.…
-                outputs=[
-                    gr.Image(label="Detection Result", type="numpy"),
-                    gr.Textbox(label="Has Child"),
-                    gr.Textbox(label="Has Female"),
-                    gr.Textbox(label="Has Male"),
-                ],
-                examples=get_jpg_files(f"{MODEL_DIR}/examples"),
-                allow_flagging="never",
-                cache_examples=False,
-            )
-
-        with gr.Tab("Online Mode"):
-            gr.Interface(
-                fn=infer,
-                inputs=gr.Textbox(label="Online Picture URL"),
+                inputs=gr.Video(label="Upload Video"),
                 outputs=[
                     gr.Image(label="Detection Result", type="numpy"),
                     gr.Textbox(label="Has Child"),
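
The third hunk replaces the old two-tab image UI (local upload plus an "Online Picture URL" textbox, with `get_jpg_files` examples) with a single video input; URL handling now lives inside `infer` via `is_url`/`download_file`. Note that on the error paths `infer` returns the message as the fourth tuple element, so it lands in the "Has Male" textbox; a dedicated status output would be clearer. For reference, a minimal sketch of the resulting wiring, assuming the trailing kwargs and a `launch()` call survive unchanged from the old version (they sit outside the visible diff):

```python
import gradio as gr

# Minimal wiring sketch inferred from the diff context; `infer` is
# the function defined in app.py above.
with gr.Blocks() as iface:
    with gr.Tab("Upload Mode"):
        gr.Interface(
            fn=infer,
            inputs=gr.Video(label="Upload Video"),
            outputs=[
                gr.Image(label="Detection Result", type="numpy"),
                gr.Textbox(label="Has Child"),
                gr.Textbox(label="Has Female"),
                gr.Textbox(label="Has Male"),
            ],
            allow_flagging="never",
            cache_examples=False,
        )

iface.launch()
```
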