oriqqqqqqat commited on
Commit
b494aa1
·
1 Parent(s): 98c1616
Files changed (2) hide show
  1. main.py +99 -25
  2. models/densenet/best_fusion_densenet.pth +0 -3
main.py CHANGED
@@ -15,6 +15,7 @@ import torch
15
  import torch.nn as nn
16
  from PIL import Image, ImageOps
17
  from matplotlib import cm
 
18
 
19
  import cv2
20
  from fastapi import FastAPI, File, UploadFile, Form, Request, Depends
@@ -22,152 +23,206 @@ from fastapi.responses import HTMLResponse, RedirectResponse
22
  from fastapi.templating import Jinja2Templates
23
  from fastapi.staticfiles import StaticFiles
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  sys.path.append(os.path.abspath(os.path.dirname(__file__)))
26
  from models.densenet.preprocess.preprocessingwangchan import get_tokenizer, get_transforms
27
  from models.densenet.train_densenet_only import DenseNet121Classifier
28
  from models.densenet.train_text_only import TextClassifier
 
29
  torch.manual_seed(42); np.random.seed(42); random.seed(42)
 
30
  FUSION_LABELMAP_PATH = "models/densenet/label_map_fusion_densenet.json"
31
- FUSION_WEIGHTS_PATH = "models/densenet/best_fusion_densenet.pth"
32
  with open(FUSION_LABELMAP_PATH, "r", encoding="utf-8") as f:
33
  label_map = json.load(f)
 
34
  class_names = [label for label, _ in sorted(label_map.items(), key=lambda x: x[1])]
35
  NUM_CLASSES = len(class_names)
 
36
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
37
  print(f"🧠 Using device: {device}")
 
 
38
  class FusionDenseNetText(nn.Module):
39
  def __init__(self, num_classes, dropout=0.3):
40
  super().__init__()
41
  self.image_model = DenseNet121Classifier(num_classes=num_classes)
42
  self.text_model = TextClassifier(num_classes=num_classes)
 
43
  self.fusion = nn.Sequential(
44
  nn.Linear(num_classes * 2, 128), nn.ReLU(),
45
  nn.Dropout(dropout), nn.Linear(128, num_classes)
46
  )
 
47
  def forward(self, image, input_ids, attention_mask):
48
  logits_img = self.image_model(image)
49
  logits_txt = self.text_model(input_ids, attention_mask)
50
  fused_in = torch.cat([logits_img, logits_txt], dim=1)
51
  fused_out = self.fusion(fused_in)
52
  return fused_out, logits_img, logits_txt
 
 
53
  print("🔄 Loading AI model...")
 
 
 
 
54
  fusion_model = FusionDenseNetText(num_classes=NUM_CLASSES).to(device)
55
- fusion_model.load_state_dict(torch.load(FUSION_WEIGHTS_PATH, map_location=device))
56
  fusion_model.eval()
 
57
  print("✅ AI Model loaded successfully!")
 
 
58
  tokenizer = get_tokenizer()
59
  transform = get_transforms((224, 224))
 
60
  def _find_last_conv2d(mod: torch.nn.Module):
61
  last = None
62
  for m in mod.modules():
63
  if isinstance(m, torch.nn.Conv2d): last = m
64
  return last
 
65
  def compute_gradcam_overlay(img_pil, image_tensor, target_class_idx):
66
  img_branch = fusion_model.image_model
67
  target_layer = _find_last_conv2d(img_branch)
68
- if target_layer is None: return None
 
 
69
  activations, gradients = [], []
 
70
  def fwd_hook(_m, _i, o): activations.append(o)
71
  def bwd_hook(_m, gin, gout): gradients.append(gout[0])
 
72
  h1 = target_layer.register_forward_hook(fwd_hook)
73
  h2 = target_layer.register_full_backward_hook(bwd_hook)
 
74
  try:
75
  img_branch.zero_grad()
76
  logits_img = img_branch(image_tensor)
77
  score = logits_img[0, target_class_idx]
78
  score.backward()
 
79
  act = activations[-1].detach()[0]
80
  grad = gradients[-1].detach()[0]
81
  weights = torch.mean(grad, dim=(1, 2))
 
82
  cam = torch.relu(torch.sum(weights[:, None, None] * act, dim=0))
83
- cam -= cam.min(); cam /= (cam.max() + 1e-8)
 
 
84
  cam_img = Image.fromarray((cam.cpu().numpy() * 255).astype(np.uint8)).resize(img_pil.size, Image.BILINEAR)
85
- cam_np = np.asarray(cam_img).astype(np.float32) / 255.0
86
- heatmap = cm.get_cmap("jet")(cam_np)[:, :, :3]
 
87
  img_np = np.asarray(img_pil.convert("RGB")).astype(np.float32) / 255.0
 
88
  overlay = (0.6 * img_np + 0.4 * heatmap)
89
  return np.clip(overlay * 255, 0, 255).astype(np.uint8)
 
90
  finally:
91
- h1.remove(); h2.remove(); img_branch.zero_grad()
 
 
92
 
93
 
 
94
  app = FastAPI()
95
  app.mount("/static", StaticFiles(directory="static"), name="static")
96
  templates = Jinja2Templates(directory="templates")
97
  os.makedirs("uploads", exist_ok=True)
98
 
 
99
  EXPIRATION_MINUTES = 10
100
  results_cache = {}
101
  cache_lock = threading.Lock()
102
 
103
  def cleanup_expired_cache():
104
- """
105
- ฟังก์ชันนี้จะทำงานใน Background Thread เพื่อตรวจสอบและลบ Cache ที่หมดอายุ
106
- """
107
  while True:
108
- with cache_lock: # ล็อคเพื่อความปลอดภัยในการเข้าถึง cache
109
- # สร้าง list ของ key ที่จะลบ เพื่อไม่ให้แก้ไข dict ขณะวน loop
110
  expired_keys = []
111
  current_time = time.time()
112
  for key, value in results_cache.items():
113
  if current_time - value["created_at"] > EXPIRATION_MINUTES * 60:
114
  expired_keys.append(key)
115
-
116
- # ลบ key ที่หมดอายุ
117
  for key in expired_keys:
118
  del results_cache[key]
119
  print(f"🧹 Cache expired and removed for key: {key}")
120
-
121
- time.sleep(60) # ตรวจสอบทุกๆ 60 วินาที
122
 
123
  @app.on_event("startup")
124
  async def startup_event():
125
- """
126
- เริ่ม Background Thread สำหรับทำความสะอาด Cache เมื่อแอปเริ่มทำงาน
127
- """
128
  cleanup_thread = threading.Thread(target=cleanup_expired_cache, daemon=True)
129
  cleanup_thread.start()
130
- print("🗑️ Cache cleanup task started.")
131
 
132
  SYMPTOM_MAP = {
133
  "noSymptoms": "ไม่มีอาการ", "drinkAlcohol": "ดื่มเหล้า", "smoking": "สูบบุหรี่",
134
  "chewBetelNut": "เคี้ยวหมาก", "eatSpicyFood": "กินเผ็ดแสบ", "wipeOff": "เช็ดออกได้",
135
  "alwaysHurts": "เจ็บเมื่อโดนแผล"
136
  }
 
137
  def process_with_ai_model(image_path: str, prompt_text: str):
138
  try:
139
  image_pil = Image.open(image_path)
140
  image_pil = ImageOps.exif_transpose(image_pil)
141
  image_pil = image_pil.convert("RGB")
 
142
  image_tensor = transform(image_pil).unsqueeze(0).to(device)
143
  enc = tokenizer(prompt_text, return_tensors="pt", padding="max_length",
144
  truncation=True, max_length=128)
 
145
  ids, mask = enc["input_ids"].to(device), enc["attention_mask"].to(device)
 
146
  with torch.no_grad():
147
  fused_logits, _, _ = fusion_model(image_tensor, ids, mask)
148
  probs_fused = torch.softmax(fused_logits, dim=1)[0].cpu().numpy()
 
149
  pred_idx = int(np.argmax(probs_fused))
150
  pred_label = class_names[pred_idx]
151
  confidence = float(probs_fused[pred_idx]) * 100
 
152
  gradcam_overlay_np = compute_gradcam_overlay(image_pil, image_tensor, pred_idx)
 
153
  def image_to_base64(img):
154
  buffered = BytesIO()
155
  img.save(buffered, format="JPEG")
156
  return base64.b64encode(buffered.getvalue()).decode('utf-8')
 
157
  original_b64 = image_to_base64(image_pil)
 
158
  if gradcam_overlay_np is not None:
159
  gradcam_pil = Image.fromarray(gradcam_overlay_np)
160
  gradcam_b64 = image_to_base64(gradcam_pil)
161
  else:
162
  gradcam_b64 = original_b64
 
163
  return original_b64, gradcam_b64, pred_label, f"{confidence:.2f}"
 
164
  except Exception as e:
165
  print(f"❌ Error during AI processing: {e}")
166
  return None, None, "Error", "0.00"
167
 
 
168
  @app.get("/", response_class=RedirectResponse)
169
  async def root():
170
  return RedirectResponse(url="/detect")
 
171
  @app.get("/detect", response_class=HTMLResponse)
172
  async def show_upload_form(request: Request):
173
  return templates.TemplateResponse("detect.html", {"request": request})
@@ -180,45 +235,61 @@ async def handle_upload(
180
  symptom_text: str = Form("")
181
  ):
182
  temp_filepath = os.path.join("uploads", f"{uuid.uuid4()}_{file.filename}")
 
183
  with open(temp_filepath, "wb") as buffer:
184
  shutil.copyfileobj(file.file, buffer)
185
- final_prompt_parts = []
186
  selected_symptoms_thai = {SYMPTOM_MAP.get(cb) for cb in checkboxes if SYMPTOM_MAP.get(cb)}
 
 
 
187
  if "ไม่มีอาการ" in selected_symptoms_thai:
188
  symptoms_group = {"เจ็บเมื่อโดนแผล", "กินเผ็ดแสบ"}
189
  lifestyles_group = {"ดื่มเหล้า", "สูบบุหรี่", "เคี้ยวหมาก"}
190
  patterns_group = {"เช็ดออกได้"}
191
  special_group = {"ไม่มีอาการ"}
 
192
  final_selected = (selected_symptoms_thai - symptoms_group) | \
193
  (selected_symptoms_thai & (lifestyles_group | patterns_group | special_group))
 
194
  final_prompt_parts.append(" ".join(sorted(list(final_selected))))
195
  elif selected_symptoms_thai:
196
  final_prompt_parts.append(" ".join(sorted(list(selected_symptoms_thai))))
 
197
  if symptom_text and symptom_text.strip():
198
  final_prompt_parts.append(symptom_text.strip())
 
199
  final_prompt = "; ".join(final_prompt_parts) if final_prompt_parts else "ไม่มีอาการ"
 
200
  image_b64, gradcam_b64, name_out, eva_output = process_with_ai_model(
201
  image_path=temp_filepath, prompt_text=final_prompt
202
  )
 
203
  os.remove(temp_filepath)
 
204
  result_id = str(uuid.uuid4())
205
  result_data = {
206
- "image_b64_data": image_b64, "gradcam_b64_data": gradcam_b64,
207
- "name_out": name_out, "eva_output": eva_output,
 
 
208
  }
 
209
  with cache_lock:
210
  results_cache[result_id] = {
211
  "data": result_data,
212
- "created_at": time.time()
213
  }
214
 
215
  results_url = request.url_for('show_results', result_id=result_id)
216
  return RedirectResponse(url=results_url, status_code=303)
217
 
 
218
  @app.get("/results/{result_id}", response_class=HTMLResponse)
219
  async def show_results(request: Request, result_id: str):
220
  with cache_lock:
221
  cached_item = results_cache.get(result_id)
 
222
  if not cached_item or (time.time() - cached_item["created_at"] > EXPIRATION_MINUTES * 60):
223
  if cached_item:
224
  with cache_lock:
@@ -228,6 +299,9 @@ async def show_results(request: Request, result_id: str):
228
  context = {"request": request, **cached_item["data"]}
229
  return templates.TemplateResponse("detect.html", context)
230
 
 
 
231
  if __name__ == "__main__":
 
232
  import uvicorn
233
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
15
  import torch.nn as nn
16
  from PIL import Image, ImageOps
17
  from matplotlib import cm
18
+ import requests # <--- เพิ่มเพื่อโหลดจาก HuggingFace
19
 
20
  import cv2
21
  from fastapi import FastAPI, File, UploadFile, Form, Request, Depends
 
23
  from fastapi.templating import Jinja2Templates
24
  from fastapi.staticfiles import StaticFiles
25
 
26
# ============ Download model weights from HuggingFace if missing ============
HF_MODEL_URL = "https://huggingface.co/qqqqqqat/densenet_wangchan/resolve/main/best_fusion_densenet.pth"
LOCAL_MODEL_PATH = "models/densenet/best_fusion_densenet.pth"

def download_model_if_needed():
    """Fetch the fusion-model checkpoint from HuggingFace on first run.

    The .pth file (~450 MB per the removed LFS pointer) is not shipped with
    the repo. It is streamed to disk in chunks so the whole blob is never
    held in memory, and written to a temp file first so a failed or
    interrupted download never leaves a truncated model behind.
    """
    if os.path.exists(LOCAL_MODEL_PATH):
        return
    print("📥 Downloading model from HuggingFace...")
    os.makedirs(os.path.dirname(LOCAL_MODEL_PATH), exist_ok=True)
    tmp_path = LOCAL_MODEL_PATH + ".part"
    # stream=True + iter_content avoids buffering ~450 MB in RAM;
    # raise_for_status() turns an HTTP error page into an exception instead
    # of silently saving it to disk as the "model". (connect, read) timeouts
    # keep startup from hanging forever on a dead network.
    with requests.get(HF_MODEL_URL, stream=True, timeout=(10, 300)) as response:
        response.raise_for_status()
        with open(tmp_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                if chunk:
                    f.write(chunk)
    os.replace(tmp_path, LOCAL_MODEL_PATH)  # atomic rename into place
    print("✅ Model downloaded from HuggingFace!")
# ===================================================================
39
+
40
  sys.path.append(os.path.abspath(os.path.dirname(__file__)))
41
  from models.densenet.preprocess.preprocessingwangchan import get_tokenizer, get_transforms
42
  from models.densenet.train_densenet_only import DenseNet121Classifier
43
  from models.densenet.train_text_only import TextClassifier
44
+
45
  torch.manual_seed(42); np.random.seed(42); random.seed(42)
46
+
47
  FUSION_LABELMAP_PATH = "models/densenet/label_map_fusion_densenet.json"
48
+
49
  with open(FUSION_LABELMAP_PATH, "r", encoding="utf-8") as f:
50
  label_map = json.load(f)
51
+
52
  class_names = [label for label, _ in sorted(label_map.items(), key=lambda x: x[1])]
53
  NUM_CLASSES = len(class_names)
54
+
55
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
56
  print(f"🧠 Using device: {device}")
57
+
58
# ====================== Model Fusion Class ==========================
class FusionDenseNetText(nn.Module):
    """Late-fusion classifier: blends image and text logits through an MLP.

    Both branches emit per-class logits; their concatenation feeds a small
    fusion head. Attribute names (image_model / text_model / fusion) and the
    Sequential layer order must stay fixed — the saved state_dict keys
    depend on them.
    """

    def __init__(self, num_classes, dropout=0.3):
        super().__init__()
        self.image_model = DenseNet121Classifier(num_classes=num_classes)
        self.text_model = TextClassifier(num_classes=num_classes)
        fusion_layers = [
            nn.Linear(num_classes * 2, 128),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(128, num_classes),
        ]
        self.fusion = nn.Sequential(*fusion_layers)

    def forward(self, image, input_ids, attention_mask):
        # Per-branch logits are also returned so callers can inspect them.
        image_logits = self.image_model(image)
        text_logits = self.text_model(input_ids, attention_mask)
        combined = torch.cat([image_logits, text_logits], dim=1)
        return self.fusion(combined), image_logits, text_logits
76
+
77
+ # ===================== Load Model ============================
78
  print("🔄 Loading AI model...")
79
+
80
+ # โหลดไฟล์โมเดลจาก HuggingFace ถ้ายังไม่มี
81
+ download_model_if_needed()
82
+
83
  fusion_model = FusionDenseNetText(num_classes=NUM_CLASSES).to(device)
84
+ fusion_model.load_state_dict(torch.load(LOCAL_MODEL_PATH, map_location=device))
85
  fusion_model.eval()
86
+
87
  print("✅ AI Model loaded successfully!")
88
+ # =============================================================
89
+
90
  tokenizer = get_tokenizer()
91
  transform = get_transforms((224, 224))
92
+
93
  def _find_last_conv2d(mod: torch.nn.Module):
94
  last = None
95
  for m in mod.modules():
96
  if isinstance(m, torch.nn.Conv2d): last = m
97
  return last
98
+
99
def compute_gradcam_overlay(img_pil, image_tensor, target_class_idx):
    """Compute a Grad-CAM overlay for the image branch of the fusion model.

    Hooks the last Conv2d layer, backprops the target-class score through
    the image model only, weights activations by pooled gradients, and
    blends the resulting jet heatmap (40%) onto the original image (60%).

    Returns an HxWx3 uint8 numpy array, or None if the image branch has no
    Conv2d layer to hook.
    """
    img_branch = fusion_model.image_model
    target_layer = _find_last_conv2d(img_branch)
    if target_layer is None:
        return None

    activations, gradients = [], []

    def fwd_hook(_m, _i, o):
        activations.append(o)

    def bwd_hook(_m, gin, gout):
        gradients.append(gout[0])

    h1 = target_layer.register_forward_hook(fwd_hook)
    h2 = target_layer.register_full_backward_hook(bwd_hook)

    try:
        img_branch.zero_grad()
        logits_img = img_branch(image_tensor)
        score = logits_img[0, target_class_idx]
        score.backward()

        act = activations[-1].detach()[0]
        grad = gradients[-1].detach()[0]
        # Global-average-pool the gradients into one weight per channel.
        weights = torch.mean(grad, dim=(1, 2))

        cam = torch.relu(torch.sum(weights[:, None, None] * act, dim=0))
        cam -= cam.min()
        cam /= (cam.max() + 1e-8)  # epsilon guards an all-zero CAM

        cam_img = Image.fromarray((cam.cpu().numpy() * 255).astype(np.uint8)).resize(img_pil.size, Image.BILINEAR)

        # Fix: normalize to float [0, 1] before applying the colormap.
        # Passing the PIL image in directly relied on matplotlib's implicit
        # uint8 lookup-table indexing; the float path is the documented API
        # and matches the intended 0..1 CAM scale.
        cam_np = np.asarray(cam_img).astype(np.float32) / 255.0
        heatmap = cm.get_cmap("jet")(cam_np)[:, :, :3]

        img_np = np.asarray(img_pil.convert("RGB")).astype(np.float32) / 255.0

        overlay = (0.6 * img_np + 0.4 * heatmap)
        return np.clip(overlay * 255, 0, 255).astype(np.uint8)
    finally:
        # Always detach hooks and clear grads, even if backward fails,
        # so a Grad-CAM error cannot poison later inference calls.
        h1.remove()
        h2.remove()
        img_branch.zero_grad()
140
 
141
 
142
# ==================== FastAPI Server ==========================
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
os.makedirs("uploads", exist_ok=True)  # scratch dir for uploaded images

# In-memory result cache: result_id -> {"data": ..., "created_at": epoch-seconds}.
# Entries older than EXPIRATION_MINUTES are purged by a background thread;
# cache_lock serializes access between request handlers and the cleaner.
EXPIRATION_MINUTES = 10
results_cache = {}
cache_lock = threading.Lock()
152
 
153
def cleanup_expired_cache():
    """Background task: purge cached results older than EXPIRATION_MINUTES.

    Runs forever inside a daemon thread (started on app startup), scanning
    the cache once per minute under cache_lock.
    """
    while True:
        with cache_lock:
            now = time.time()
            # Collect keys first so the dict is never mutated mid-iteration.
            expired_keys = [
                key for key, value in results_cache.items()
                if now - value["created_at"] > EXPIRATION_MINUTES * 60
            ]
            for key in expired_keys:
                del results_cache[key]
                print(f"🧹 Cache expired and removed for key: {key}")
        # Sleep OUTSIDE the lock: holding cache_lock for the whole 60-second
        # wait would block every request handler that touches the cache.
        time.sleep(60)
167
 
168
@app.on_event("startup")
async def startup_event():
    # Launch the cache cleaner as a daemon thread so it dies with the server
    # process and never blocks shutdown.
    # NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
    # lifespan handlers — worth migrating when convenient.
    cleanup_thread = threading.Thread(target=cleanup_expired_cache, daemon=True)
    cleanup_thread.start()
    print("🗑️ Cache cleanup task started.")
173
 
174
# Maps HTML-form checkbox field names to the Thai symptom/lifestyle labels
# the text branch was trained on.
SYMPTOM_MAP = {
    "noSymptoms": "ไม่มีอาการ", "drinkAlcohol": "ดื่มเหล้า", "smoking": "สูบบุหรี่",
    "chewBetelNut": "เคี้ยวหมาก", "eatSpicyFood": "กินเผ็ดแสบ", "wipeOff": "เช็ดออกได้",
    "alwaysHurts": "เจ็บเมื่อโดนแผล"
}
179
+
180
def process_with_ai_model(image_path: str, prompt_text: str):
    """Run the fusion model on one uploaded image plus the symptom prompt.

    Returns (original_b64, gradcam_b64, predicted_label, confidence_str);
    on any failure returns (None, None, "Error", "0.00") so the endpoint
    can still render a page instead of crashing the request.
    """
    try:
        # Apply EXIF orientation first so phone photos are upright before
        # the transform pipeline sees them.
        image_pil = Image.open(image_path)
        image_pil = ImageOps.exif_transpose(image_pil)
        image_pil = image_pil.convert("RGB")

        image_tensor = transform(image_pil).unsqueeze(0).to(device)
        enc = tokenizer(prompt_text, return_tensors="pt", padding="max_length",
                        truncation=True, max_length=128)

        ids, mask = enc["input_ids"].to(device), enc["attention_mask"].to(device)

        with torch.no_grad():
            fused_logits, _, _ = fusion_model(image_tensor, ids, mask)
            probs_fused = torch.softmax(fused_logits, dim=1)[0].cpu().numpy()

        pred_idx = int(np.argmax(probs_fused))
        pred_label = class_names[pred_idx]
        confidence = float(probs_fused[pred_idx]) * 100

        # Grad-CAM needs gradients, so it runs outside the no_grad block.
        gradcam_overlay_np = compute_gradcam_overlay(image_pil, image_tensor, pred_idx)

        def image_to_base64(img):
            buffered = BytesIO()
            img.save(buffered, format="JPEG")
            return base64.b64encode(buffered.getvalue()).decode('utf-8')

        original_b64 = image_to_base64(image_pil)

        if gradcam_overlay_np is not None:
            gradcam_pil = Image.fromarray(gradcam_overlay_np)
            gradcam_b64 = image_to_base64(gradcam_pil)
        else:
            # Grad-CAM unavailable (no conv layer found): fall back to the
            # plain image so the template always has something to show.
            gradcam_b64 = original_b64

        return original_b64, gradcam_b64, pred_label, f"{confidence:.2f}"

    except Exception as e:
        # Log the full traceback, not just the message — failures deep in
        # the model pipeline are otherwise undebuggable from server logs.
        import traceback
        print(f"❌ Error during AI processing: {e}")
        traceback.print_exc()
        return None, None, "Error", "0.00"
220
 
221
+
222
@app.get("/", response_class=RedirectResponse)
async def root():
    """Redirect the bare root URL to the detection page."""
    detect_redirect = RedirectResponse(url="/detect")
    return detect_redirect
225
+
226
@app.get("/detect", response_class=HTMLResponse)
async def show_upload_form(request: Request):
    """Serve the image-upload / symptom-checkbox form."""
    context = {"request": request}
    return templates.TemplateResponse("detect.html", context)
 
235
  symptom_text: str = Form("")
236
  ):
237
  temp_filepath = os.path.join("uploads", f"{uuid.uuid4()}_{file.filename}")
238
+
239
  with open(temp_filepath, "wb") as buffer:
240
  shutil.copyfileobj(file.file, buffer)
241
+
242
  selected_symptoms_thai = {SYMPTOM_MAP.get(cb) for cb in checkboxes if SYMPTOM_MAP.get(cb)}
243
+
244
+ final_prompt_parts = []
245
+
246
  if "ไม่มีอาการ" in selected_symptoms_thai:
247
  symptoms_group = {"เจ็บเมื่อโดนแผล", "กินเผ็ดแสบ"}
248
  lifestyles_group = {"ดื่มเหล้า", "สูบบุหรี่", "เคี้ยวหมาก"}
249
  patterns_group = {"เช็ดออกได้"}
250
  special_group = {"ไม่มีอาการ"}
251
+
252
  final_selected = (selected_symptoms_thai - symptoms_group) | \
253
  (selected_symptoms_thai & (lifestyles_group | patterns_group | special_group))
254
+
255
  final_prompt_parts.append(" ".join(sorted(list(final_selected))))
256
  elif selected_symptoms_thai:
257
  final_prompt_parts.append(" ".join(sorted(list(selected_symptoms_thai))))
258
+
259
  if symptom_text and symptom_text.strip():
260
  final_prompt_parts.append(symptom_text.strip())
261
+
262
  final_prompt = "; ".join(final_prompt_parts) if final_prompt_parts else "ไม่มีอาการ"
263
+
264
  image_b64, gradcam_b64, name_out, eva_output = process_with_ai_model(
265
  image_path=temp_filepath, prompt_text=final_prompt
266
  )
267
+
268
  os.remove(temp_filepath)
269
+
270
  result_id = str(uuid.uuid4())
271
  result_data = {
272
+ "image_b64_data": image_b64,
273
+ "gradcam_b64_data": gradcam_b64,
274
+ "name_out": name_out,
275
+ "eva_output": eva_output,
276
  }
277
+
278
  with cache_lock:
279
  results_cache[result_id] = {
280
  "data": result_data,
281
+ "created_at": time.time()
282
  }
283
 
284
  results_url = request.url_for('show_results', result_id=result_id)
285
  return RedirectResponse(url=results_url, status_code=303)
286
 
287
+
288
  @app.get("/results/{result_id}", response_class=HTMLResponse)
289
  async def show_results(request: Request, result_id: str):
290
  with cache_lock:
291
  cached_item = results_cache.get(result_id)
292
+
293
  if not cached_item or (time.time() - cached_item["created_at"] > EXPIRATION_MINUTES * 60):
294
  if cached_item:
295
  with cache_lock:
 
299
  context = {"request": request, **cached_item["data"]}
300
  return templates.TemplateResponse("detect.html", context)
301
 
302
+
303
# =============== Supports Render / Railway / VPS hosting ================
if __name__ == "__main__":
    import uvicorn

    # Hosting platforms inject the listening port via $PORT; fall back to
    # 8000 for local development runs.
    port = int(os.environ.get("PORT", 8000))
    uvicorn.run(app, host="0.0.0.0", port=port)
models/densenet/best_fusion_densenet.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a2431a8b7f458d21df66690c625f631e2263f2b433bea3e4401a13e835a63d62
3
- size 451379781