ddingdol committed on
Commit a8ccad8 · verified · 1 Parent(s): 6533c2b

Upload 10 files

Files changed (10)
  1. .gitattributes +40 -35
  2. README.md +12 -12
  3. app.py +98 -0
  4. bicycle.jpg +3 -0
  5. buildings.jpg +3 -0
  6. chicago.jpg +3 -0
  7. labels.txt +19 -0
  8. requirements.txt +6 -0
  9. spain.jpg +3 -0
  10. street.jpg +3 -0
.gitattributes CHANGED
@@ -1,35 +1,40 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ bicycle.jpg filter=lfs diff=lfs merge=lfs -text
+ buildings.jpg filter=lfs diff=lfs merge=lfs -text
+ chicago.jpg filter=lfs diff=lfs merge=lfs -text
+ spain.jpg filter=lfs diff=lfs merge=lfs -text
+ street.jpg filter=lfs diff=lfs merge=lfs -text
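
The new rules route the five example photos through Git LFS. A small sketch, assuming a local clone of this Space, to confirm the images were materialized by git-lfs rather than left as ASCII pointer stubs:

from pathlib import Path

# File names are the ones added in this commit; FF D8 is the JPEG magic number.
for name in ["bicycle.jpg", "buildings.jpg", "chicago.jpg", "spain.jpg", "street.jpg"]:
    head = Path(name).read_bytes()[:2]
    print(name, "real JPEG" if head == b"\xff\xd8" else "LFS pointer or missing")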
README.md CHANGED
@@ -1,12 +1,12 @@
- ---
- title: GradioAssignmentTask3
- emoji: 🏃
- colorFrom: purple
- colorTo: pink
- sdk: gradio
- sdk_version: 5.49.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: GradioAssignmentTask3
+ emoji: 🏃
+ colorFrom: purple
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 5.49.1
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,98 @@
+ import gradio as gr
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from PIL import Image
+ import torch
+ from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation
+
+ MODEL_ID = "nvidia/segformer-b4-finetuned-cityscapes-1024-1024"
+ processor = AutoImageProcessor.from_pretrained(MODEL_ID)
+ model = AutoModelForSemanticSegmentation.from_pretrained(MODEL_ID)
+
+ def ade_palette():
+     """19-color palette that maps each Cityscapes class id to an RGB value."""
+     return [
+         [255, 0, 0], [0, 128, 0], [0, 0, 255], [255, 255, 0],
+         [255, 165, 0], [128, 0, 128], [0, 255, 255], [255, 0, 255],
+         [255, 192, 203], [165, 42, 42], [0, 128, 128], [50, 205, 50],
+         [0, 0, 128], [128, 128, 0], [128, 0, 0], [135, 206, 235],
+         [128, 128, 128], [245, 245, 220], [75, 0, 130]
+     ]
+
+ labels_list = []
+ with open("labels.txt", "r", encoding="utf-8") as fp:
+     for line in fp:
+         labels_list.append(line.rstrip("\n"))
+
+ colormap = np.asarray(ade_palette(), dtype=np.uint8)
+
+ def label_to_color_image(label):
+     if label.ndim != 2:
+         raise ValueError("Expect 2-D input label")
+     if np.max(label) >= len(colormap):
+         raise ValueError("label value too large.")
+     return colormap[label]
+
+ def draw_plot(pred_img, seg_np):
+     fig = plt.figure(figsize=(20, 15))
+     grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
+
+     plt.subplot(grid_spec[0])
+     plt.imshow(pred_img)
+     plt.axis('off')
+
+     LABEL_NAMES = np.asarray(labels_list)
+     FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
+     FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
+
+     unique_labels = np.unique(seg_np.astype("uint8"))
+     ax = plt.subplot(grid_spec[1])
+     plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
+     ax.yaxis.tick_right()
+     plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
+     plt.xticks([], [])
+     ax.tick_params(width=0.0, labelsize=25)
+     return fig
+
+ def run_inference(input_img):
+     # input: numpy array from gradio -> PIL
+     img = Image.fromarray(input_img.astype(np.uint8)) if isinstance(input_img, np.ndarray) else input_img
+     if img.mode != "RGB":
+         img = img.convert("RGB")
+
+     inputs = processor(images=img, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     logits = outputs.logits  # (1, C, h/4, w/4)
+
+     # resize to original
+     upsampled = torch.nn.functional.interpolate(
+         logits, size=img.size[::-1], mode="bilinear", align_corners=False
+     )
+     seg = upsampled.argmax(dim=1)[0].cpu().numpy().astype(np.uint8)  # (H, W)
+
+     # colorize & overlay
+     color_seg = colormap[seg]  # (H, W, 3)
+     pred_img = (np.array(img) * 0.5 + color_seg * 0.5).astype(np.uint8)
+
+     fig = draw_plot(pred_img, seg)
+     return fig
+
+ demo = gr.Interface(
+     fn=run_inference,
+     inputs=gr.Image(type="numpy", label="Input Image"),
+     outputs=gr.Plot(label="Overlay + Legend"),
+     examples=[
+         "bicycle.jpg",
+         "buildings.jpg",
+         "chicago.jpg",
+         "spain.jpg",
+         "street.jpg"
+     ],
+     flagging_mode="never",
+     cache_examples=False,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
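
For reference, a minimal local check, assuming app.py, labels.txt, and the example images from this commit sit in the working directory; it calls run_inference() directly instead of going through the Gradio UI:

import numpy as np
from PIL import Image
from app import run_inference  # importing app.py also downloads and loads the SegFormer checkpoint

img = np.array(Image.open("street.jpg").convert("RGB"))
fig = run_inference(img)           # returns the matplotlib figure rendered in the Space
fig.savefig("street_overlay.png")  # blended overlay plus class legend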
bicycle.jpg ADDED

Git LFS Details

  • SHA256: 19838e83da1d48b9420f00894a5101fa58295d421c9e9c2ac7c13c0be571a4e8
  • Pointer size: 131 Bytes
  • Size of remote file: 306 kB
buildings.jpg ADDED

Git LFS Details

  • SHA256: 34fd076888ef635366c8e936e9ae7a0b570d13d89802eaf4214491b7b29b8499
  • Pointer size: 131 Bytes
  • Size of remote file: 396 kB
chicago.jpg ADDED

Git LFS Details

  • SHA256: c655224875629262391208e459b47e70949e3b93710cf8a23bd9bd9835509d8b
  • Pointer size: 131 Bytes
  • Size of remote file: 446 kB
labels.txt ADDED
@@ -0,0 +1,19 @@
+ road
+ sidewalk
+ building
+ wall
+ fence
+ pole
+ traffic light
+ traffic sign
+ vegetation
+ terrain
+ sky
+ person
+ rider
+ car
+ truck
+ bus
+ train
+ motorcycle
+ bicycle
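
These 19 lines are expected to mirror the Cityscapes id-to-label mapping of the checkpoint loaded in app.py; a sketch that prints any mismatch instead of assuming agreement:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("nvidia/segformer-b4-finetuned-cityscapes-1024-1024")
with open("labels.txt", encoding="utf-8") as fp:
    local = [line.strip() for line in fp if line.strip()]
for i, name in enumerate(local):
    if config.id2label.get(i) != name:
        print(f"id {i}: labels.txt={name!r} vs checkpoint={config.id2label.get(i)!r}")
print(f"checked {len(local)} labels")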
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ transformers>=4.41.0
+ gradio>=4.0.0
+ Pillow
+ numpy
+ matplotlib
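
A quick environment check (sketch) that reports the installed versions behind these pins; package names are taken verbatim from requirements.txt:

from importlib.metadata import version

for pkg in ["torch", "transformers", "gradio", "Pillow", "numpy", "matplotlib"]:
    print(pkg, version(pkg))  # raises PackageNotFoundError if a dependency is absent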
spain.jpg ADDED

Git LFS Details

  • SHA256: 6446870e0027f7edda2f8320e8f371b24e9cb0a061ad48b16b9afec802af8a13
  • Pointer size: 131 Bytes
  • Size of remote file: 351 kB
street.jpg ADDED

Git LFS Details

  • SHA256: c222bb4458db44349a2823d2933d072ff365fe703aaa1e4dbfeec80ea1c24d8b
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB