Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,111 +1,93 @@
 import cv2
 import torch
 from PIL import Image, ImageDraw
 import gradio as gr
 import pandas as pd
 from transformers import pipeline
 
-# Load the YOLOv5 model from the ultralytics library
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
-
 translator = pipeline("translation_en_to_ar", model="Helsinki-NLP/opus-mt-en-ar")
 
-# Function to detect objects and draw bounding boxes on images
 def detect_and_draw_image(input_image):
     results = model(input_image)
     detections = results.xyxy[0].numpy()
 
     draw = ImageDraw.Draw(input_image)
-
-    label =
-
-    draw.
-    draw.text((xmin, ymin), f"{label}: {conf:.2f}", fill="white")  # write the label and confidence
+    counts = {}
+    for detection in detections:
+        xmin, ymin, xmax, ymax, conf, class_id = detection
+        label = model.names[int(class_id)]
+        counts[label] = counts.get(label, 0) + 1
+
+        draw.rectangle([(xmin, ymin), (xmax, ymax)], outline="red", width=2)
+        draw.text((xmin, ymin), f"{label}: {conf:.2f}", fill="white")
 
-    # Translate the labels into Arabic
     translated_labels = translator(list(counts.keys()))
 
-    # Create a DataFrame to store the results
     df = pd.DataFrame({
         'Label (English)': list(counts.keys()),
         'Label (Arabic)': [t['translation_text'] for t in translated_labels],
         'Object Count': list(counts.values())
     })
 
     return input_image, df
 
-# Function to detect objects and draw bounding boxes on video
 def detect_and_draw_video(video_path):
     cap = cv2.VideoCapture(video_path)
     frames = []
     overall_counts = {}
 
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
 
         frame = cv2.resize(frame, (640, 480))
         results = model(frame)
         detections = results.xyxy[0].numpy()
 
         for detection in detections:
             xmin, ymin, xmax, ymax, conf, class_id = detection
             label = model.names[int(class_id)]
             overall_counts[label] = overall_counts.get(label, 0) + 1
 
-            # Draw the box and text on the video
             cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
             cv2.putText(frame, f"{label}: {conf:.2f}", (int(xmin), int(ymin) - 10),
                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
 
         frames.append(frame)
 
-    cap.release()  # close the video
-
+    cap.release()
+    output_path = 'output.mp4'
+    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (640, 480))
 
     for frame in frames:
         out.write(frame)
     out.release()
 
-    # Translate the labels into Arabic
     translated_labels = translator(list(overall_counts.keys()))
 
-    # Create a DataFrame to store the results
     df = pd.DataFrame({
         'Label (English)': list(overall_counts.keys()),
         'Label (Arabic)': [t['translation_text'] for t in translated_labels],
         'Object Count': list(overall_counts.values())
     })
 
     return output_path, df
 
-# Create the Gradio interfaces
 image_interface = gr.Interface(
     fn=detect_and_draw_image,
     inputs=gr.Image(type="pil", label="Upload Image"),
     outputs=[gr.Image(type="pil"), gr.Dataframe(label="Object Counts")],
     title="Object Detection for Images",
-    description="Upload an image to see the objects detected
+    description="Upload an image to see the objects detected and their counts."
 )
 
 video_interface = gr.Interface(
     fn=detect_and_draw_video,
     inputs=gr.Video(label="Upload Video"),
     outputs=[gr.Video(label="Processed Video"), gr.Dataframe(label="Object Counts")],
     title="Object Detection for Videos",
-    description="Upload a video to see the objects detected
+    description="Upload a video to see the objects detected and their counts."
 )
 
-# Combine the interfaces into one app
 app = gr.TabbedInterface([image_interface, video_interface], ["Image Detection", "Video Detection"])
-
-# Run the app
-app.launch(debug=True)  # launch the app with debug mode enabled
+app.launch(debug=True)
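For reference, a minimal sketch of how the fixed detection path can be exercised outside Gradio. This is a hypothetical smoke test, not part of the commit; "test.jpg" is a placeholder, and the same dependencies as the Space (torch, pillow) plus network access for torch.hub are assumed:

import torch
from PIL import Image

# Same model the Space loads; torch.hub downloads it on first run.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
img = Image.open("test.jpg")  # placeholder input image

results = model(img)
# results.xyxy[0] holds one row per detection:
# xmin, ymin, xmax, ymax, confidence, class_id
for xmin, ymin, xmax, ymax, conf, class_id in results.xyxy[0].numpy():
    print(model.names[int(class_id)], f"{conf:.2f}")

If the fix is correct, this prints one label/confidence pair per detected object, mirroring what detect_and_draw_image draws onto the uploaded image.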