# app.py — Gradio demo: YOLOv9 object detection on video frames.
import gradio as gr
import cv2
from huggingface_hub import hf_hub_download
import yolov9
def detect_objects_on_video(video_path, model_path, interval, image_size, conf_threshold, iou_threshold):
    """Run YOLOv9 detection on every ``interval``-th frame of a video.

    Args:
        video_path: Path to the input video file.
        model_path: Path to the YOLOv9 weights file passed to ``yolov9.load``.
        interval: Sampling stride — a frame is processed when the running
            frame counter is a multiple of this value.
            NOTE(review): the UI labels this "seconds", but it is applied as
            a frame count here; confirm the intended units.
        image_size: Inference image size forwarded to the model call.
        conf_threshold: Confidence threshold applied to detections.
        iou_threshold: IoU threshold for non-max suppression.

    Returns:
        List of rendered frames (numpy arrays) with detection boxes drawn.
    """
    # Load the model once, up front — the original re-loaded the weights
    # from disk for every sampled frame, which dominated the runtime.
    model = yolov9.load(model_path)
    model.conf = conf_threshold
    model.iou = iou_threshold

    cap = cv2.VideoCapture(video_path)
    detections = []
    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # End of stream or read failure.
                break
            count += 1
            if count % interval == 0:
                results = model(frame, size=image_size)
                # render() draws bounding boxes and returns a list of images;
                # a single frame in -> a single rendered image out.
                detections.append(results.render()[0])
    finally:
        # Always free the capture handle, even if inference raises.
        # (cv2.destroyAllWindows() was dropped: no windows are created in a
        # server context, and it fails on headless OpenCV builds.)
        cap.release()
    return detections
def app():
    """Build the demo UI: detection parameters on the left, results on the right.

    Wires the "Detect Objects" button to :func:`detect_objects_on_video`.
    Intended to be called inside an outer ``gr.Blocks`` context.
    """
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                # NOTE(review): ``type="file"`` was removed from gr.Video in
                # Gradio 4 (file paths are the default), so it is dropped here.
                video_path = gr.Video(label="Video")
                model_path = gr.Dropdown(
                    label="Model",
                    choices=[
                        "best.pt",
                    ],
                    value="./best.pt",
                )
                # Fixed: gr.Number has no ``default`` keyword — the initial
                # value is ``value=``, consistent with the sliders below.
                interval = gr.Number(label="Screenshot Interval (seconds)", value=30, step=1)
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Detect Objects")
            with gr.Column():
                # Fixed: the callback returns a *list* of rendered frames;
                # gr.Gallery displays a list of images, whereas the original
                # gr.Image expects exactly one and errors on multiple.
                output_images = gr.Gallery(label="Output Images")
        yolov9_infer.click(
            fn=detect_objects_on_video,
            inputs=[
                video_path,
                model_path,
                interval,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_images],
        )
# Assemble the page: a centered title banner above the detection interface,
# then start the Gradio server.
demo = gr.Blocks()
with demo:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv9: Detect Objects in Video
        </h1>
        """
    )
    with gr.Row():
        with gr.Column():
            app()
demo.launch(debug=True)