import gradio as gr
from typing import Any, Dict
from urllib.request import urlopen, Request
from io import BytesIO
from pathlib import Path
from PIL import Image
from functools import lru_cache
from moderators import AutoModerator

_MODEL_CACHE: Dict[str, Any] = {}

EXAMPLE_ITEMS = [
    ("https://assets.clevelandclinic.org/transform/LargeFeatureImage/cd71f4bd-81d4-45d8-a450-74df78e4477a/Apples-184940975-770x533-1_jpg", "viddexa/nsfw-detection-2-mini"),
    ("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSbRwt56NYsiHwrT8oS-igzgeEzp7p3Jbe2dw&s", "viddexa/nsfw-detection-2-mini"),
    ("https://img.freepik.com/premium-photo/portrait-beautiful-young-woman_1048944-5548042.jpg", "viddexa/nsfw-detection-2-mini"),
]
CUSTOM_CSS = """
.header {
text-align: center;
padding: 2rem 1rem;
background: linear-gradient(135deg, #14b8a6 0%, #0d9488 100%);
color: white;
border-radius: 8px;
margin-bottom: 2rem;
}
.header h1 { margin: 0; font-size: 2.5em; }
.header p { margin: 0.5rem 0; opacity: 0.9; }
.header a { color: white; text-decoration: underline; }
/* Hide built-in Gradio footer */
footer {
display: none !important;
}
/* Custom footer styling */
.custom-footer {
text-align: center;
padding: 1rem;
color: #888;
margin-top: 2rem;
}
"""
EXAMPLES_DIR = Path("examples")
EXAMPLES_DIR.mkdir(exist_ok=True)
@lru_cache(maxsize=32)
def download_image(url: str) -> Image.Image:
    """Download and return PIL Image from URL."""
    req = Request(url, headers={"User-Agent": "viddexa-gradio-demo/1.0"})
    with urlopen(req, timeout=20) as resp:
        return Image.open(BytesIO(resp.read())).convert("RGB")
def prepare_examples():
    """Download and prepare example images for display."""
    examples = []
    for i, (url, model) in enumerate(EXAMPLE_ITEMS):
        img_path = EXAMPLES_DIR / f"example_{i}.jpg"
        if not img_path.exists():
            try:
                img = download_image(url)
                img.save(img_path, "JPEG", quality=95)
            except Exception as e:
                print(f"Warning: Could not download example {i}: {e}")
                continue
        examples.append([str(img_path), model])
    return examples
def load_model(model_id: str, token: str | None = None) -> Any:
    """Load model with caching."""
    if model_id not in _MODEL_CACHE:
        _MODEL_CACHE[model_id] = AutoModerator.from_pretrained(model_id, token=token, use_fast=True)
    return _MODEL_CACHE[model_id]
def analyze(image_path: str | None, image_url: str | None, model_id: str, token: str | None = None):
    """Run inference and return classification scores."""
    if not image_path and not image_url:
        raise gr.Error("Provide an image or URL")
    img = Image.open(image_path).convert("RGB") if image_path else download_image(image_url)
    model = load_model(model_id, token)
    results = model(img)
    # Results may expose classifications as an attribute or as a dict key.
    first = results[0]
    classifications = first.classifications if hasattr(first, "classifications") else first["classifications"]
    # Normalize to a flat {label: score} mapping.
    if isinstance(classifications, dict):
        pairs = classifications.items()
    else:
        pairs = [(c["label"], c["score"]) for c in classifications]
    return {str(k): float(v) for k, v in pairs}
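

# --- Hypothetical UI wiring --------------------------------------------------
# The excerpt ends at analyze(); the original file's Gradio UI section is not
# shown here. The sketch below is only an assumption about how the pieces
# above could be tied together with gr.Blocks: component names, layout, and
# the footer text are illustrative, not the original demo's code.
MODEL_CHOICES = ["viddexa/nsfw-detection-2-mini"]  # assumed model list

with gr.Blocks(css=CUSTOM_CSS) as demo:
    gr.HTML('<div class="header"><h1>NSFW Detection Demo</h1><p>Upload an image or paste a URL.</p></div>')
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(type="filepath", label="Image")
            url_in = gr.Textbox(label="Image URL (optional)")
            model_dd = gr.Dropdown(choices=MODEL_CHOICES, value=MODEL_CHOICES[0], label="Model")
            token_in = gr.Textbox(label="HF token (optional)", type="password")
            run_btn = gr.Button("Analyze")
        with gr.Column():
            scores_out = gr.Label(label="Classification scores")
    # analyze() takes (image_path, image_url, model_id, token) in that order.
    run_btn.click(fn=analyze, inputs=[image_in, url_in, model_dd, token_in], outputs=scores_out)
    gr.Examples(examples=prepare_examples(), inputs=[image_in, model_dd])
    gr.HTML('<div class="custom-footer">Demo footer</div>')

if __name__ == "__main__":
    demo.launch()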