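"""Gradio app for the VisualOverload leaderboard and evaluation server.

Shows the public leaderboard and lets signed-in users upload prediction files,
which are scored against the encrypted ground truth.
"""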
import gradio as gr
import json, os, time, uuid
import pandas as pd
from rate_limiter import RateLimiter, RateLimitConfig
from config import *
from judge import build_question_index_from_json, judge
SUBMISSION_TEXT = """Upload a JSON file with your predictions and click Submit. Your predictions should be a list of dictionaries, each containing a \"question_id\" field and a \"response\" field. For multiple-choice questions, the \"response\" field should contain the predicted option letter (A-D). For open-ended questions, it should contain the predicted answer. We will apply simple heuristics to clean the responses, but please ensure they are as accurate as possible.
Example:
```
[
    {\"question_id\": \"28deb79e\", \"response\": \"A\"},
    {\"question_id\": \"73cbabd7\", \"response\": \"C\"},
    ...
]
```
Your file:"""
INTRO_TEXT = """# Welcome to the VisualOverload Leaderboard!
Below you will find the public leaderboard for the [VisualOverload benchmark](https://huggingface.co/datasets/paulgavrikov/visualoverload), which evaluates models on their ability to understand and reason about complex visual scenes. We separate results by model and by 'special' inference techniques (e.g., special prompts, ICL, CoT, etc.) to better understand the source of their performance.
The leaderboard ranks models by their overall accuracy across six tasks (activity recognition, attribute recognition, counting, OCR, reasoning, and global scene recognition). We provide an aggregate score (Total) as well as individual scores for each of the three difficulty splits (Easy, Medium, Hard) and for each task."""
INTRO_DETAILS = "Please see the Evaluation tab for submission instructions and details on how to get your results listed."
def load_ground_truth():
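    """Decrypt ground_truth.secret with the SECRET_KEY environment variable and
    return a DataFrame of questions annotated with their difficulty split."""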
from cryptography.fernet import Fernet
key = os.getenv("SECRET_KEY")
cipher = Fernet(key)
with open("ground_truth.secret", "rb") as f:
        json_data = json.loads(cipher.decrypt(f.read()))
hardness_levels = json_data["splits"]
df_gt = pd.DataFrame.from_dict(json_data["benchmark"])
df_gt.question_id = df_gt.question_id.astype(str)
df_gt = df_gt.set_index("question_id")
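    # Tag each question with its difficulty split from the benchmark metadata.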
for level, ids in hardness_levels.items():
ids = [str(i) for i in ids]
df_gt.loc[ids, "difficulty"] = level
return df_gt.reset_index()
def login_check(profile: gr.OAuthProfile | None):
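    """Show the submission widgets only for signed-in users; the returned
    updates match the `outputs` order wired up in `app.load` below."""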
visible = profile is not None
welcome = (
f"## Welcome to the evaluation server @{profile.username} πŸ‘‹"
if visible
else "πŸ”’ Please sign in to submit."
)
    quota_details = quota_check(profile)
return (
welcome,
gr.Markdown(visible=visible),
gr.Markdown(value=quota_details, visible=visible),
        gr.Textbox(visible=visible and SAVE_SUBMISSIONS),
        gr.File(visible=visible),
        # gr.Checkbox(visible=visible),
        gr.Checkbox(visible=visible and SAVE_SUBMISSIONS),
gr.Button(visible=visible),
)
def quota_check(profile: gr.OAuthProfile | None):
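    """Build a Markdown summary of the user's remaining submission quota, or
    return None if quotas are disabled."""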
quota_details = None
if profile and (
MAX_SUBMISSIONS_PER_USER_PER_DAY > 0 or MAX_TOTAL_SUBMISSIONS_PER_USER > 0
):
quota = limiter.get_status(profile.username)
quota_details = (
f"### Remaining quota \n"
+ (
f"**Daily Used:** {quota['daily_used']} / {MAX_SUBMISSIONS_PER_USER_PER_DAY} \n"
if MAX_SUBMISSIONS_PER_USER_PER_DAY
else ""
)
+ (
f"**Total Used:** {quota['total_used']} / {MAX_TOTAL_SUBMISSIONS_PER_USER} \n"
if MAX_TOTAL_SUBMISSIONS_PER_USER
else ""
)
)
return quota_details
def submit(
submission_id: str,
submission_file: str,
# is_cleaning: bool,
is_private: bool,
profile: gr.OAuthProfile | None,
):
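    """Validate, score, and optionally persist an uploaded prediction file,
    then return a Markdown report of the accuracy scores."""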
if not profile:
raise gr.Error("πŸ”’ Please sign in first.")
if not submission_file:
raise gr.Error("❌ Please upload a submission file.")
username = profile.username
now = time.time()
with open(submission_file, "rb") as file:
try:
prediction_json = json.load(file)
except json.JSONDecodeError:
raise gr.Error("❌ Submission file is invalid JSON.")
try:
# if is_cleaning:
# prediction_json = clean_submission(prediction_json)
# score_dict = evaluate_submission(prediction_json, ground_truth_json)
_, score_dict = judge(prediction_json, question_index)
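        # Keep only the accuracy metrics and format them as percentages.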
        score_dict = {
            k: f"{float(round(v * 100, 1))} %"
            for k, v in score_dict.items()
            if k.startswith("accuracy/")
        }
except Exception as e:
print(e)
        raise gr.Error("❌ Invalid submission format. Check logs for details.")
    # Enforce per-user rate limits before reporting results.
    allowed, allowed_reason = limiter.is_allowed(username)
if not allowed:
if allowed_reason == "min_interval_seconds":
raise gr.Error(
f"❌ You must wait at least {RATE_LIMIT_MIN_INT_SEC} seconds between submissions."
)
elif allowed_reason == "max_per_day":
raise gr.Error(
f"❌ You have reached your daily submission limit of {MAX_SUBMISSIONS_PER_USER_PER_DAY}."
)
elif allowed_reason == "max_total":
raise gr.Error(
f"❌ You have reached your total submission limit of {MAX_TOTAL_SUBMISSIONS_PER_USER}."
)
    if not is_private and SAVE_SUBMISSIONS:
        sid = str(uuid.uuid4())
        # TODO: it is probably a good idea to sanitize the prediction_json here,
        # e.g. remove any user-provided fields to avoid memory attacks.
        data = {
            "username": username,
            "identifier": submission_id,
            "timestamp": now,
            "scores": score_dict,
        }
        if SAVE_SUBMISSION_PREDICTIONS:
            data["predictions"] = prediction_json
        with open(os.path.join(SUB_DIR, f"{sid}.json"), "w") as f:
            json.dump(data, f)
    fixed_order = ["accuracy/easy", "accuracy/medium", "accuracy/hard", "accuracy/total"]
    metric_order = sorted(set(score_dict.keys()) - set(fixed_order)) + fixed_order
    df_metrics = pd.DataFrame([score_dict])[metric_order]
    df_metrics.columns = [col.replace("accuracy/", "") for col in df_metrics.columns]
    formatted_scores = df_metrics.to_markdown(index=False)
score_response = f"""
Your submission has been evaluated!
```
{formatted_scores}
```
If you want your submission to appear on the public leaderboard, please follow the instructions to open a ticket at [https://github.com/paulgavrikov/visualoverload/issues](https://github.com/paulgavrikov/visualoverload/issues).
"""
    return gr.Markdown(score_response, visible=True)
def get_leaderboard() -> "pd.io.formats.style.Styler":
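    """Load leaderboard.csv, sort by Total accuracy, and return a Styler that
    renders float columns with one decimal place."""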
df = pd.read_csv("leaderboard.csv").set_index(["Model", "Special Inference"])
df = df.sort_values(by="Total", ascending=False)
df = df.reset_index()
float_cols = df.select_dtypes(include=["float"]).columns
styler = df.style.format('{:.1f}', subset=float_cols)
return styler
def get_quota(profile: gr.OAuthProfile | None = None):
    # Currently unused helper; returns the raw rate-limit status for a signed-in user.
    if profile is None:
        return None
    return limiter.get_status(profile.username)
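# Build the UI: a public leaderboard tab and an evaluation tab for authenticated submissions.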
with gr.Blocks() as app:
if SHOW_LEADERBOARD:
with gr.Tab("πŸ† Public Leaderboard"):
leaderboard_heading_md = gr.Markdown(
INTRO_TEXT
)
leaderboard_table = gr.Dataframe(get_leaderboard())
leaderboard_footer_md = gr.Markdown(
INTRO_DETAILS
)
if SHOW_EVAL_SERVER:
with gr.Tab("πŸš€ Evaluation"):
login_button = gr.LoginButton()
welcome_md = gr.Markdown("πŸ”’ Please sign in to submit.")
welcome_details_md = gr.Markdown(
SUBMISSION_TEXT,
visible=False,
)
submission_file = gr.File(
label="Prediction (.json)", visible=False, file_types=[".json"]
)
submission_id = gr.Textbox(
label="(Optional) Submission identifier", visible=False
)
# clean_flag = gr.Checkbox(
# label="Attempt to clean my submission (Recommended for raw responses)",
# value=True,
# visible=False,
# )
private_flag = gr.Checkbox(
label="Do not save my submission", value=False, visible=False
)
quota_details = gr.Markdown(visible=False)
submit_btn = gr.Button("Submit", visible=False)
result = gr.Markdown(label="βœ… Submission processed", visible=False)
# Load login state β†’ show/hide components
app.load(
fn=login_check,
inputs=[],
outputs=[
welcome_md,
welcome_details_md,
quota_details,
submission_id,
submission_file,
# clean_flag,
private_flag,
submit_btn,
],
)
futures = submit_btn.click(
fn=submit,
inputs=[submission_id, submission_file,
# clean_flag,
private_flag],
outputs=[result],
        ).then(quota_check, outputs=[quota_details])
if SHOW_LEADERBOARD:
futures.then(
get_leaderboard,
outputs=[leaderboard_table],
)
# copyright = gr.Markdown(
# "Based on the [gradio-eval-server-template](https://github.com/paulgavrikov/gradio-eval-server-template) by Paul Gavrikov."
# )
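# The event handlers above reference `limiter` and `question_index`; both are
# created below (when this file is run directly) before the server starts.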
if __name__ == "__main__":
config = RateLimitConfig(
max_per_day=MAX_SUBMISSIONS_PER_USER_PER_DAY,
max_total=MAX_TOTAL_SUBMISSIONS_PER_USER,
min_interval_seconds=RATE_LIMIT_MIN_INT_SEC,
)
limiter = RateLimiter(config)
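    # Decrypt the ground truth once at startup and build the judge's question index.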
df_ground_truth = load_ground_truth()
    # TODO: the question index should be precomputed once and reused.
    question_index = build_question_index_from_json(df_ground_truth.to_dict(orient="records"))
app.launch()