Update leaderboard

Files changed:

- Makefile +0 -13
- app.py +158 -315
- data/.gitattributes +59 -0
- data/README.md +75 -0
- generate_initial_leaderboard.py +0 -334
- genned.json +281 -1
- m_data/.gitattributes +4 -0
- pyproject.toml +42 -46
- requirements.txt +0 -23
- src/display/about.py +0 -1
- src/display/utils.py +4 -4
- src/gen/config/api_config.yaml +0 -203
- src/gen/config/judge_config-ru.yaml +0 -35
- src/gen/config/judge_config.yaml +0 -40
- src/gen/gen_answer.py +0 -202
- src/gen/gen_judgment.py +0 -221
- src/gen/show_result.py +0 -279
- src/gen/utils.py +0 -375
- src/leaderboard/build_leaderboard.py +70 -148
- src/leaderboard/filter_models.py +0 -173
- src/leaderboard/read_evals.py +0 -261
- src/populate.py +0 -52
- src/radial/radial.py +79 -72
- src/scripts/create_request_file.py +0 -92
- src/scripts/update_all_request_files.py +0 -96
- src/submission/check_validity.py +0 -178
- src/submission/submit.py +0 -171
- src/tools/collections.py +0 -76
- src/tools/model_backlinks.py +0 -1309
- src/tools/plots.py +0 -158
- style.css +0 -28
- uv.lock +0 -0
Makefile
DELETED
@@ -1,13 +0,0 @@
-.PHONY: style format
-
-
-style:
-	python -m black --line-length 119 .
-	python -m isort .
-	ruff check --fix .
-
-
-quality:
-	python -m black --check --line-length 119 .
-	python -m isort --check-only .
-	ruff check .
app.py
CHANGED
@@ -1,53 +1,134 @@
+import glob
+import json
 import logging
 import os
+from io import BytesIO
 
 os.makedirs("tmp", exist_ok=True)
 os.environ["TMP_DIR"] = "tmp"
-
-import
-import glob
-import gradio as gr
-import numpy as np
-from src.radial.radial import create_plot
-from apscheduler.schedulers.background import BackgroundScheduler
+
+import gradio as gr  # noqa: E402
 from gradio_leaderboard import Leaderboard, SelectColumns
 from gradio_space_ci import enable_space_ci
-import json
-from io import BytesIO
 
+from src.display.about import INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT, TITLE
+from src.display.css_html_js import custom_css
+from src.display.utils import AutoEvalColumn, fields
+from src.envs import API, REPO_ID, RESET_JUDGEMENT_ENV
+from src.leaderboard.build_leaderboard import build_leaderboard_df, download_dataset, download_openbench
+from src.radial.radial import create_plot, create_radar_plot
 
-
+os.environ["GRADIO_ANALYTICS_ENABLED"] = "false"
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+
+# Start ephemeral Spaces on PRs (see config in README.md)
+enable_space_ci()
+
+
+def handle_file_upload(file) -> tuple[dict, str]:
+    """Handle JSON file upload and parse contents."""
     file_path = file.name.split("/")[-1] if "/" in file.name else file.name
     logging.info("File uploaded: %s", file_path)
-    with open(file.name
+    with open(file.name) as f:
         v = json.load(f)
     return v, file_path
 
 
-def submit_file(v, file_path, mn, profile: gr.OAuthProfile | None):
+def rebuild_leaderboard() -> None:
+    """Rebuild and upload leaderboard data to HuggingFace."""
+    try:
+        # Download latest data from repository
+        download_dataset("Vikhrmodels/DeathMath-leaderboard-data", "m_data")
+        logging.info("Successfully downloaded model evaluation data")
+
+        # Collect data from all model files
+        data_list = []
+        best_model_results = {}
+
+        for file in glob.glob("./m_data/model_data/external/*.json"):
+            try:
+                with open(file, encoding="utf-8") as f:
+                    data = json.load(f)
+
+                model_name = data.get("model_name", "")
+                if not model_name:
+                    logging.error(f"Failed to parse {file}: 'model_name' not found")
+                    continue
+
+                model_data = {
+                    "model_name": model_name,
+                    "score": float(data.get("score", 0.0)),
+                    "math_score": float(data.get("math_score", 0.0)),
+                    "physics_score": float(data.get("physics_score", 0.0)),
+                    "total_tokens": int(data.get("total_tokens", 0)),
+                    "evaluation_time": float(data.get("evaluation_time", 0.0)),
+                    "system_prompt": data.get(
+                        "system_prompt", "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+                    ),
+                }
+
+                # Keep only best result for each model
+                model_base_name = model_name.split("/")[-1].split("_v")[0]
+                if model_base_name in best_model_results:
+                    if model_data["score"] > best_model_results[model_base_name]["score"]:
+                        best_model_results[model_base_name] = model_data
+                else:
+                    best_model_results[model_base_name] = model_data
+
+            except Exception as e:
+                logging.error(f"Failed to process file {file}: {e}")
+                continue
+
+        # Convert to sorted list
+        data_list = list(best_model_results.values())
+        data_list.sort(key=lambda x: x["score"], reverse=True)
+
+        if data_list:
+            # Save updated leaderboard
+            with open("genned.json", "w", encoding="utf-8") as f:
+                json.dump(data_list, f, ensure_ascii=False, indent=2)
+
+            # Upload updated leaderboard to repository
+            API.upload_file(
+                path_or_fileobj="genned.json",
+                path_in_repo="leaderboard.json",
+                repo_id="Vikhrmodels/DeathMath-leaderboard-metainfo",
+                repo_type="dataset",
+            )
+            logging.info(f"Updated leaderboard with {len(data_list)} models")
+        else:
+            logging.warning("No model data found to update leaderboard")
+
+    except Exception as e:
+        logging.error(f"Error updating leaderboard: {e}")
+
+
+def submit_file(v: dict, _file_path: str, mn: str, profile: gr.OAuthProfile | None) -> str:
     """
-
+    Handle user model evaluation results submission.
 
     Args:
-        v:
-        file_path:
-        mn:
-        profile:
+        v: Uploaded evaluation results from JSON
+        file_path: Path to uploaded file
+        mn: Model name specified by user
+        profile: HuggingFace user profile
 
     Returns:
-
+        Success or error message
     """
     if profile is None:
         return "Hub Login Required: Войдите в HuggingFace, чтобы загрузить результаты"
 
     try:
-        #
+        # Check required fields
         required_fields = ["score", "math_score", "physics_score"]
         for field in required_fields:
             if field not in v:
                 return f"Error: Отсутствует обязательное поле '{field}' в JSON файле"
 
-        #
+        # Create new object for database
         new_file = {
             "model_name": profile.username + "/" + mn,
             "score": float(v["score"]),
@@ -60,20 +141,20 @@ def submit_file(v, file_path, mn, profile: gr.OAuthProfile | None):
             ),
         }
 
-        #
+        # Validate score ranges
         if not (
             0 <= new_file["score"] <= 1 and 0 <= new_file["math_score"] <= 1 and 0 <= new_file["physics_score"] <= 1
         ):
             return "Error: Все значения оценок должны быть в диапазоне от 0 до 1"
 
-        #
+        # Create unique filename based on username and model name
        safe_filename = profile.username + "_" + mn.replace("/", "_").replace(" ", "_") + ".json"
 
-        #
+        # Create JSON in memory and upload to repository
         buf = BytesIO()
         buf.write(json.dumps(new_file, ensure_ascii=False).encode("utf-8"))
 
-        #
+        # Upload file to repository
         API.upload_file(
             path_or_fileobj=buf,
             path_in_repo="model_data/external/" + safe_filename,
@@ -81,12 +162,11 @@ def submit_file(v, file_path, mn, profile: gr.OAuthProfile | None):
             repo_type="dataset",
         )
 
-        # Устанавливаем флаг для обновления лидерборда
-        os.environ[RESET_JUDGEMENT_ENV] = "1"
-
-        # Логируем успешную загрузку
         logging.info(f"Successfully uploaded model results: {new_file['model_name']}")
 
+        # Immediately rebuild leaderboard
+        rebuild_leaderboard()
+
         return f"Success! Результаты модели '{mn}' успешно отправлены в лидерборд DeathMath."
 
     except Exception as e:
@@ -94,45 +174,23 @@ def submit_file(v, file_path, mn, profile: gr.OAuthProfile | None):
         return f"Error: Произошла ошибка при отправке файла: {str(e)}"
 
 
-
-
-
-    AutoEvalColumn,
-    fields,
-)
-from src.envs import API, H4_TOKEN, HF_HOME, REPO_ID, RESET_JUDGEMENT_ENV
-from src.leaderboard.build_leaderboard import build_leadearboard_df, download_openbench, download_dataset
-import huggingface_hub
-# huggingface_hub.login(token=H4_TOKEN)
-
-os.environ["GRADIO_ANALYTICS_ENABLED"] = "false"
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
-
-# Start ephemeral Spaces on PRs (see config in README.md)
-enable_space_ci()
-
-# download_openbench()
+def update_plot(selected_models: list[str]):
+    """Update bar chart with selected models."""
+    return create_plot(selected_models)
 
 
-def restart_space():
+def restart_space() -> None:
+    """Restart HuggingFace Space and reload data."""
     API.restart_space(repo_id=REPO_ID)
     download_openbench()
 
 
-def
-
-
-
-def build_demo():
-    """
-    Строит интерфейс лидерборда DeathMath
-    """
-    # Загружаем данные для лидерборда
+def build_demo() -> gr.Blocks:
+    """Build DeathMath leaderboard interface."""
+    # Load data for leaderboard
     download_openbench()
 
-    #
+    # Create interface with theme settings
     demo = gr.Blocks(
         title="DeathMath Leaderboard",
         css=custom_css,
@@ -142,22 +200,22 @@ def build_demo():
         ),
     )
 
-    #
-    leaderboard_df =
+    # Get leaderboard data
+    leaderboard_df = build_leaderboard_df()
 
-    #
+    # Build interface
     with demo:
-        #
+        # Header and introduction
         gr.HTML(TITLE)
         gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
-        #
+        # Main tabs
         with gr.Tabs(elem_classes="tab-buttons"):
-            #
+            # Leaderboard tab
             with gr.TabItem("🏅 Лидерборд", elem_id="llm-benchmark-tab-table", id=0):
                 gr.Markdown("### Таблица результатов моделей DeathMath")
 
-                #
+                # Results table
                 Leaderboard(
                     value=leaderboard_df,
                     datatype=[c.type for c in fields(AutoEvalColumn)],
@@ -171,7 +229,7 @@ def build_demo():
                     ],
                 )
 
-            #
+            # Submit results tab
             with gr.TabItem("🚀 Отправить результаты", elem_id="submit-tab", id=1):
                 with gr.Row():
                     gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
@@ -186,269 +244,54 @@ def build_demo():
                 with gr.Column():
                     out = gr.Textbox("Здесь будет показан статус отправки")
                 with gr.Column():
-
+                    _login_button = gr.LoginButton(elem_id="oauth-button")
 
                 submit_button = gr.Button("Отправить результаты", elem_id="submit_button", variant="primary")
 
-                #
+                # Event handlers
                 file_output.upload(handle_file_upload, file_output, [uploaded_file, file_path])
-
                 submit_button.click(submit_file, [uploaded_file, file_path, model_name_textbox], [out])
 
-            #
-            with gr.TabItem("📊 Аналитика", elem_id="analytics-tab", id=2):
-                    info="Выберите модели для сравнения",
-                )
-
-                # Вкладки для разных типов визуализации
-                with gr.Tabs():
-                    with gr.TabItem("Столбчатая диаграмма"):
-                        bar_plot = gr.Plot(create_plot(model_dropdown.value))
-
-                    with gr.TabItem("Радарная диаграмма"):
-                        from src.radial.radial import create_radar_plot
-
-                        radar_plot = gr.Plot(create_radar_plot(model_dropdown.value))
-
-                # Обновление графиков при изменении выбранных моделей
-                model_dropdown.change(fn=create_plot, inputs=[model_dropdown], outputs=[bar_plot])
-
-                model_dropdown.change(fn=create_radar_plot, inputs=[model_dropdown], outputs=[radar_plot])
-
-    return demo
-
-
-# print(os.system('cd src/gen && ../../.venv/bin/python gen_judgment.py'))
-# print(os.system('cd src/gen/ && python show_result.py --output'))
-
-
-def update_board():
-    need_reset = os.environ.get(RESET_JUDGEMENT_ENV)
-    logging.info("Updating the leaderboard: %s", need_reset)
-    if need_reset != "1":
-        return
-
-    os.environ[RESET_JUDGEMENT_ENV] = "0"
-
-    try:
-        # Загружаем актуальные данные из репозитория
-        download_dataset("Vikhrmodels/DeathMath-leaderboard-data", "m_data")
-        logging.info("Successfully downloaded model evaluation data")
-
-        # Собираем данные из всех файлов моделей
-        data_list = []
-        seen_models = set()  # Для отслеживания дубликатов
-
-        for file in glob.glob("./m_data/model_data/external/*.json"):
-            try:
-                with open(file) as f:
-                    data = json.load(f)
-
-                # Проверяем наличие необходимых полей
-                model_name = data.get("model_name", "")
-                if not model_name:
-                    logging.error(f"Failed to parse {file}: 'model_name' not found")
-                    continue
-
-                # Предотвращаем дублирование моделей
-                model_base_name = model_name.split("/")[-1].split("_v")[0]
-                if model_base_name in seen_models:
-                    logging.info(f"Skipping duplicate model: {model_name}")
-                    continue
-
-                seen_models.add(model_base_name)
-
-                # Добавляем модель в список
-                data_list.append(
-                    {
-                        "model_name": model_name,
-                        "score": float(data.get("score", 0.0)),
-                        "math_score": float(data.get("math_score", 0.0)),
-                        "physics_score": float(data.get("physics_score", 0.0)),
-                        "total_tokens": int(data.get("total_tokens", 0)),
-                        "evaluation_time": float(data.get("evaluation_time", 0.0)),
-                        "system_prompt": data.get(
-                            "system_prompt",
-                            "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
-                        ),
-                    }
-                )
-
-        # Если есть данные, сортируем их по общему баллу и сохраняем
-        if data_list:
-            # Сортируем по общему баллу
-            data_list.sort(key=lambda x: x["score"], reverse=True)
-
-            # Сохраняем обновленный лидерборд
-            with open("genned.json", "w", encoding="utf-8") as f:
-                json.dump(data_list, f, ensure_ascii=False, indent=2)
-
-            # Загружаем обновленный лидерборд в репозиторий
-            API.upload_file(
-                path_or_fileobj="genned.json",
-                path_in_repo="leaderboard.json",
-                repo_id="Vikhrmodels/DeathMath-leaderboard-metainfo",
-                repo_type="dataset",
-            )
-            logging.info(f"Updated leaderboard with {len(data_list)} models")
-
-            # Генерируем README с таблицей лидерборда
-            update_readme(data_list)
-
-    except Exception as e:
-        logging.error(f"Error updating leaderboard: {e}")
-
-
-def update_readme(data_list):
-    """
-    Генерирует README.md с таблицей лидерборда
-    """
-    try:
-        import pandas as pd
-        from datetime import datetime
-
-        # Создаем DataFrame для удобного форматирования таблицы
-        df = pd.DataFrame(data_list)
-
-        # Форматируем числовые колонки
-        for col in ["score", "math_score", "physics_score"]:
-            if col in df.columns:
-                df[col] = df[col].apply(lambda x: f"{x:.3f}")
-
-        if "total_tokens" in df.columns:
-            df["total_tokens"] = df["total_tokens"].apply(lambda x: f"{int(x):,}")
-
-        if "evaluation_time" in df.columns:
-            df["evaluation_time"] = df["evaluation_time"].apply(lambda x: f"{x:.1f}s")
-
-        # Создаем содержимое README
-        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-        readme_content = f"""---
-title: DeathMath Leaderboard
-emoji: 🔢
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: "4.20.0"
-app_file: app.py
-pinned: false
-hf_oauth: true
----
-# DeathMath Leaderboard
-
-DeathMath - это бенчмарк для оценки способности моделей решать сложные математические и физические задачи на русском языке.
-
-## Текущий лидерборд
-
-Последнее обновление: {current_date}
-
-| Модель | Общий балл | Математика | Физика | Токены | Время оценки |
-|--------|------------|------------|---------|---------|--------------|
-"""
-
-        # Добавляем строки таблицы
-        for _, row in df.iterrows():
-            model_name = row.get("model_name", "")
-            readme_content += f"| {model_name} | {row.get('score', 'N/A')} | {row.get('math_score', 'N/A')} | {row.get('physics_score', 'N/A')} | {row.get('total_tokens', 'N/A')} | {row.get('evaluation_time', 'N/A')} |\n"
-
-        readme_content += """
-## Как принять участие в бенчмарке
-
-Для участия в бенчмарке DeathMath:
-
-1. Клонируйте репозиторий и запустите тесты вашей модели
-2. Загрузите результаты через [HuggingFace Space](https://huggingface.co/spaces/Vikhrmodels/DeathMath-leaderboard)
-3. Дождитесь проверки и добавления результатов в лидерборд
-
-## Формат результатов
-
-Результаты должны быть в формате JSON со следующей структурой:
-```json
-{
-    "score": 0.586,
-    "math_score": 0.8,
-    "physics_score": 0.373,
-    "total_tokens": 1394299,
-    "evaluation_time": 4533.2,
-    "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
-}
-```
-
-## Лицензия
-
-Бенчмарк распространяется под лицензией Apache 2.0
-"""
-
-        # Сохраняем README
-        with open("README.md", "w", encoding="utf-8") as f:
-            f.write(readme_content)
-
-        # Загружаем README в репозиторий
-        API.upload_file(
-            path_or_fileobj="README.md",
-            path_in_repo="README.md",
-            repo_id="Vikhrmodels/DeathMath-leaderboard-metainfo",
-            repo_type="dataset",
-        )
-
-        logging.info("Updated README with leaderboard table")
-
-
-def update_board_():
-    """
-    Инициализирующая версия функции обновления лидерборда.
-    Удаляет все существующие данные перед первой загрузкой
-    """
-    try:
-        # Очищаем каталоги перед загрузкой данных
-        if os.path.exists("m_data"):
-            # Удаляем все JSON-файлы моделей из старой версии Small Shlepa
-            for old_file in glob.glob("./m_data/model_data/external/*.json"):
-                try:
-                    os.remove(old_file)
-                    logging.info(f"Removed old file: {old_file}")
-                except Exception as e:
-                    logging.error(f"Error removing old file {old_file}: {e}")
-
-        # Вызываем основную функцию обновления
-        update_board()
-
-    except Exception as e:
-        logging.error(f"Error in update_board_(): {e}")
+            # Analytics tab
+            with gr.TabItem("📊 Аналитика", elem_id="analytics-tab", id=2), gr.Column():
+                if len(leaderboard_df) > 0:
+                    model_dropdown = gr.Dropdown(
+                        choices=leaderboard_df["model"].tolist(),
+                        label="Модели",
+                        value=leaderboard_df["model"].tolist()[:5]
+                        if len(leaderboard_df) >= 5
+                        else leaderboard_df["model"].tolist(),
+                        multiselect=True,
+                        info="Выберите модели для сравнения",
+                    )
+                else:
+                    model_dropdown = gr.Dropdown(
+                        choices=["example/model-1", "example/model-2"],
+                        label="Модели",
+                        value=["example/model-1", "example/model-2"],
+                        multiselect=True,
+                        info="Выберите модели для сравнения",
+                    )
+
+                # Tabs for different visualization types
+                with gr.Tabs():
+                    with gr.TabItem("Столбчатая диаграмма"):
+                        bar_plot = gr.Plot(create_plot(model_dropdown.value))
+
+                    with gr.TabItem("Радарная диаграмма"):
+                        radar_plot = gr.Plot(create_radar_plot(model_dropdown.value))
+
+                # Update plots when selected models change
+                model_dropdown.change(fn=create_plot, inputs=[model_dropdown], outputs=[bar_plot])
+                model_dropdown.change(fn=create_radar_plot, inputs=[model_dropdown], outputs=[radar_plot])
+
+    return demo
 
 
 if __name__ == "__main__":
+    # Initialize leaderboard on startup
     os.environ[RESET_JUDGEMENT_ENV] = "1"
-
-    scheduler = BackgroundScheduler()
-    update_board_()
-    scheduler.add_job(update_board, "interval", minutes=10)
-    scheduler.start()
+    rebuild_leaderboard()
 
     demo_app = build_demo()
     demo_app.launch()
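The notable behavioral change above is the deduplication policy: the old `update_board()` kept the first file seen per base model name, while the new `rebuild_leaderboard()` keeps the highest-scoring one. A minimal runnable sketch of that rule, with hypothetical sample entries:

```python
# Sketch of the per-model dedup in the new rebuild_leaderboard():
# strip the "user/" prefix and any "_v..." suffix to get a base name,
# then keep only the highest-scoring entry per base name.
entries = [  # hypothetical sample data
    {"model_name": "alice/my-model_v1", "score": 0.41},
    {"model_name": "bob/my-model_v2", "score": 0.58},
    {"model_name": "carol/other-model", "score": 0.33},
]

best: dict[str, dict] = {}
for entry in entries:
    base = entry["model_name"].split("/")[-1].split("_v")[0]
    if base not in best or entry["score"] > best[base]["score"]:
        best[base] = entry

# Sort descending by score, as the leaderboard does.
leaderboard = sorted(best.values(), key=lambda x: x["score"], reverse=True)
print([e["model_name"] for e in leaderboard])
# ['bob/my-model_v2', 'carol/other-model']
```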
data/.gitattributes
ADDED
@@ -0,0 +1,59 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mds filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# Video files - compressed
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
data/README.md
ADDED
@@ -0,0 +1,75 @@
+---
+title: DeathMath Leaderboard
+emoji: 🔢
+colorFrom: yellow
+colorTo: amber
+sdk: gradio
+sdk_version: "4.20.0"
+app_file: app.py
+pinned: false
+hf_oauth: true
+---
+# DeathMath Leaderboard
+
+Бенчмарк для оценки способности моделей решать сложные математические и физические задачи на русском языке.
+
+## Текущий лидерборд
+
+Последнее обновление: 2025-11-19 17:58:31
+
+| Модель | Общий балл | Математика | Физика | Токены | Время |
+|--------|------------|------------|---------|---------|--------|
+| Anonumous/Gemini 3 Pro | 0.776 | 0.940 | 0.612 | 2,441,816 | 6400.9s |
+| AlexWortega/Gemeni 2.5 Pro | 0.728 | 0.874 | 0.582 | 2,227,721 | 4937.3s |
+| Anonumous/GPT-5 | 0.705 | 0.910 | 0.500 | 1,374,085 | 4908.4s |
+| o3-mini-high | 0.692 | 0.884 | 0.500 | 2,186,756 | 5107.5s |
+| Anonumous/GPT-OSS-120B | 0.675 | 0.849 | 0.500 | 671,703 | 939.1s |
+| o3 | 0.669 | 0.868 | 0.469 | 1,164,000 | 5025.8s |
+| o4-mini-high | 0.664 | 0.868 | 0.459 | 1,997,548 | 5811.0s |
+| Anonumous/GPT-5 Nano | 0.649 | 0.839 | 0.459 | 2,218,450 | 5109.7s |
+| Anonumous/GPT-5 Mini | 0.634 | 0.849 | 0.418 | 993,326 | 3368.5s |
+| Anonumous/Claude Sonnet 4.5 | 0.610 | 0.688 | 0.531 | 461,074 | 1216.3s |
+| Anonumous/Claude Opus 4.1 | 0.607 | 0.704 | 0.510 | 448,628 | 1927.2s |
+| Anonumous/GPT-OSS-20B | 0.583 | 0.789 | 0.378 | 1,034,077 | 4009.3s |
+| AlexWortega/Claude Sonnet 4 | 0.551 | 0.633 | 0.469 | 490,996 | 1294.7s |
+| Qwen QwQ 32B | 0.530 | 0.653 | 0.408 | 2,112,951 | 16974.7s |
+| Gemini 2.0 Flash | 0.514 | 0.558 | 0.469 | 495,313 | 736.6s |
+| Claude 3.7 Sonnet | 0.470 | 0.542 | 0.398 | 405,583 | 1082.0s |
+| gpt-4.1 | 0.466 | 0.584 | 0.347 | 549,983 | 2434.6s |
+| LakoMoor/QVikhr-3-8B-Instruction | 0.445 | 0.563 | 0.327 | 1,486,327 | 11874.2s |
+| LakoMoor/Qwen3-8B | 0.417 | 0.538 | 0.296 | 1,576,445 | 12744.3s |
+| Gemma 3 27B | 0.400 | 0.474 | 0.327 | 384,164 | 3024.3s |
+| Claude 3.5 Sonnet | 0.376 | 0.416 | 0.337 | 252,843 | 702.0s |
+| DeepSeek R1 Distill Qwen 14B | 0.346 | 0.447 | 0.245 | 806,258 | 7904.1s |
+| DeepSeek V3 0324 | 0.343 | 0.432 | 0.255 | 339,857 | 2901.8s |
+| gpt-4o | 0.338 | 0.432 | 0.245 | 399,483 | 1145.0s |
+| GigaChat-2-Max | 0.314 | 0.363 | 0.265 | 185,204 | 965.8s |
+| AvitoTech/A-vibe | 0.280 | 0.367 | 0.194 | 797,771 | 4097.4s |
+| GigaChat-2-Pro | 0.270 | 0.316 | 0.224 | 215,297 | 1250.3s |
+| Qwen2.5 72B Instruct | 0.189 | 0.379 | 0.000 | 322,441 | 5670.7s |
+| GigaChat-Max | 0.181 | 0.189 | 0.173 | 200,271 | 1056.5s |
+| Gemma 3 4B | 0.180 | 0.258 | 0.102 | 726,285 | 2959.4s |
+| GigaChat-2 | 0.083 | 0.095 | 0.071 | 136,051 | 576.9s |
+
+## Как принять участие
+
+1. Клонируйте репозиторий бенчмарка
+2. Запустите оценку вашей модели
+3. Загрузите результаты через [HuggingFace Space](https://huggingface.co/spaces/Vikhrmodels/DeathMath-leaderboard)
+
+## Формат результатов
+
+```json
+{
+  "score": 0.586,
+  "math_score": 0.8,
+  "physics_score": 0.373,
+  "total_tokens": 1394299,
+  "evaluation_time": 4533.2,
+  "system_prompt": "Вы - полезный помощник по математике и физике."
+}
+```
+
+## Лицензия
+
+Apache 2.0
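For submitters, a minimal sketch of producing a results file in the format documented above; the numbers and the output filename are placeholders, and the assertion mirrors the field and range checks that `submit_file()` in app.py applies before accepting an upload:

```python
import json

# Placeholder results in the documented submission format.
results = {
    "score": 0.586,
    "math_score": 0.8,
    "physics_score": 0.373,
    "total_tokens": 1394299,
    "evaluation_time": 4533.2,
    "system_prompt": "Вы - полезный помощник по математике и физике.",
}

# submit_file() requires these three fields and rejects values outside [0, 1].
assert all(0 <= results[k] <= 1 for k in ("score", "math_score", "physics_score"))

# Hypothetical output filename; the file is then uploaded through the Space UI.
with open("my_model_results.json", "w", encoding="utf-8") as f:
    json.dump(results, f, ensure_ascii=False, indent=2)
```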
generate_initial_leaderboard.py
DELETED
@@ -1,334 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-Скрипт для генерации первоначального лидерборда DeathMath и загрузки данных в HuggingFace.
-Использует результаты из директории results и загружает их в репозиторий Vikhrmodels/DeathMath-leaderboard-data.
-"""
-
-import os
-import json
-import logging
-import pandas as pd
-import argparse
-from pathlib import Path
-from huggingface_hub import HfApi, create_repo
-from datetime import datetime
-
-# Настройка логирования
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(levelname)s - %(message)s",
-    handlers=[logging.FileHandler("leaderboard_generation.log"), logging.StreamHandler()],
-)
-logger = logging.getLogger(__name__)
-
-# Константы
-REPO_ID = "Vikhrmodels/DeathMath-leaderboard-data"
-METAINFO_REPO_ID = "Vikhrmodels/DeathMath-leaderboard-metainfo"
-
-
-def setup_repositories(token):
-    """
-    Создает необходимые репозитории на HuggingFace Hub, если они еще не существуют.
-
-    Args:
-        token (str): Токен для доступа к HuggingFace Hub
-    """
-    api = HfApi(token=token)
-
-    try:
-        # Проверка и создание репозитория для данных лидерборда
-        try:
-            api.repo_info(repo_id=REPO_ID, repo_type="dataset")
-            logger.info(f"Репозиторий {REPO_ID} уже существует")
-        except Exception:
-            logger.info(f"Создание репозитория для данных лидерборда: {REPO_ID}")
-            create_repo(repo_id=REPO_ID, repo_type="dataset", token=token)
-
-        # Проверка и создание репозитория для метаданных лидерборда
-        try:
-            api.repo_info(repo_id=METAINFO_REPO_ID, repo_type="dataset")
-            logger.info(f"Репозиторий {METAINFO_REPO_ID} уже существует")
-        except Exception:
-            logger.info(f"Создание репозитория для метаданных лидерборда: {METAINFO_REPO_ID}")
-            create_repo(repo_id=METAINFO_REPO_ID, repo_type="dataset", token=token)
-
-        return api
-    except Exception as e:
-        logger.error(f"Ошибка при создании репозиториев: {e}")
-        raise
-
-
-def load_results(results_file):
-    """
-    Загружает результаты из JSON файла и удаляет дубликаты.
-
-    Args:
-        results_file (str): Путь к файлу с результатами
-
-    Returns:
-        list: Список записей для лидерборда без дубликатов
-    """
-    try:
-        with open(results_file, "r", encoding="utf-8") as f:
-            data = json.load(f)
-
-        leaderboard_entries = []
-        seen_models = set()  # Множество для отслеживания уже обработанных моделей
-
-        for key, value in data.items():
-            if "_Combined_" in key:  # берем только комбинированные результаты
-                model_name = value["model_name"]
-
-                # Пропускаем модель, если она уже была добавлена
-                if model_name in seen_models:
-                    logger.info(f"Пропускаем дублирующуюся модель: {model_name}")
-                    continue
-
-                # Добавляем модель во множество обработанных
-                seen_models.add(model_name)
-
-                leaderboard_entry = {
-                    "model_name": model_name,
-                    "score": value["score"],
-                    "math_score": value["math_score"],
-                    "physics_score": value["physics_score"],
-                    "total_tokens": value["total_tokens"],
-                    "evaluation_time": value["evaluation_time"],
-                    "system_prompt": value.get(
-                        "system_prompt", "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
-                    ),
-                }
-                leaderboard_entries.append(leaderboard_entry)
-
-        # Сортировка по общему баллу
-        leaderboard_entries.sort(key=lambda x: x["score"], reverse=True)
-        logger.info(f"Загружено {len(leaderboard_entries)} уникальных моделей после удаления дубликатов")
-        return leaderboard_entries
-
-    except Exception as e:
-        logger.error(f"Ошибка при загрузке результатов: {e}")
-        raise
-
-
-def prepare_directory_structure():
-    """
-    Создает необходимую структуру директорий для внешних моделей.
-
-    Returns:
-        str: Путь к временной директории с подготовленной структурой
-    """
-    temp_dir = Path("./temp_leaderboard")
-    model_data_dir = temp_dir / "model_data" / "external"
-
-    # Очистка и создание директорий
-    if temp_dir.exists():
-        import shutil
-
-        shutil.rmtree(temp_dir)
-
-    model_data_dir.mkdir(parents=True, exist_ok=True)
-
-    return str(temp_dir)
-
-
-def upload_model_files(api, leaderboard_entries, temp_dir):
-    """
-    Загружает файлы моделей в репозиторий данных лидерборда.
-
-    Args:
-        api (HfApi): Экземпляр API для взаимодействия с HuggingFace
-        leaderboard_entries (list): Список записей для лидерборда
-        temp_dir (str): Путь к временной директории
-    """
-    model_data_dir = os.path.join(temp_dir, "model_data", "external")
-
-    for entry in leaderboard_entries:
-        model_name = entry["model_name"]
-        safe_filename = model_name.replace("/", "_").replace(" ", "_")
-        file_path = os.path.join(model_data_dir, f"{safe_filename}.json")
-
-        with open(file_path, "w", encoding="utf-8") as f:
-            json.dump(entry, f, ensure_ascii=False, indent=2)
-
-        # Загрузка файла модели в репозиторий
-        api.upload_file(
-            path_or_fileobj=file_path,
-            path_in_repo=f"model_data/external/{safe_filename}.json",
-            repo_id=REPO_ID,
-            repo_type="dataset",
-        )
-        logger.info(f"Загружен файл модели: {safe_filename}.json")
-
-
-def generate_leaderboard_json(leaderboard_entries):
-    """
-    Создает JSON файл с данными лидерборда.
-
-    Args:
-        leaderboard_entries (list): Список записей для лидерборда
-
-    Returns:
-        str: Путь к созданному JSON файлу
-    """
-    leaderboard_file = "leaderboard.json"
-
-    with open(leaderboard_file, "w", encoding="utf-8") as f:
-        json.dump(leaderboard_entries, f, ensure_ascii=False, indent=2)
-
-    return leaderboard_file
-
-
-def generate_readme(leaderboard_entries):
-    """
-    Генерирует README.md с информацией о лидерборде.
-
-    Args:
-        leaderboard_entries (list): Список записей для лидерборда
-
-    Returns:
-        str: Путь к созданному README файлу
-    """
-    readme_file = "README.md"
-
-    # Создаем DataFrame для удобного форматирования таблицы
-    df = pd.DataFrame(leaderboard_entries)
-
-    # Форматируем числовые колонки
-    for col in ["score", "math_score", "physics_score"]:
-        if col in df.columns:
-            df[col] = df[col].apply(lambda x: f"{x:.3f}")
-
-    if "total_tokens" in df.columns:
-        df["total_tokens"] = df["total_tokens"].apply(lambda x: f"{int(x):,}")
-
-    if "evaluation_time" in df.columns:
-        df["evaluation_time"] = df["evaluation_time"].apply(lambda x: f"{x:.1f}s")
-
-    # Создаем содержимое README
-    current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-    readme_content = f"""# DeathMath Leaderboard
-
-DeathMath - это бенчмарк для оценки способности моделей решать сложные математические и физические задачи на русском языке.
-
-## Текущий лидерборд
-
-Последнее обновление: {current_date}
-
-| Модель | Общий балл | Математика | Физика | Токены | Время оценки |
-|--------|------------|------------|---------|---------|--------------|
-"""
-
-    # Добавляем строки таблицы
-    for _, row in df.iterrows():
-        readme_content += f"| {row['model_name']} | {row['score']} | {row['math_score']} | {row['physics_score']} | {row.get('total_tokens', 'N/A')} | {row.get('evaluation_time', 'N/A')} |\n"
-
-    readme_content += """
-## Как принять участие в бенчмарке
-
-Для участия в бенчмарке DeathMath:
-
-1. Клонируйте репозиторий и запустите тесты вашей модели
-2. Загрузите результаты через [HuggingFace Space](https://huggingface.co/spaces/Vikhrmodels/DeathMath-leaderboard)
-3. Дождитесь проверки и добавления результатов в лидерборд
-
-## Формат результатов
-
-Результаты должны быть в формате JSON со следующей структурой:
-```json
-{
-    "score": 0.586,
-    "math_score": 0.8,
-    "physics_score": 0.373,
-    "total_tokens": 1394299,
-    "evaluation_time": 4533.2,
-    "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
-}
-```
-
-## Лицензия
-
-Бенчмарк распространяется под лицензией Apache 2.0
-"""
-
-    with open(readme_file, "w", encoding="utf-8") as f:
-        f.write(readme_content)
-
-    return readme_file
-
-
-def upload_leaderboard_files(api, leaderboard_file, readme_file):
-    """
-    Загружает файлы лидерборда в репозиторий метаданных.
-
-    Args:
-        api (HfApi): Экземпляр API для взаимодействия с HuggingFace
-        leaderboard_file (str): Путь к JSON файлу лидерборда
-        readme_file (str): Путь к README файлу
-    """
-    # Загрузка JSON лидерборда
-    api.upload_file(
-        path_or_fileobj=leaderboard_file, path_in_repo="leaderboard.json", repo_id=METAINFO_REPO_ID, repo_type="dataset"
-    )
-    logger.info(f"Загружен файл лидерборда: leaderboard.json в {METAINFO_REPO_ID}")
-
-    # Загрузка README
-    api.upload_file(
-        path_or_fileobj=readme_file, path_in_repo="README.md", repo_id=METAINFO_REPO_ID, repo_type="dataset"
-    )
-    logger.info(f"Загружен README: README.md в {METAINFO_REPO_ID}")
-
-
-def main():
-    # Парсинг аргументов командной строки
-    parser = argparse.ArgumentParser(description="Генерация первоначального лидерборда DeathMath")
-    parser.add_argument(
-        "--results",
-        default="../results/leaderboard_results.json",
-        help="Путь к файлу с результатами (по умолчанию: ../results/leaderboard_results.json)",
-    )
-    parser.add_argument("--token", required=True, help="Токен для доступа к HuggingFace Hub")
-
-    args = parser.parse_args()
-
-    try:
-        logger.info("Начинаем генерацию лидерборда DeathMath")
-
-        # Настраиваем репозитории
-        api = setup_repositories(args.token)
-        logger.info("Репозитории успешно настроены")
-
-        # Загружаем результаты
-        leaderboard_entries = load_results(args.results)
-        logger.info(f"Загружено {len(leaderboard_entries)} записей для лидерборда")
-
-        # Подготавливаем структуру директорий
-        temp_dir = prepare_directory_structure()
-        logger.info(f"Создана временная директория: {temp_dir}")
-
-        # Загружаем файлы моделей
-        upload_model_files(api, leaderboard_entries, temp_dir)
-        logger.info("Файлы моделей успешно загружены")
-
-        # Генерируем JSON лидерборда
-        leaderboard_file = generate_leaderboard_json(leaderboard_entries)
-        logger.info(f"Создан файл лидерборда: {leaderboard_file}")
-
-        # Генерируем README
-        readme_file = generate_readme(leaderboard_entries)
-        logger.info(f"Создан README: {readme_file}")
-
-        # Загружаем файлы лидерборда
-        upload_leaderboard_files(api, leaderboard_file, readme_file)
-        logger.info("Файлы лидерборда успешно загружены")
-
-        logger.info("Генерация лидерборда успешно завершена!")
-
-    except Exception as e:
-        logger.error(f"Ошибка при генерации лидерборда: {e}")
-        raise
-
-
-if __name__ == "__main__":
-    main()
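Per its argparse block, the deleted script took two flags: `--results` (defaulting to ../results/leaderboard_results.json) and a required `--token`. A sketch of how it would have been invoked, with a placeholder token:

```python
import subprocess

# Placeholder HF token; --results falls back to its default if omitted.
subprocess.run(
    [
        "python",
        "generate_initial_leaderboard.py",
        "--results", "../results/leaderboard_results.json",
        "--token", "hf_XXXX",
    ],
    check=True,
)
```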
genned.json
CHANGED
|
@@ -1 +1,281 @@
|
|
| 1 |
-
[
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+[
+    {
+        "model_name": "Anonumous/Gemini 3 Pro",
+        "score": 0.7759716952107476,
+        "math_score": 0.9396984924623115,
+        "physics_score": 0.6122448979591837,
+        "total_tokens": 2441816,
+        "evaluation_time": 6400.850697278976,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "AlexWortega/Gemeni 2.5 Pro",
+        "score": 0.728,
+        "math_score": 0.874,
+        "physics_score": 0.582,
+        "total_tokens": 2227721,
+        "evaluation_time": 4937.3,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Anonumous/GPT-5",
+        "score": 0.7047738693467337,
+        "math_score": 0.9095477386934674,
+        "physics_score": 0.5,
+        "total_tokens": 1374085,
+        "evaluation_time": 4908.358837604523,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "o3-mini-high",
+        "score": 0.6921052631578948,
+        "math_score": 0.8842105263157894,
+        "physics_score": 0.5,
+        "total_tokens": 2186756,
+        "evaluation_time": 5107.464560031891,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Anonumous/GPT-OSS-120B",
+        "score": 0.6746231155778895,
+        "math_score": 0.8492462311557789,
+        "physics_score": 0.5,
+        "total_tokens": 671703,
+        "evaluation_time": 939.0848069190979,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "o3",
+        "score": 0.6689044038668099,
+        "math_score": 0.868421052631579,
+        "physics_score": 0.46938775510204084,
+        "total_tokens": 1164000,
+        "evaluation_time": 5025.797904491425,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "o4-mini-high",
+        "score": 0.6638023630504833,
+        "math_score": 0.868421052631579,
+        "physics_score": 0.45918367346938777,
+        "total_tokens": 1997548,
+        "evaluation_time": 5811.026826620102,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Anonumous/GPT-5 Nano",
+        "score": 0.6491898266844426,
+        "math_score": 0.8391959798994975,
+        "physics_score": 0.45918367346938777,
+        "total_tokens": 2218450,
+        "evaluation_time": 5109.693122148514,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "Anonumous/GPT-5 Mini",
+        "score": 0.6338067890472772,
+        "math_score": 0.8492462311557789,
+        "physics_score": 0.41836734693877553,
+        "total_tokens": 993326,
+        "evaluation_time": 3368.527199983597,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "Anonumous/Claude Sonnet 4.5",
+        "score": 0.6095272279766177,
+        "math_score": 0.6884422110552764,
+        "physics_score": 0.5306122448979592,
+        "total_tokens": 461074,
+        "evaluation_time": 1216.3338594436646,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "Anonumous/Claude Opus 4.1",
+        "score": 0.6068608347861758,
+        "math_score": 0.7035175879396985,
+        "physics_score": 0.5102040816326531,
+        "total_tokens": 448628,
+        "evaluation_time": 1927.2011346817017,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "Anonumous/GPT-OSS-20B",
+        "score": 0.5832478720131269,
+        "math_score": 0.7889447236180904,
+        "physics_score": 0.37755102040816324,
+        "total_tokens": 1034077,
+        "evaluation_time": 4009.333387374878,
+        "system_prompt": "Ты - полезный ИИ-ассистент, решающий задачи по математике и физике. Отвечай на русском языке."
+    },
+    {
+        "model_name": "AlexWortega/Claude Sonnet 4",
+        "score": 0.551,
+        "math_score": 0.633,
+        "physics_score": 0.469,
+        "total_tokens": 490996,
+        "evaluation_time": 1294.7,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Qwen QwQ 32B",
+        "score": 0.5303974221267455,
+        "math_score": 0.6526315789473685,
+        "physics_score": 0.40816326530612246,
+        "total_tokens": 2112951,
+        "evaluation_time": 16974.732090711594,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Gemini 2.0 Flash",
+        "score": 0.5136412459720731,
+        "math_score": 0.5578947368421052,
+        "physics_score": 0.46938775510204084,
+        "total_tokens": 495313,
+        "evaluation_time": 736.5758397579193,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Claude 3.7 Sonnet",
+        "score": 0.47003222341568207,
+        "math_score": 0.5421052631578948,
+        "physics_score": 0.3979591836734694,
+        "total_tokens": 405583,
+        "evaluation_time": 1081.969652414322,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "gpt-4.1",
+        "score": 0.46557465091299677,
+        "math_score": 0.5842105263157895,
+        "physics_score": 0.3469387755102041,
+        "total_tokens": 549983,
+        "evaluation_time": 2434.5933046340942,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "LakoMoor/QVikhr-3-8B-Instruction",
+        "score": 0.445,
+        "math_score": 0.563,
+        "physics_score": 0.327,
+        "total_tokens": 1486327,
+        "evaluation_time": 11874.2,
+        "system_prompt": "You are a helpful assistant. Answer in Russian."
+    },
+    {
+        "model_name": "LakoMoor/Qwen3-8B",
+        "score": 0.417,
+        "math_score": 0.538,
+        "physics_score": 0.296,
+        "total_tokens": 1576445,
+        "evaluation_time": 12744.3,
+        "system_prompt": "You are a helpful assistant. Answer in Russian."
+    },
+    {
+        "model_name": "Gemma 3 27B",
+        "score": 0.40010741138560685,
+        "math_score": 0.47368421052631576,
+        "physics_score": 0.32653061224489793,
+        "total_tokens": 384164,
+        "evaluation_time": 3024.3041207790375,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Claude 3.5 Sonnet",
+        "score": 0.3762620837808808,
+        "math_score": 0.41578947368421054,
+        "physics_score": 0.336734693877551,
+        "total_tokens": 252843,
+        "evaluation_time": 702.003228187561,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "DeepSeek R1 Distill Qwen 14B",
+        "score": 0.3461331901181525,
+        "math_score": 0.4473684210526316,
+        "physics_score": 0.24489795918367346,
+        "total_tokens": 806258,
+        "evaluation_time": 7904.137916564941,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "DeepSeek V3 0324",
+        "score": 0.34334049409237377,
+        "math_score": 0.43157894736842106,
+        "physics_score": 0.25510204081632654,
+        "total_tokens": 339857,
+        "evaluation_time": 2901.8081181049347,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "gpt-4o",
+        "score": 0.3382384532760473,
+        "math_score": 0.43157894736842106,
+        "physics_score": 0.24489795918367346,
+        "total_tokens": 399483,
+        "evaluation_time": 1145.0411348342896,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "GigaChat-2-Max",
+        "score": 0.3142320085929109,
+        "math_score": 0.3631578947368421,
+        "physics_score": 0.2653061224489796,
+        "total_tokens": 185204,
+        "evaluation_time": 965.8490540981293,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "AvitoTech/A-vibe",
+        "score": 0.2803558609373398,
+        "math_score": 0.36683417085427134,
+        "physics_score": 0.19387755102040816,
+        "total_tokens": 797771,
+        "evaluation_time": 4097.367070198059,
+        "system_prompt": null
+    },
+    {
+        "model_name": "GigaChat-2-Pro",
+        "score": 0.27013963480128894,
+        "math_score": 0.3157894736842105,
+        "physics_score": 0.22448979591836735,
+        "total_tokens": 215297,
+        "evaluation_time": 1250.343486070633,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Qwen2.5 72B Instruct",
+        "score": 0.18947368421052632,
+        "math_score": 0.37894736842105264,
+        "physics_score": 0.0,
+        "total_tokens": 322441,
+        "evaluation_time": 5670.72598361969,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "GigaChat-Max",
+        "score": 0.18147153598281418,
+        "math_score": 0.18947368421052632,
+        "physics_score": 0.17346938775510204,
+        "total_tokens": 200271,
+        "evaluation_time": 1056.4797942638397,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "Gemma 3 4B",
+        "score": 0.17996777658431795,
+        "math_score": 0.2578947368421053,
+        "physics_score": 0.10204081632653061,
+        "total_tokens": 726285,
+        "evaluation_time": 2959.3581898212433,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    },
+    {
+        "model_name": "GigaChat-2",
+        "score": 0.08308270676691729,
+        "math_score": 0.09473684210526316,
+        "physics_score": 0.07142857142857142,
+        "total_tokens": 136051,
+        "evaluation_time": 576.884927034378,
+        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+    }
+]
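
Note on the added data: in every entry above, "score" is the arithmetic mean of "math_score" and "physics_score" (e.g. (0.874 + 0.582) / 2 = 0.728 for Gemeni 2.5 Pro). A minimal sanity-check sketch; the script itself, its tolerance, and its placement are illustrative and not part of this commit:

import json

REQUIRED = ("model_name", "score", "math_score", "physics_score")

with open("genned.json", encoding="utf-8") as f:
    entries = json.load(f)

for entry in entries:
    # Same required fields that app.py's submit handler checks, plus model_name.
    missing = [key for key in REQUIRED if key not in entry]
    assert not missing, f"{entry.get('model_name', '?')}: missing {missing}"
    # Combined score should be the mean of the two subject scores.
    expected = (entry["math_score"] + entry["physics_score"]) / 2
    assert abs(entry["score"] - expected) < 1e-6, entry["model_name"]

print(f"OK: {len(entries)} entries, scores consistent")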
m_data/.gitattributes
CHANGED
@@ -9,6 +9,7 @@
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mds filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -53,3 +54,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+# Video files - compressed
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
pyproject.toml
CHANGED
@@ -1,54 +1,50 @@
+[project]
+name = "deathmatch-leaderboard"
+version = "0.1.0"
+description = "DeathMath Leaderboard - benchmark platform for evaluating LLM performance on complex Russian-language mathematics and physics problems"
+requires-python = ">=3.10"
+dependencies = [
+    "gradio[oauth]>=5.0.0",
+    "gradio-leaderboard>=0.0.8",
+    "huggingface-hub>=0.27.0",
+    "pandas>=2.3.0",
+    "numpy>=2.0.0",
+    "matplotlib>=3.10.0",
+    "plotly>=5.24.0",
+    "datasets>=3.2.0",
+    "requests>=2.32.0",
+    "tqdm>=4.67.0",
+    "python-dateutil>=2.9.0",
+    "click>=8.1.7",
+    "gradio-space-ci",
+]
+
+[project.optional-dependencies]
+dev = [
+    "ruff>=0.9.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["src"]
+
 [tool.ruff]
 line-length = 120
-target-version = "
+target-version = "py310"
 include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]
-ignore=["I","EM","FBT","TRY003","S101","D101","D102","D103","D104","D105","G004","D107","FA102"]
-fixable=["ALL"]
-select=["ALL"]
 
 [tool.ruff.lint]
-select = ["E", "F"]
+select = ["E", "F", "I", "N", "W", "UP", "B", "C4", "PIE", "RET", "SIM", "ARG"]
+ignore = ["E501"] # line too long handled by formatter
 fixable = ["ALL"]
-ignore = ["E501"] # line too long (black is taking care of this)
 
-[tool.
-
-
-[tool.
-
-
-[tool.poetry]
-package-mode = false
-name = "open-llm-leaderboard"
-version = "0.1.0"
-description = ""
-authors = []
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "3.13.3"
-apscheduler = "3.10.1"
-black = "23.11.0"
-click = "8.1.3"
-datasets = "2.14.5"
-huggingface-hub = ">=0.18.0"
-matplotlib = "3.8.4"
-numpy = "1.26.0"
-pandas = "2.2.2"
-plotly = "5.14.1"
-python-dateutil = "2.8.2"
-requests = "2.28.2"
-sentencepiece = "^0.2.0"
-tqdm = "4.65.0"
-transformers = "4.40.0"
-tokenizers = ">=0.15.0"
-gradio-space-ci = {git = "https://huggingface.co/spaces/Wauplin/gradio-space-ci", rev = "0.2.3"}
-gradio = "4.20.0"
-isort = "^5.13.2"
-ruff = "^0.3.5"
-gradio-leaderboard = "0.0.8"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+line-ending = "auto"
+
+[tool.uv.sources]
+gradio-space-ci = { git = "https://huggingface.co/spaces/Wauplin/gradio-space-ci", rev = "0.2.3" }
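
The new pyproject.toml replaces the Poetry setup with PEP 621 metadata, a hatchling build backend, and a [tool.uv.sources] pin for the git dependency. A small sketch to confirm the metadata parses as intended; it assumes Python 3.11+ for the stdlib tomllib (on 3.10 the tomli backport offers the same API):

import tomllib  # stdlib since Python 3.11; use `tomli` as a drop-in on 3.10

with open("pyproject.toml", "rb") as f:  # tomllib requires binary mode
    meta = tomllib.load(f)

print(meta["project"]["name"])                         # deathmatch-leaderboard
print(meta["project"]["requires-python"])              # >=3.10
print(meta["tool"]["uv"]["sources"]["gradio-space-ci"])  # {'git': ..., 'rev': '0.2.3'}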
requirements.txt
DELETED
@@ -1,23 +0,0 @@
-APScheduler==3.10.1
-black==23.11.0
-click==8.1.3
-datasets==2.14.5
-huggingface-hub>=0.18.0
-matplotlib==3.8.4
-numpy==1.26.0
-pandas==2.2.2
-plotly==5.14.1
-python-dateutil==2.8.2
-requests==2.28.2
-sentencepiece
-tqdm==4.65.0
-transformers==4.40.0
-tokenizers>=0.15.0
-gradio-space-ci @ git+https://huggingface.co/spaces/Wauplin/gradio-space-ci@0.2.3 # CI !!!
-gradio==4.20.0
-gradio_leaderboard==0.0.8
-tiktoken
-openai
-shortuuid
-httpx==0.25.2
-scikit-learn
src/display/about.py
CHANGED
@@ -1,4 +1,3 @@
-from src.display.utils import ModelType
 
 TITLE = """<h1 style="text-align:left;float:left; id="space-title">DeathMath Leaderboard</h1> <h3 style="text-align:left;float:left;"> Оценка моделей на сложных математических и физических задачах </h3>"""
 
src/display/utils.py
CHANGED
@@ -1,10 +1,10 @@
-from dataclasses import dataclass, make_dataclass
-from enum import Enum
 import json
 import logging
+from dataclasses import dataclass, make_dataclass
 from datetime import datetime
-
+from enum import Enum
 
+import pandas as pd
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -30,7 +30,7 @@ def parse_datetime(datetime_str):
 def load_json_data(file_path):
     """Safely load JSON data from a file."""
     try:
-        with open(file_path
+        with open(file_path) as file:
             return json.load(file)
     except json.JSONDecodeError:
         print(f"Error reading JSON from {file_path}")
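
After this change, load_json_data opens the file with the default mode and encoding; the visible except branch only logs, so the call yields None for malformed JSON. A usage sketch; the path is illustrative:

from src.display.utils import load_json_data

data = load_json_data("data/leaderboard.json")  # hypothetical path
if data is None:
    print("File did not contain valid JSON")
else:
    print(f"Loaded {len(data)} records")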
src/gen/config/api_config.yaml
DELETED
@@ -1,203 +0,0 @@
-# name: str
-# model_name: str
-# endpoints: default to null
-#   - api_base: str
-#     api_key: str optional (required if no api_key_ENV)
-#     api_key_ENV: str optional (ENV name to store the token secret)
-#     api_version: str optional (only for azure)
-# api_type: str
-# tokenizer: str optional (to optimize token limits)
-# parallel: int
-
-gpt-4-1106-preview:
-    model_name: gpt-4-1106-preview
-    endpoints:
-        - api_base: https://cgiaura-openai-trainning.openai.azure.com
-          api_key_ENV: GPT_4_TOKEN
-          api_version: 2024-02-15-preview
-    api_type: azure
-    parallel: 5
-
-gpt-3.5-turbo-0125:
-    model_name: gpt-3.5-turbo-0125
-    endpoints:
-        - api_base: https://api.openai.com/v1/
-          api_key_ENV: GPT_3_TOKEN
-    api_type: openai
-    parallel: 6
-
-gpt-3.5-turbo-0125-ru-sys:
-    model_name: gpt-3.5-turbo-0125
-    endpoints:
-        - api_base: https://api.openai.com/v1/
-          api_key_ENV: GPT_3_TOKEN
-    system_prompt: You are a helpful assistant. Answer on Russian.
-    api_type: openai
-    parallel: 6
-
-yandex_gpt_pro:
-    model_name: yandexgpt
-    endpoints:
-        - catalog_id: b1gk1i41eeb97a5s68c7
-          iam_token_ENV: YANDEX_GPT_TOKEN
-    api_type: yandex
-    parallel: 2
-
-gigachat_lite:
-    model_name: GigaChat
-    endpoints:
-        auth_token_ENV: GIGACHAT_GPT_TOKEN
-    api_type: gigachat
-    parallel: 1
-
-gigachat_pro:
-    model_name: GigaChat-Pro
-    endpoints:
-        auth_token_ENV: GIGACHAT_GPT_TOKEN
-    api_type: gigachat
-    parallel: 1
-
-meta-llama-3-70b-instruct-gptq:
-    model_name: MaziyarPanahi/Meta-Llama-3-70B-Instruct-GPTQ
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-snorkel-mistral-pairrm-dpo:
-    model_name: snorkelai/Snorkel-Mistral-PairRM-DPO
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-sfr-iterative-dpo-llama-3-8b-r:
-    model_name: Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-openchat-3.5-0106:
-    model_name: openchat/openchat-3.5-0106
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-mixtral-8x7b-instruct-v0.1:
-    model_name: LoneStriker/Mixtral-8x7B-Instruct-v0.1-HF
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 4
-
-neural-chat-7b-v3-3:
-    model_name: Intel/neural-chat-7b-v3-3
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-meta-llama-3-8b-instruct:
-    model_name: meta-llama/Meta-Llama-3-8B-Instruct
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-saiga_llama3_8b:
-    model_name: IlyaGusev/saiga_llama3_8b
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-hermes-2-pro-llama-3-8b:
-    model_name: NousResearch/Hermes-2-Pro-Llama-3-8B
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-dpopenhermes-7b:
-    model_name: openaccess-ai-collective/DPOpenHermes-7B
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-llama3-chatqa-1.5-8b:
-    model_name: nvidia/Llama3-ChatQA-1.5-8B
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-hermes-2-pro-mistral-7b:
-    model_name: NousResearch/Hermes-2-Pro-Mistral-7B
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-suzume-llama-3-8b-multilingual:
-    model_name: lightblue/suzume-llama-3-8B-multilingual
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-vikhr-7b-instruct_0.4:
-    model_name: Vikhrmodels/Vikhr-7B-instruct_0.4
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-vikhr-it-5.2-fp16-cp:
-    model_name: Vikhrmodels/it-5.2-fp16-cp
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    system_prompt: Ты — Вихрь, русскоязычный ассистент.
-    parallel: 6
-
-starling-lm-7b-beta:
-    model_name: Nexusflow/Starling-LM-7B-beta
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-c4ai-command-r-v01:
-    model_name: CohereForAI/c4ai-command-r-v01
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 6
-
-starcoder2-15b-instruct-v0.1:
-    model_name: bigcode/starcoder2-15b-instruct-v0.1
-    endpoints:
-        - api_base: http://localhost:8000/v1
-          api_key: token-abc123
-    api_type: openai
-    parallel: 3
src/gen/config/judge_config-ru.yaml
DELETED
@@ -1,35 +0,0 @@
-name: judgment config file for Arena Hard
-
-bench_name: arena-hard-v0.1
-
-# Arena Hard default
-judge_model: gpt-4-1106-preview
-reference: False # Optional
-ref_model: null
-
-baseline: True
-baseline_model: gpt-3.5-turbo-0125
-
-pairwise: True
-temperature: 0
-max_tokens: 4096
-
-regex_pattern: \[\[([AB<>=]+)\]\]
-
-system_prompt: "Пожалуйста, веди себя как беспристрастный судья и оцени качество ответов, предоставленных двумя AI ассистентами на пользовательский запрос, представленный ниже. Тебе будут даны ответы ассистента А и ассистента В. Твоя задача — оценить, чей ответ лучше.\n\nНачни свою оценку, сгенерировав собственный ответ на запрос. Ты должен предоставить свои ответы, прежде чем судить об ответах других AI.\n\nПри оценке ответов ассистентов сравни ответы обоих ассистентов со своим ответом. Ты должен идентифицировать и исправить любые ошибки или неточности.\n\nЗатем рассмотри, являются ли ответы ассистентов грамотными, полезными, релевантными и краткими. Грамотность означает, что ответ использует преимущественно русский язык и в нем отсутствуют языковые ошибки. Полезность означает, что ответ правильно реагирует на запрос или следует инструкциям. Обрати внимание, когда в запросе пользователя есть какая-либо неоднозначность или более одной интерпретации, полезнее и уместнее запрашивать уточнения или дополнительную информацию у пользователя, чем предоставлять ответ на основе предположений. Релевантность означает, что все части ответа тесно связаны или соотвествуют тому, что спрашивается. Краткость означает, что ответ ясен и не многословен или избыточен.\n\nЗатем рассмотри креативность и новизну ответов ассистентов, когда это необходимо. Наконец, определи любую отсутствующую важную информацию в ответах ассистентов, которую было бы полезно включить при ответе на пользовательский запрос.\n\nПосле предоставления твоего объяснения, ты должен выдать только один из следующих вариантов как твое окончательное решение с меткой:\n\n1. Ассистент A значительно лучше: [[A>>B]]\n2. Ассистент A немного лучше: [[A>B]]\n3. Ничья, примерно одинаково: [[A=B]]\n4. Ассистент B немного лучше: [[B>A]]\n5. Ассистент B значительно лучше: [[B>>A]]\n\nПример вывода: \"Мой окончательный вердикт — ничья: [[A=B]]\"."
-
-prompt_template: ["<|Запрос пользователя|>\n{question_1}\n\n<|Начало ответа ассистента A|>\n{answer_1}\n<|Конец ответа ассистента A|>\n\n<|Начало ответа ассистента B|>\n{answer_2}\n<|Конец ответа ассистента B|>"]
-
-# Add your model below for evaluation
-model_list:
-  - meta-llama-3-8b-instruct
-  - meta-llama-3-8b-instruct-ru-guided-2
-  - saiga_llama3_8b
-  - suzume-llama-3-8B-multilingual
-  - c4ai-command-r-v01
-  - starling-lm-7b-beta
-  - openchat-3.5-0106
-  - hermes-2-pro-llama-3-8b
-  - hermes-2-pro-mistral-7b
-  - starcoder2-15b-instruct-v0.1
-  - gpt-4-1106-preview
src/gen/config/judge_config.yaml
DELETED
@@ -1,40 +0,0 @@
-name: judgment config file for Arena Hard
-
-bench_name: arena-hard-v0.1
-
-# Arena Hard default
-judge_model: gpt-4-1106-preview
-reference: False # Optional
-ref_model: null
-
-baseline: True
-baseline_model: gpt-3.5-turbo-0125
-
-pairwise: True
-temperature: 0
-max_tokens: 4096
-
-regex_pattern: \[\[([AB<>=]+)\]\]
-
-system_prompt: "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by describing the details that need to be taken into account when responding to this prompt. You must provide your ideas before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your ideas. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, concise and linguistically acceptable. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive. Linguistically acceptable means that the response is given mainly in Russian language and there are no grammatical errors in it.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."
-
-prompt_template: ["<|User Prompt|>\n{question_1}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>"]
-
-# Add your model below for evaluation
-model_list:
-  - meta-llama-3-8b-instruct
-  - saiga_llama3_8b
-  - suzume-llama-3-8b-multilingual
-  - yandex_gpt_pro
-  - c4ai-command-r-v01
-  - starling-lm-7b-beta
-  - openchat-3.5-0106
-  - snorkel-mistral-pairrm-dpo
-  - neural-chat-7b-v3-3
-  - gigachat_lite
-  - gigachat_pro
-  - vikhr-7b-instruct_0.4
-  - hermes-2-pro-llama-3-8b
-  - gpt-4-1106-preview
-  - llama3-chatqa-1.5-8b
-  - vikhr-it-5.1
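
Both deleted judge configs share the verdict-extraction pattern regex_pattern: \[\[([AB<>=]+)\]\]. A short sketch of how that pattern picks the label out of a judgment, using the example verdict string from the config itself:

import re

# Pattern taken verbatim from the deleted judge configs.
pattern = re.compile(r"\[\[([AB<>=]+)\]\]")
judgment = 'My final verdict is tie: [[A=B]]'
print(pattern.findall(judgment))  # ['A=B']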
src/gen/gen_answer.py
DELETED
@@ -1,202 +0,0 @@
-"""Generate answers using api endpoints.
-
-Usage:
-python gen_api_answer --parallel 32
-"""
-import argparse
-import concurrent.futures
-import json
-import os
-import time
-
-import shortuuid
-import tiktoken
-import tqdm
-from utils import (
-    OPENAI_MODEL_LIST,
-    chat_completion_anthropic,
-    chat_completion_cohere,
-    chat_completion_gemini,
-    chat_completion_gigachat,
-    chat_completion_mistral,
-    chat_completion_openai,
-    chat_completion_openai_azure,
-    chat_completion_yandex,
-    get_endpoint,
-    load_model_answers,
-    load_questions,
-    make_config,
-    reorg_answer_file,
-    temperature_config,
-)
-
-
-def get_answer(
-    question: dict,
-    model: str,
-    endpoint_info: dict,
-    num_choices: int,
-    max_tokens: int,
-    temperature: float,
-    answer_file: str,
-    api_dict: dict,
-):
-    if question["category"] in temperature_config:
-        temperature = temperature_config[question["category"]]
-
-    api_type = endpoint_info["api_type"]
-
-    conv = []
-
-    if "system_prompt" in endpoint_info.keys():
-        conv.append({"role": "system", "content": endpoint_info["system_prompt"]})
-    elif model in OPENAI_MODEL_LIST:
-        conv.append({"role": "system", "content": "You are a helpful assistant."})
-
-    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
-    choices = []
-    for i in range(num_choices):
-        turns = []
-        for j in range(len(question["turns"])):
-            conv.append({"role": "user", "content": question["turns"][j]["content"]})
-            if api_type == "anthropic":
-                output = chat_completion_anthropic(
-                    model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens
-                )
-            elif api_type == "mistral":
-                output = chat_completion_mistral(
-                    model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens
-                )
-            elif api_type == "yandex":
-                output = chat_completion_yandex(
-                    model=endpoint_info["model_name"],
-                    messages=conv,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    api_dict=api_dict,
-                )
-            elif api_type == "gigachat":
-                output = chat_completion_gigachat(
-                    model=endpoint_info["model_name"],
-                    messages=conv,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    api_dict=api_dict,
-                )
-            elif api_type == "gemini":
-                output = chat_completion_gemini(
-                    model=endpoint_info["model_name"],
-                    messages=question["turns"][j]["content"],
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                )
-            elif api_type == "azure":
-                output = chat_completion_openai_azure(
-                    model=endpoint_info["model_name"],
-                    messages=conv,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    api_dict=api_dict,
-                )
-            elif api_type == "cohere":
-                output = chat_completion_cohere(
-                    model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens
-                )
-            else:
-                output = chat_completion_openai(
-                    model=endpoint_info["model_name"],
-                    messages=conv,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    api_dict=api_dict,
-                )
-            conv.append({"role": "assistant", "content": output})
-
-            turns.append({"content": output, "token_len": len(encoding.encode(output))})
-        choices.append({"index": i, "turns": turns})
-
-    # Dump answers
-    ans = {
-        "question_id": question["question_id"],
-        "answer_id": shortuuid.uuid(),
-        "model_id": model,
-        "choices": choices,
-        "tstamp": time.time(),
-    }
-
-    os.makedirs(os.path.dirname(answer_file), exist_ok=True)
-    with open(answer_file, "a") as fout:
-        fout.write(json.dumps(ans) + "\n")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--setting-file", type=str, default="config/gen_answer_config.yaml")
-    parser.add_argument("--endpoint-file", type=str, default="config/api_config.yaml")
-    args = parser.parse_args()
-
-    settings = make_config(args.setting_file)
-    endpoint_list = make_config(args.endpoint_file)
-
-    existing_answer = load_model_answers(os.path.join("data", settings["bench_name"], "model_answers", "internal"))
-
-    print(settings)
-
-    for model in settings["model_list"]:
-        assert model in endpoint_list
-        endpoint_info = endpoint_list[model]
-
-        question_file = os.path.join("data", settings["bench_name"], "question.jsonl")
-        questions = load_questions(question_file)
-
-        answer_file = os.path.join("data", settings["bench_name"], "model_answers", f"{model}.jsonl")
-        print(f"Output to {answer_file}")
-
-        if "parallel" in endpoint_info:
-            parallel = endpoint_info["parallel"]
-        else:
-            parallel = 1
-
-        # We want to maximizes the number of tokens generate per answer: max_tokens = specified token # - input tokens #
-        if "tokenizer" in endpoint_info:
-            question_list = [question["turns"][0]["content"] for question in questions]
-            if model in OPENAI_MODEL_LIST:
-                tokenizer = tiktoken.encoding_for_model(endpoint_info["model_name"])
-                tokens = [tokenizer.encode(prompt) for prompt in question_list]
-                max_tokens = [(settings["max_tokens"] - len(token) - 100) for token in tokens]
-            else:
-                from transformers import AutoTokenizer
-
-                os.environ["TOKENIZERS_PARALLELISM"] = "false"
-                tokenizer = AutoTokenizer.from_pretrained(endpoint_info["tokenizer"])
-
-                tokens = tokenizer(question_list)
-                max_tokens = [(settings["max_tokens"] - len(prompt) - 300) for prompt in tokens["input_ids"]]
-        else:
-            max_tokens = [settings["max_tokens"]] * len(questions)
-
-        with concurrent.futures.ThreadPoolExecutor(max_workers=parallel) as executor:
-            futures = []
-            count = 0
-            for index, question in enumerate(questions):
-                if model in existing_answer and question["question_id"] in existing_answer[model]:
-                    count += 1
-                    continue
-                future = executor.submit(
-                    get_answer,
-                    question,
-                    model,
-                    endpoint_info,
-                    settings["num_choices"],
-                    max_tokens[index],
-                    settings["temperature"],
-                    answer_file,
-                    get_endpoint(endpoint_info["endpoints"]),
-                )
-                futures.append(future)
-            if count > 0:
-                print(f"{count} number of existing answers")
-            for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
-                future.result()
-
-            reorg_answer_file(answer_file)
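
The deleted gen_answer.py budgeted generation length per question: the configured max_tokens minus the measured prompt length minus a safety margin (100 tokens for tiktoken-counted OpenAI models, 300 for HuggingFace tokenizers). A worked sketch of that rule; the numbers are illustrative, only the 100/300 margins come from the deleted script:

# Illustrative prompt/config sizes; margins taken from the deleted script.
max_tokens_setting = 4096
prompt_tokens = 850

openai_budget = max_tokens_setting - prompt_tokens - 100  # 3146
hf_budget = max_tokens_setting - prompt_tokens - 300      # 2846
print(openai_budget, hf_budget)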
src/gen/gen_judgment.py
DELETED
@@ -1,221 +0,0 @@
-import argparse
-import concurrent.futures
-import glob
-import json
-import os
-import re
-
-import huggingface_hub
-from tqdm import tqdm
-from utils import (
-    chat_completion_anthropic,
-    chat_completion_openai,
-    chat_completion_openai_azure,
-    get_endpoint,
-    load_model_answers,
-    load_questions,
-    make_config,
-)
-
-
-def get_score(judgment, pattern, pairwise=True):
-    matches = pattern.findall(judgment)
-    matches = [m for m in matches if m != ""]
-    if len(set(matches)) == 0:
-        return None, True
-    elif len(set(matches)) == 1:
-        if pairwise:
-            return matches[0].strip("\n"), False
-        return int(matches[0])
-    else:
-        return None, False
-
-
-# get answer from model
-def get_answer(model, conv, temperature, max_tokens, endpoint_dict=None):
-    api_dict = get_endpoint(endpoint_dict["endpoints"])
-
-    if endpoint_dict["api_type"] == "anthropic":
-        output = chat_completion_anthropic(model, conv, temperature, max_tokens)
-    elif endpoint_dict["api_type"] == "azure":
-        output = chat_completion_openai_azure(model, conv, temperature, max_tokens, api_dict)
-    else:
-        output = chat_completion_openai(model, conv, temperature, max_tokens, api_dict)
-    return output
-
-
-def judgment(**args):
-    question = args["question"]
-    answer = args["answer"]
-    reference = args["reference"]
-    baseline = args["baseline_answer"]
-    configs = args["configs"]
-    output_file = args["output_file"]
-    model = configs["judge_model"]
-
-    num_games = 2 if configs["pairwise"] else 1
-
-    output = {"question_id": question["question_id"], "model": answer["model_id"], "judge": model, "games": []}
-
-    for game in range(num_games):
-        conv = [{"role": "system", "content": configs["system_prompt"]}]
-
-        for template in configs["prompt_template"]:
-            prompt_args = {}
-
-            for i, turn in enumerate(question["turns"]):
-                prompt_args[f"question_{i+1}"] = turn["content"]
-            base = 1
-
-            if baseline:
-                if game % 2 == 1:  # swap position
-                    temp = baseline
-                    baseline = answer
-                    answer = temp
-
-                for i, turn in enumerate(baseline["choices"][0]["turns"]):
-                    prompt_args[f"answer_{i+1}"] = turn["content"]
-                    base += 1
-            if answer:
-                for i, turn in enumerate(answer["choices"][0]["turns"]):
-                    prompt_args[f"answer_{i+base}"] = turn["content"]
-
-            if reference:
-                for j, ref_answer in enumerate(reference):
-                    for i, turn in enumerate(ref_answer["choices"][0]["turns"]):
-                        prompt_args[f"ref_answer_{i+j+1}"] = turn["content"]
-
-            user_prompt = template.format(**prompt_args)
-            conv.append({"role": "user", "content": user_prompt})
-
-        judgment = ""
-        for _ in range(2):
-            new_judgment = get_answer(
-                model,
-                conv,
-                configs["temperature"],
-                configs["max_tokens"],
-                args["endpoint_dict"],
-            )
-
-            judgment += "\n" + new_judgment
-
-            score, try_again = get_score(judgment, args["regex_pattern"])
-
-            conv.append({"role": "assistant", "content": new_judgment})
-
-            if not try_again:
-                break
-
-            conv.append(
-                {"role": "user", "content": "continue your judgment and finish by outputting a final verdict label"}
-            )
-
-        result = {"user_prompt": conv[1]["content"], "judgment": judgment, "score": score}
-        output["games"].append(result)
-
-    with open(output_file, "a") as f:
-        f.write(json.dumps(output, ensure_ascii=False) + "\n")
-    huggingface_hub.HfApi().upload_file(
-        output_file,
-        path_in_repo=f'model_judgment/{configs['judge_model']}/{output_file.split('/')[-1]}',
-        repo_id="Vikhrmodels/openbench-eval",
-        repo_type="dataset",
-    )
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--setting-file", type=str, default="./config/judge_config.yaml")
-    parser.add_argument("--endpoint-file", type=str, default="./config/api_config.yaml")
-    args = parser.parse_args()
-    print(args)
-
-    configs = make_config(args.setting_file)
-    endpoint_list = make_config(args.endpoint_file)
-
-    print(
-        f'judge model: {configs["judge_model"]}, baseline: {configs["baseline"]}, baseline model: {configs["baseline_model"]}, reference: {configs["reference"]}, '
-        + f'reference models: {configs["ref_model"]}, temperature: {configs["temperature"]}, max tokens: {configs["max_tokens"]}, pairwise: {configs["pairwise"]}'
-    )
-
-    if configs["regex_pattern"]:
-        pattern = re.compile(configs["regex_pattern"])
-
-    question_file = os.path.join("./data", configs["bench_name"], "question.jsonl")
-    external_dir = os.path.join("./data", configs["bench_name"], "model_answers/external")
-    internal_dir = os.path.join("./data", configs["bench_name"], "model_answers/internal")
-    ref_answer_dir = os.path.join("data", configs["bench_name"], "reference_answer")
-
-    questions = load_questions(question_file)
-    model_answers_external = load_model_answers(external_dir)
-    model_answers_internal = load_model_answers(internal_dir)
-
-    # internal has priority
-    model_answers = {**model_answers_external, **model_answers_internal}
-
-    # if user choose a set of models, only judge those models
-    models = [
-        model.split("/")[-1].split(".")[0]
-        for model in glob.glob("./data/arena-hard-v0.1/model_answers/external/*.jsonl")
-    ]
-
-    ref_answers = None
-    if configs["reference"]:
-        ref_answers = load_model_answers(ref_answer_dir)
-        ref_answers = [ref_answers[model] for model in configs["ref_model"]]
-
-    output_files = {}
-    output_dir = f"data/{configs['bench_name']}/model_judgment/{configs['judge_model']}"
-    for model in models:
-        output_files[model] = os.path.join(
-            output_dir,
-            f"{model}.jsonl",
-        )
-
-    for output_file in output_files.values():
-        os.makedirs(os.path.dirname(output_file), exist_ok=True)
-
-    existing_judgments = load_model_answers(output_dir)
-
-    endpoint_info = endpoint_list[configs["judge_model"]]
-
-    with concurrent.futures.ThreadPoolExecutor(max_workers=endpoint_info["parallel"]) as executor:
-        futures = []
-        for model in models:
-            count = 0
-            for question in questions[:2]:
-                question_id = question["question_id"]
-
-                kwargs = {}
-                kwargs["question"] = question
-                if model in model_answers and question_id not in model_answers[model]:
-                    print(f"Warning: {model} answer to {question['question_id']} cannot be found.")
-                    continue
-
-                if model in existing_judgments and question_id in existing_judgments[model]:
-                    count += 1
-                    continue
-
-                kwargs["answer"] = model_answers[model][question_id]
-                if ref_answers:
-                    kwargs["reference"] = [ref_answer[question_id] for ref_answer in ref_answers]
-                    assert len(kwargs["reference"]) == len(configs["ref_model"])
-                else:
-                    kwargs["reference"] = None
-                if configs["baseline"]:
-                    kwargs["baseline_answer"] = model_answers[configs["baseline_model"]][question_id]
-                else:
-                    kwargs["baseline_answer"] = None
-                kwargs["configs"] = configs
-                kwargs["endpoint_dict"] = endpoint_info
-                kwargs["output_file"] = output_files[model]
-                kwargs["regex_pattern"] = pattern
-                future = executor.submit(judgment, **kwargs)
-                futures.append(future)
-
-            if count > 0:
-                print(f"{count} number of existing judgments")
-
-        for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
-            future.result()
src/gen/show_result.py
DELETED
|
@@ -1,279 +0,0 @@
|
|
| 1 |
-
import argparse
|
| 2 |
-
import datetime
|
| 3 |
-
import math
|
| 4 |
-
import os
|
| 5 |
-
from collections import defaultdict
|
| 6 |
-
from glob import glob
|
| 7 |
-
|
| 8 |
-
import numpy as np
|
| 9 |
-
import pandas as pd
|
| 10 |
-
import plotly.express as px
|
| 11 |
-
from sklearn.linear_model import LogisticRegression
|
| 12 |
-
from tqdm import tqdm
|
| 13 |
-
from utils import load_model_answers
|
| 14 |
-
|
| 15 |
-
from src.envs import HF_TOKEN_PRIVATE
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
def compute_mle_elo(df, SCALE=400, BASE=10, INIT_RATING=1000):
|
| 19 |
-
models = pd.concat([df["model_a"], df["model_b"]]).unique()
|
| 20 |
-
models = pd.Series(np.arange(len(models)), index=models)
|
| 21 |
-
|
| 22 |
-
# duplicate battles
|
| 23 |
-
df = pd.concat([df, df], ignore_index=True)
|
| 24 |
-
p = len(models.index)
|
| 25 |
-
n = df.shape[0]
|
| 26 |
-
|
| 27 |
-
X = np.zeros([n, p])
|
| 28 |
-
X[np.arange(n), models[df["model_a"]]] = +math.log(BASE)
|
| 29 |
-
X[np.arange(n), models[df["model_b"]]] = -math.log(BASE)
|
| 30 |
-
|
| 31 |
-
# one A win => two A win
|
| 32 |
-
Y = np.zeros(n)
|
| 33 |
-
Y[df["winner"] == "model_a"] = 1.0
|
| 34 |
-
|
| 35 |
-
# one tie => one A win + one B win
|
| 36 |
-
# find tie + tie (both bad) index
|
| 37 |
-
tie_idx = (df["winner"] == "tie") | (df["winner"] == "tie (bothbad)")
|
| 38 |
-
tie_idx[len(tie_idx) // 2 :] = False
|
| 39 |
-
Y[tie_idx] = 1.0
|
| 40 |
-
|
| 41 |
-
lr = LogisticRegression(fit_intercept=False, penalty=None, tol=1e-8)
|
| 42 |
-
lr.fit(X, Y)
|
| 43 |
-
|
| 44 |
-
elo_scores = SCALE * lr.coef_[0] + INIT_RATING
|
| 45 |
-
|
| 46 |
-
# set anchor as gpt-3.5-turbo-0125 = 1000
|
| 47 |
-
if "gpt-3.5-turbo-0125" in models.index:
|
| 48 |
-
elo_scores += 1000 - elo_scores[models["gpt-3.5-turbo-0125"]]
|
| 49 |
-
return pd.Series(elo_scores, index=models.index).sort_values(ascending=False)
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
def get_bootstrap_result(battles, func_compute_elo, num_round):
|
| 53 |
-
rows = []
|
| 54 |
-
for i in tqdm(range(num_round), desc="bootstrap"):
|
| 55 |
-
rows.append(func_compute_elo(battles.sample(frac=1.0, replace=True)))
|
| 56 |
-
df = pd.DataFrame(rows)
|
| 57 |
-
return df[df.median().sort_values(ascending=False).index]


def preety_print_two_ratings(ratings_1, ratings_2, column_names):
    df = (
        pd.DataFrame(
            [[n, ratings_1[n], ratings_2[n]] for n in ratings_1.keys()],
            columns=["Model", column_names[0], column_names[1]],
        )
        .sort_values(column_names[0], ascending=False)
        .reset_index(drop=True)
    )
    df[column_names[0]] = (df[column_names[0]] + 0.5).astype(int)
    df[column_names[1]] = (df[column_names[1]] + 0.5).astype(int)
    df.index = df.index + 1
    return df


def visualize_bootstrap_scores(df, title):
    bars = (
        pd.DataFrame(dict(lower=df.quantile(0.025), rating=df.quantile(0.5), upper=df.quantile(0.975)))
        .reset_index(names="model")
        .sort_values("rating", ascending=False)
    )
    bars["error_y"] = bars["upper"] - bars["rating"]
    bars["error_y_minus"] = bars["rating"] - bars["lower"]
    bars["rating_rounded"] = np.round(bars["rating"], 2)
    fig = px.scatter(
        bars,
        x="model",
        y="rating",
        error_y="error_y",
        error_y_minus="error_y_minus",
        text="rating_rounded",
        title=title,
    )
    fig.update_layout(xaxis_title="Model", yaxis_title="Rating", height=600)
    return fig


def predict_win_rate(elo_ratings, SCALE=400, BASE=10, INIT_RATING=1000):
    names = sorted(list(elo_ratings.keys()))
    wins = defaultdict(lambda: defaultdict(lambda: 0))
    for a in names:
        for b in names:
            ea = 1 / (1 + BASE ** ((elo_ratings[b] - elo_ratings[a]) / SCALE))
            wins[a][b] = ea
            wins[b][a] = 1 - ea

    data = {a: [wins[a][b] if a != b else np.NAN for b in names] for a in names}

    df = pd.DataFrame(data, index=names)
    df.index.name = "model_a"
    df.columns.name = "model_b"
    return df.T
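
# predict_win_rate applies the standard Elo expectation
#   E_a = 1 / (1 + BASE ** ((R_b - R_a) / SCALE))
# with BASE=10 and SCALE=400, so a 100-point rating gap corresponds to about
# a 64% expected win rate and a 400-point gap to about 91%.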


def get_win_rate_column(df, column, baseline="gpt-3.5-turbo-0125"):
    to_dict = df[["model", column]].set_index("model").to_dict()[column]
    win_rate_table = predict_win_rate(to_dict)
    return win_rate_table[baseline].fillna(0.5).apply(lambda x: round(x * 100, 2))


def get_battles_from_judgment(judge_name, first_game_only=False, WEIGHT=3):
    arena_hard_battles = pd.DataFrame()

    print("Turning judgment results into battles...")

    directory = f"data/arena-hard-v0.1/model_judgement/{judge_name}"
    assert os.path.exists(directory)
    for file in tqdm(glob(f"{directory}/*jsonl")):
        df = pd.read_json(file, lines=True)

        for _, row in df.iterrows():
            # game 1
            output = {"question_id": row["question_id"], "model_a": "gpt-3.5-turbo-0125", "model_b": row["model"]}

            game = row["games"][0]

            weight = 1
            if game["score"] == "A=B":
                output["winner"] = "tie"
            elif game["score"] == "A>B":
                output["winner"] = "model_a"
            elif game["score"] == "A>>B":
                output["winner"] = "model_a"
                weight = WEIGHT
            elif game["score"] == "B>A":
                output["winner"] = "model_b"
            elif game["score"] == "B>>A":
                output["winner"] = "model_b"
                weight = WEIGHT
            else:
                weight = 0

            if weight:
                arena_hard_battles = pd.concat([arena_hard_battles, pd.DataFrame([output] * weight)])

            if not first_game_only:
                # game 2
                output = {"question_id": row["question_id"], "model_a": "gpt-3.5-turbo-0125", "model_b": row["model"]}

                game = row["games"][1]

                weight = 1
                if game["score"] == "A=B":
                    output["winner"] = "tie"
                elif game["score"] == "A>B":
                    output["winner"] = "model_b"
                elif game["score"] == "A>>B":
                    output["winner"] = "model_b"
                    weight = WEIGHT
                elif game["score"] == "B>A":
                    output["winner"] = "model_a"
                elif game["score"] == "B>>A":
                    output["winner"] = "model_a"
                    weight = WEIGHT
                else:
                    weight = 0

                if weight:
                    arena_hard_battles = pd.concat([arena_hard_battles, pd.DataFrame([output] * weight)])
    arena_hard_battles.to_json("data/arena_hard_battles.jsonl", lines=True, orient="records")
    return arena_hard_battles
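
# The weighting above converts each judged game into battles against the
# gpt-3.5-turbo-0125 baseline: a decisive verdict ("A>>B" / "B>>A") is
# emitted WEIGHT times (default 3), a narrow one once, and unparsable scores
# are dropped. In game 2 the verdict labels are inverted ("A>B" credits
# model_b), consistent with the answers being judged in swapped positions,
# which cancels position bias across the two games.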


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--bench-name", type=str, default="arena-hard-v0.1")
    parser.add_argument("--judge-name", type=str, default="gpt-4-1106-preview")
    parser.add_argument("--baseline", type=str, default="gpt-3.5-turbo-0125")
    parser.add_argument("--load-battles", action="store_true")
    parser.add_argument("--load-bootstrap", action="store_true")
    parser.add_argument("--show-elo", action="store_true")
    parser.add_argument("--weight", type=int, default=3)
    parser.add_argument("--num-rounds", type=int, default=100)
    parser.add_argument("--output", action="store_true")
    parser.add_argument("--first-game-only", action="store_true")
    args = parser.parse_args()
    print(args)
    assert not args.load_bootstrap or (
        args.load_battles and args.load_bootstrap
    ), "If loading preexisting bootstrapping data, you must also load preexisting battles."

    answer_dir = os.path.join("data", args.bench_name, "model_answers/external")
    model_answers = load_model_answers(answer_dir)

    if args.load_battles:
        assert os.path.exists("data/arena_hard_battles.jsonl")
        battles = pd.read_json("data/arena_hard_battles.jsonl", lines=True)
    else:
        battles = get_battles_from_judgment(args.judge_name, args.first_game_only, args.weight)

    bootstrap_online_elo = compute_mle_elo(battles)

    if args.load_bootstrap:
        bootstrap_elo_lu = pd.read_json("data/bootstrapping_results.jsonl", lines=True)
    else:
        np.random.seed(42)
        bootstrap_elo_lu = get_bootstrap_result(battles, compute_mle_elo, args.num_rounds)
        bootstrap_elo_lu.to_json("data/bootstrapping_results.jsonl", lines=True, orient="records")

    stats = pd.DataFrame()
    stats["results"] = None
    stats["results"] = stats["results"].astype("object")

    for i, model in enumerate(bootstrap_online_elo.index):
        assert model in bootstrap_elo_lu.columns

        stats.at[i, "model"] = model
        stats.at[i, "score"] = bootstrap_online_elo[model]
        stats.at[i, "lower"] = np.percentile(bootstrap_elo_lu[model], 2.5)
        stats.at[i, "upper"] = np.percentile(bootstrap_elo_lu[model], 97.5)

        length = 0
        if model in model_answers:
            for _, row in model_answers[model].items():
                turn = row["choices"][0]["turns"][0]
                length += turn["token_len"]
            length /= len(model_answers[model])

        stats.at[i, "avg_tokens"] = int(length)
        stats.at[i, "results"] = bootstrap_elo_lu[model].tolist()

    if not args.show_elo:
        stats.sort_values(by="model", inplace=True)
        stats["score"] = get_win_rate_column(stats, "score", args.baseline).tolist()
        stats["lower"] = get_win_rate_column(stats, "lower", args.baseline).tolist()
        stats["upper"] = get_win_rate_column(stats, "upper", args.baseline).tolist()
        decimal = 1
    else:
        decimal = 0
        stats = stats.astype({"score": int, "lower": int, "upper": int})

    stats.sort_values(by="score", ascending=False, inplace=True)
    for _, row in stats.iterrows():
        interval = str((round(row["lower"] - row["score"], decimal), round(row["upper"] - row["score"], decimal)))
        print(
            f"{row['model'] : <30} | score: {round(row['score'], decimal) : ^5} | 95% CI: {interval : ^12} | average #tokens: {int(row['avg_tokens'])}"
        )

    if args.output:
        cur_date = datetime.datetime.now()
        date_str = cur_date.strftime("%Y%m%d")
        json_file_name = f"arena_hard_leaderboard_{date_str}.json"
        stats.to_json(json_file_name, orient="records", indent=4)
        import huggingface_hub

        huggingface_hub.HfApi().upload_file(
            path_or_fileobj=json_file_name,
            path_in_repo="leaderboard.json",
            repo_id="Vikhrmodels/arena-leaderboard-metainfo",
            repo_type="dataset",
            token=HF_TOKEN_PRIVATE,
        )

        huggingface_hub.HfApi().upload_file(
            path_or_fileobj=json_file_name,
            path_in_repo=f"leaderboard_logs/{json_file_name}",
            repo_id="Vikhrmodels/arena-leaderboard-metainfo",
            repo_type="dataset",
            token=HF_TOKEN_PRIVATE,
        )
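
# Example invocation (flags as defined above):
#
#   python src/gen/show_result.py --judge-name gpt-4-1106-preview --num-rounds 100 --output
#
# Without --show-elo, scores are reported as win rates against --baseline
# rather than raw Elo ratings.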
src/gen/utils.py
DELETED
@@ -1,375 +0,0 @@
import json
import os
import random
import time
from glob import glob

import yaml

# API setting constants
API_MAX_RETRY = 16
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"


OPENAI_MODEL_LIST = (
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-0613-verbose",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0125",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-turbo",
    "gpt-4-1106-preview",
    "gpt-4-0125-preview",
)


temperature_config = {
    "writing": 0.7,
    "roleplay": 0.7,
    "extraction": 0.0,
    "math": 0.0,
    "coding": 0.0,
    "reasoning": 0.0,
    "stem": 0.1,
    "humanities": 0.1,
}


def load_questions(question_file: str):
    """Load questions from a file."""
    questions = []
    with open(question_file, "r") as ques_file:
        for line in ques_file:
            if line:
                questions.append(json.loads(line))
    return questions


def load_model_answers(answer_dir: str):
    """Load model answers.

    The return value is a python dict of type:
    Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
    """
    filenames = glob(os.path.join(answer_dir, "*.jsonl"))
    filenames.sort()
    model_answers = {}

    for filename in filenames:
        model_name = os.path.basename(filename)[:-6]
        answer = {}
        with open(filename) as fin:
            for line in fin:
                line = json.loads(line)
                answer[line["question_id"]] = line
        model_answers[model_name] = answer

    return model_answers


def get_endpoint(endpoint_list):
    if endpoint_list is None:
        return None
    assert endpoint_list is not None
    # randomly pick one
    api_dict = random.choices(endpoint_list)[0]
    return api_dict


# load config args from config yaml files
def make_config(config_file: str) -> dict:
    config_kwargs = {}
    with open(config_file, "r") as f:
        config_kwargs = yaml.load(f, Loader=yaml.SafeLoader)

    return config_kwargs
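
# make_config is how the generation/judging scripts read their YAML settings,
# e.g. (hypothetical keys; the actual schema lives in src/gen/config/*.yaml):
#
#   judge_config = make_config("src/gen/config/judge_config.yaml")
#   endpoint = get_endpoint(judge_config.get("endpoints"))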


def chat_completion_gigachat(model, messages, temperature, max_tokens, api_dict=None):
    from gigachat import GigaChat
    from gigachat.models import Chat, Messages

    assert api_dict is not None, "no api settings provided!"
    auth_token = api_dict.get("auth_token", os.environ.get(api_dict["auth_token"], ""))
    client = GigaChat(credentials=auth_token, model=model, verify_ssl_certs=False)
    temperature = max(temperature, 0.001)

    messages = [Messages.parse_obj(m) for m in messages]
    chat = Chat(messages=messages, max_tokens=max_tokens, temperature=temperature)

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            output = client.chat(chat)
            output = output.choices[0].message.content
            break
        # Don't know other errors
        except Exception as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)

    return output


def chat_completion_yandex(model, messages, temperature, max_tokens, api_dict=None):
    from yandex_gpt import YandexGPT, YandexGPTConfigManagerForIAMToken

    assert api_dict is not None, "no api settings provided!"
    iam_token = api_dict.get("iam_token", os.environ.get(api_dict["iam_token_ENV"], ""))
    config = YandexGPTConfigManagerForIAMToken(model_type=model, catalog_id=api_dict["catalog_id"], iam_token=iam_token)
    client = YandexGPT(config_manager=config)

    messages = [{"role": m["role"], "text": m["content"]} for m in messages]

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            output = client.get_sync_completion(
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            break
        # Don't know other errors
        except Exception as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)

    return output


def chat_completion_openai(model, messages, temperature, max_tokens, api_dict=None):
    import openai

    api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], ""))
    if api_dict:
        client = openai.OpenAI(
            base_url=api_dict["api_base"],
            api_key=api_key,
        )
    else:
        client = openai.OpenAI()

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            # print(messages)
            completion = client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stop=["</s>", "<eos>", "<|eot_id|>"],
            )
            output = completion.choices[0].message.content
            break
        except openai.RateLimitError as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
        except openai.BadRequestError as e:
            print(messages)
            print(type(e), e)
        except KeyError as e:
            print(type(e), e)
            break

    return output
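
# All chat_completion_* helpers share one retry contract: up to API_MAX_RETRY
# attempts, sleeping API_RETRY_SLEEP seconds after retryable errors, and
# falling back to the API_ERROR_OUTPUT sentinel ("$ERROR$") so a failed call
# never crashes a generation run. Note that chat_completion_openai reads
# api_dict["api_key_ENV"] before checking whether api_dict is None, so
# callers are expected to always pass an api_dict.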


def chat_completion_openai_azure(model, messages, temperature, max_tokens, api_dict=None):
    import openai
    from openai import AzureOpenAI

    api_base = api_dict["api_base"]
    api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], ""))
    client = AzureOpenAI(
        azure_endpoint=api_base, api_key=api_key, api_version=api_dict["api_version"], timeout=240, max_retries=2
    )

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                n=1,
                temperature=temperature,
                max_tokens=max_tokens,
                seed=42,
            )
            output = response.choices[0].message.content
            break
        except openai.RateLimitError as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
        except openai.BadRequestError as e:
            print(type(e), e)
            break
        except KeyError as e:
            print(type(e), e)
            break

    return output


def chat_completion_anthropic(model, messages, temperature, max_tokens, api_dict=None):
    import anthropic

    if api_dict:
        api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], ""))
    else:
        api_key = os.environ["ANTHROPIC_API_KEY"]

    sys_msg = ""
    if messages[0]["role"] == "system":
        sys_msg = messages[0]["content"]
        messages = messages[1:]

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            # print(sys_msg)
            c = anthropic.Anthropic(api_key=api_key)
            response = c.messages.create(
                model=model,
                messages=messages,
                stop_sequences=[anthropic.HUMAN_PROMPT],
                max_tokens=max_tokens,
                temperature=temperature,
                system=sys_msg,
            )
            output = response.content[0].text
            break
        except anthropic.APIError as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
    return output


def chat_completion_mistral(model, messages, temperature, max_tokens):
    from mistralai.client import MistralClient
    from mistralai.exceptions import MistralException
    from mistralai.models.chat_completion import ChatMessage

    api_key = os.environ["MISTRAL_API_KEY"]
    client = MistralClient(api_key=api_key)

    prompts = [ChatMessage(role=message["role"], content=message["content"]) for message in messages]

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            chat_response = client.chat(
                model=model,
                messages=prompts,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            output = chat_response.choices[0].message.content
            break
        except MistralException as e:
            print(type(e), e)
            break

    return output


def chat_completion_gemini(model, messages, temperature, max_tokens):
    import google.generativeai as genai

    genai.configure(api_key=os.environ["GEMINI_API_KEY"])

    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ]

    # Set up the model
    generation_config = {
        "temperature": temperature,
        "top_p": 1,
        "top_k": 1,
        "max_output_tokens": max_tokens,
    }

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            gemini = genai.GenerativeModel(
                model_name=model, generation_config=generation_config, safety_settings=safety_settings
            )

            convo = gemini.start_chat(history=[])

            convo.send_message(messages)
            output = convo.last.text
            break
        except genai.types.generation_types.StopCandidateException as e:
            print(type(e), e)
            break
        except Exception as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)

    return output


def chat_completion_cohere(model, messages, temperature, max_tokens):
    import cohere

    co = cohere.Client(os.environ["COHERE_API_KEY"])
    assert len(messages) > 0

    template_map = {"system": "SYSTEM", "assistant": "CHATBOT", "user": "USER"}

    assert messages[-1]["role"] == "user"
    prompt = messages[-1]["content"]

    if len(messages) > 1:
        history = []
        for message in messages[:-1]:
            history.append({"role": template_map[message["role"]], "message": message["content"]})
    else:
        history = None

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            response = co.chat(
                message=prompt,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                chat_history=history,
            )
            output = response.text
            break
        except cohere.core.api_error.ApiError as e:
            print(type(e), e)
            raise
        except Exception as e:
            print(type(e), e)
            break

    return output


def reorg_answer_file(answer_file):
    """Sort by question id and de-duplication"""
    answers = {}
    with open(answer_file, "r") as fin:
        for line in fin:
            qid = json.loads(line)["question_id"]
            answers[qid] = line

    qids = sorted(list(answers.keys()))
    with open(answer_file, "w") as fout:
        for qid in qids:
            fout.write(answers[qid])
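
# reorg_answer_file rewrites the .jsonl in place: answers are keyed by
# question_id in a dict, so when a rerun regenerates some questions only the
# last line per id survives, leaving the file deduplicated and sorted.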
src/leaderboard/build_leaderboard.py
CHANGED
@@ -2,17 +2,20 @@ import json
 import logging
 import os
 import time
+from typing import Any
 
 import pandas as pd
 from huggingface_hub import snapshot_download
 
-from src.envs import DATA_PATH, H4_TOKEN,
+from src.envs import DATA_PATH, H4_TOKEN, METAINFO_REPO, RESULTS_REPO
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 
 
 def time_diff_wrapper(func):
+    """Decorator to measure function execution time."""
+
     def wrapper(*args, **kwargs):
         start_time = time.time()
         result = func(*args, **kwargs)
@@ -23,19 +26,24 @@ def time_diff_wrapper(func):
 
     return wrapper
 
 
-
+
+def chmod_recursive(path: str, mode: int) -> None:
+    """Recursively change file permissions."""
     os.chmod(path, mode)
     for root, dirs, files in os.walk(path):
-        for
-            os.chmod(os.path.join(root,
-        for
-            os.chmod(os.path.join(root,
+        for dir_name in dirs:
+            os.chmod(os.path.join(root, dir_name), mode)
+        for file_name in files:
+            os.chmod(os.path.join(root, file_name), mode)
+
 
 @time_diff_wrapper
-def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
+def download_dataset(
+    repo_id: str, local_dir: str, repo_type: str = "dataset", max_attempts: int = 3, backoff_factor: float = 1.5
+) -> None:
     """Download dataset with exponential backoff retries."""
     os.makedirs(local_dir, exist_ok=True)
-    os.makedirs(
+    os.makedirs("./tmp", exist_ok=True)
    attempt = 0
    while attempt < max_attempts:
        try:
@@ -43,14 +51,14 @@ def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
             snapshot_download(
                 repo_id=repo_id,
                 local_dir=local_dir,
-                cache_dir=
+                cache_dir="./tmp",
                 repo_type=repo_type,
                 tqdm_class=None,
                 token=H4_TOKEN,
                 etag_timeout=30,
                 max_workers=8,
                 force_download=True,
-                local_dir_use_symlinks=False
+                local_dir_use_symlinks=False,
             )
             logging.info("Download successful")
             return
@@ -62,18 +70,16 @@ def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
     logging.error("Failed to download %s after %s attempts", repo_id, max_attempts)
 
 
-def download_openbench():
-    """
-
-    """
-    # Скачиваем метаданные лидерборда
+def download_openbench() -> None:
+    """Download necessary data for the leaderboard from HuggingFace repositories."""
+    # Download leaderboard metadata
     try:
         download_dataset(METAINFO_REPO, DATA_PATH)
         logging.info("Successfully downloaded leaderboard metainfo data")
     except Exception as e:
         logging.error(f"Failed to download leaderboard metainfo: {e}")
-
-    #
+
+    # Download model evaluation results
     try:
         download_dataset(RESULTS_REPO, "m_data")
         logging.info("Successfully downloaded model evaluation results")
@@ -81,32 +87,28 @@ def download_openbench():
         logging.error(f"Failed to download model evaluation results: {e}")
 
 
-def build_leadearboard_df():
+def build_leaderboard_df() -> pd.DataFrame:
     """
-
-
+    Build leaderboard dataframe from all available sources.
+    Ensures only one entry per model (with highest score).
     """
-
-
-
-    # 1. Пытаемся загрузить данные из метаинформации лидерборда
+    best_model_results: dict[str, dict[str, Any]] = {}
+
+    # 1. Load data from leaderboard metainfo
     try:
         leaderboard_path = os.path.join(DATA_PATH, "leaderboard.json")
         if os.path.exists(leaderboard_path):
-            with open(leaderboard_path,
+            with open(leaderboard_path, encoding="utf-8") as eval_file:
                 saved_data = json.load(eval_file)
                 if saved_data:
                     logging.info(f"Loaded {len(saved_data)} models from saved leaderboard data")
-
-                    # Обрабатываем каждую модель, сохраняя только лучший результат
+
                     for item in saved_data:
                         try:
-                            # Получаем имя модели, проверяя разные возможные ключи
                            model_name = item.get("model_name", item.get("model", ""))
                            if not model_name:
                                continue
-
-                            # Стандартизируем данные
+
                            model_data = {
                                "model": model_name,
                                "score": float(item.get("score", 0.0)),
@@ -114,10 +116,13 @@ def build_leadearboard_df():
                                 "physics_score": float(item.get("physics_score", 0.0)),
                                 "total_tokens": int(item.get("total_tokens", 0)),
                                 "evaluation_time": float(item.get("evaluation_time", 0.0)),
-                                "system_prompt": item.get(
+                                "system_prompt": item.get(
+                                    "system_prompt",
+                                    "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
+                                ),
                             }
-
-                            #
+
+                            # Keep only best result for each model
                             model_base_name = model_name.split("/")[-1].split("_v")[0]
                             if model_base_name in best_model_results:
                                 if model_data["score"] > best_model_results[model_base_name]["score"]:
@@ -125,43 +130,39 @@ def build_leadearboard_df():
                                 else:
                                     best_model_results[model_base_name] = model_data
                             except KeyError as e:
-                                # Логируем ошибку, но продолжаем обработку других моделей
                                 logging.error(f"Failed to process model data: {e}")
     except Exception as e:
         logging.error(f"Failed to load saved leaderboard data: {e}")
-
-    # 2.
+
+    # 2. Load models from external directory
     try:
         external_dir = "./m_data/model_data/external/"
         if os.path.exists(external_dir):
             for file in os.listdir(external_dir):
                 if file.endswith(".json"):
                     try:
-                        with open(os.path.join(external_dir, file),
+                        with open(os.path.join(external_dir, file), encoding="utf-8") as f:
                             data = json.load(f)
-
-
-                        converted_data = convert_old_format_to_deatmath(data)
-
-                        # Проверяем наличие необходимых полей после конвертации
-                        model_name = converted_data.get("model_name", converted_data.get("model", ""))
+
+                        model_name = data.get("model_name", data.get("model", ""))
                         if not model_name:
-                            logging.error(f"Failed to parse {file}: 'model_name' not found
+                            logging.error(f"Failed to parse {file}: 'model_name' not found")
                             continue
-
-                        # Стандартизируем данные
+
                         model_data = {
                             "model": model_name,
-                            "score": float(
-                            "math_score": float(
-                            "physics_score": float(
-                            "total_tokens": int(
-                            "evaluation_time": float(
-                            "system_prompt":
-                            "
+                            "score": float(data.get("score", 0.0)),
+                            "math_score": float(data.get("math_score", 0.0)),
+                            "physics_score": float(data.get("physics_score", 0.0)),
+                            "total_tokens": int(data.get("total_tokens", 0)),
+                            "evaluation_time": float(data.get("evaluation_time", 0.0)),
+                            "system_prompt": data.get(
+                                "system_prompt",
+                                "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
+                            ),
                         }
-
-                        #
+
+                        # Keep only best result for each model
                         model_base_name = model_name.split("/")[-1].split("_v")[0]
                         if model_base_name in best_model_results:
                             if model_data["score"] > best_model_results[model_base_name]["score"]:
@@ -173,13 +174,12 @@ def build_leadearboard_df():
                             continue
     except Exception as e:
         logging.error(f"Failed to process external model data: {e}")
-
-    # 3.
+
+    # 3. Collect all best results
    results = list(best_model_results.values())
-
-    # 4.
+
+    # 4. Add default example models if list is empty
    if not results:
-        # Добавляем несколько моделей-заглушек для отображения интерфейса
        results = [
            {
                "model": "example/model-1",
@@ -188,7 +188,7 @@ def build_leadearboard_df():
                 "physics_score": 0.6,
                 "total_tokens": 1000000,
                 "evaluation_time": 3600.0,
-                "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
+                "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
             },
             {
                 "model": "example/model-2",
@@ -197,96 +197,18 @@ def build_leadearboard_df():
                 "physics_score": 0.5,
                 "total_tokens": 800000,
                 "evaluation_time": 3000.0,
-                "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
-            }
+                "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
+            },
         ]
         logging.warning("No model data found, using example models")
-
-    #
+
+    # Create DataFrame and sort by score
     df = pd.DataFrame(results)
-    df.sort_values(by=
-
-    #
-    numeric_cols = df.select_dtypes(include=[
+    df.sort_values(by="score", ascending=False, inplace=True)
+
+    # Round numeric columns
+    numeric_cols = df.select_dtypes(include=["number"]).columns
     if not numeric_cols.empty:
         df[numeric_cols] = df[numeric_cols].round(3)
-
-    return df
-
 
-def convert_old_format_to_deatmath(data):
-    """
-    Конвертирует данные из старого формата Small Shlepa в формат DeathMath
-
-    Args:
-        data (dict): Данные модели в старом формате
-
-    Returns:
-        dict: Конвертированные данные в формате DeathMath
-    """
-    # Проверяем, возможно это файл уже в формате DeathMath
-    if "score" in data:
-        return data
-
-    # Проверяем формат Small Shlepa с полями: musicmc, moviesmc, booksmc, lawmc, mmluproru
-    small_shlepa_fields = ["musicmc", "moviesmc", "booksmc", "lawmc", "mmluproru", "model"]
-    is_shlepa_format = any(field in data for field in small_shlepa_fields)
-
-    if is_shlepa_format:
-        logging.info(f"Конвертация модели из формата Small Shlepa в формат DeathMath: {data.get('model', 'Unknown')}")
-
-        # Конвертируем данные с примерным соответствием:
-        # math_score = среднее(musicmc, booksmc, mmluproru)
-        # physics_score = lawmc или moviesmc
-        math_score = 0.0
-        math_components = 0
-
-        if "musicmc" in data and data["musicmc"] is not None:
-            math_score += float(data["musicmc"])
-            math_components += 1
-
-        if "booksmc" in data and data["booksmc"] is not None:
-            math_score += float(data["booksmc"])
-            math_components += 1
-
-        if "mmluproru" in data and data["mmluproru"] is not None:
-            math_score += float(data["mmluproru"])
-            math_components += 1
-
-        if math_components > 0:
-            math_score /= math_components
-
-        # Для physics_score используем значение lawmc или moviesmc (что доступно)
-        physics_score = 0.0
-        if "lawmc" in data and data["lawmc"] is not None:
-            physics_score = float(data["lawmc"])
-        elif "moviesmc" in data and data["moviesmc"] is not None:
-            physics_score = float(data["moviesmc"])
-
-        # Общий скор - среднее арифметическое
-        avg_score = (math_score + physics_score) / 2 if math_score or physics_score else 0.0
-
-        converted_data = {
-            "model_name": data.get("model", "Unknown"),
-            "score": avg_score,
-            "math_score": math_score,
-            "physics_score": physics_score,
-            "total_tokens": int(data.get("total_tokens", 0)),
-            "evaluation_time": float(data.get("evaluation_time", 0.0)),
-            "system_prompt": data.get("system_prompt",
-                "Вы - полезный помощник по математике и физике. Ответьте на русском языке.")
-        }
-
-        return converted_data
-
-    # Если формат неизвестен, возвращаем стандартный шаблон
-    logging.warning(f"Неизвестный формат данных модели, использую шаблон")
-    return {
-        "model_name": data.get("model_name", data.get("model", "Unknown")),
-        "score": 0.0,
-        "math_score": 0.0,
-        "physics_score": 0.0,
-        "total_tokens": 0,
-        "evaluation_time": 0.0,
-        "system_prompt": "Вы - полезный помощник по математике и физике. Ответьте на русском языке."
-    }
+
+    return df
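
Aside: the deduplication key in build_leaderboard_df strips the org prefix and any "_v..." suffix before comparing scores, so repeated submissions of one model collapse to its best-scoring row. A hypothetical example of the key:

    "Vikhrmodels/my-model_v2".split("/")[-1].split("_v")[0]  # -> "my-model"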
src/leaderboard/filter_models.py
DELETED
@@ -1,173 +0,0 @@
from src.display.formatting import model_hyperlink
from src.display.utils import AutoEvalColumn


# Models which have been flagged by users as being problematic for a reason or another
# (Model name to forum discussion link)
FLAGGED_MODELS = {
    "merged": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
    "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
    "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
    "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
    "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
    "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
    "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
    "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
    "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
    "fblgit/una-xaberius-34b-v1beta": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/444",
    "jan-hq/trinity-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "rwitz2/go-bruins-v2.1.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "rwitz2/go-bruins-v2.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "GreenNode/GreenNodeLM-v3olet-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "GreenNode/GreenNodeLM-7B-v4leo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "GreenNode/LeoScorpius-GreenNode-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "viethq188/LeoScorpius-7B-Chat-DPO": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "GreenNode/GreenNodeLM-7B-v2leo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "janai-hq/trinity-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "ignos/LeoScorpius-GreenNode-Alpaca-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "fblgit/una-cybertron-7b-v3-OMA": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "mncai/mistral-7b-dpo-merge-v1.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "mncai/mistral-7b-dpo-v6": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "Toten5/LeoScorpius-GreenNode-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "GreenNode/GreenNodeLM-7B-v1olet": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "quantumaikr/quantum-dpo-v0.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "quantumaikr/quantum-v0.01": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "quantumaikr/quantum-trinity-v0.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "mncai/mistral-7b-dpo-v5": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "cookinai/BruinHermes": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "jan-ai/Pandora-10.7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "v1olet/v1olet_marcoroni-go-bruins-merge-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "v1olet/v1olet_merged_dpo_7B_v3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "rwitz2/pee": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "zyh3826 / GML-Mistral-merged-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/503",
    "dillfrescott/trinity-medium": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
    "udkai/Garrulus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/526",
    "dfurman/GarrulusMarcoro-7B-v0.1": "https://huggingface.co/dfurman/GarrulusMarcoro-7B-v0.1/discussions/1",
    "eren23/slerp-test-turdus-beagle": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    "abideen/NexoNimbus-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    "alnrg2arg/test2_3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    "nfaheem/Marcoroni-7b-DPO-Merge": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    "CultriX/MergeTrix-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    "liminerity/Blur-7b-v1.21": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
    # Merges not indicated
    "gagan3012/MetaModelv2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "gagan3012/MetaModelv3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "kyujinpy/Sakura-SOLRCA-Math-Instruct-DPO-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "kyujinpy/Sakura-SOLAR-Instruct-DPO-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "kyujinpy/Sakura-SOLRCA-Math-Instruct-DPO-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "kyujinpy/Sakura-SOLRCA-Instruct-DPO": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "fblgit/LUNA-SOLARkrautLM-Instruct": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "perlthoughts/Marcoroni-8x7B-v3-MoE": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "rwitz/go-bruins-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "rwitz/go-bruins": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Walmart-the-bag/Solar-10.7B-Cato": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "aqweteddy/mistral_tv-neural-marconroni": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "NExtNewChattingAI/shark_tank_ai_7_b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Q-bert/MetaMath-Cybertron": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "OpenPipe/mistral-ft-optimized-1227": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "perlthoughts/Falkor-7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "v1olet/v1olet_merged_dpo_7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Ba2han/BruinsV2-OpHermesNeu-11B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "DopeorNope/You_can_cry_Snowman-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "PistachioAlt/Synatra-MCS-7B-v0.3-RP-Slerp": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/MetaMath-una-cybertron-v2-bf16-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/OpenHermes-2.5-neural-chat-7b-v3-2-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "perlthoughts/Falkor-8x7B-MoE": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "elinas/chronos007-70b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/MetaMath-NeuralHermes-2.5-Mistral-7B-Linear": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/MetaMath-neural-chat-7b-v3-2-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/neural-chat-7b-v3-1-OpenHermes-2.5-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/MetaMath-NeuralHermes-2.5-Mistral-7B-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Walmart-the-bag/Misted-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "garage-bAInd/Camel-Platypus2-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "Weyaxi/OpenOrca-Zephyr-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "uukuguy/speechless-mistral-7b-dare-0.85": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
    "DopeorNope/SOLARC-M-10.7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511",
    "cloudyu/Mixtral_11Bx2_MoE_19B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511",
    "DopeorNope/SOLARC-MOE-10.7Bx6 ": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511",
    "DopeorNope/SOLARC-MOE-10.7Bx4": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511",
    "gagan3012/MetaModelv2 ": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511",
    "udkai/Turdus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "kodonho/Solar-OrcaDPO-Solar-Instruct-SLERP": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "kodonho/SolarM-SakuraSolar-SLERP": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "Yhyu13/LMCocktail-10.7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "mlabonne/NeuralMarcoro14-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "Neuronovo/neuronovo-7B-v0.2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "ryandt/MusingCaterpillar": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "Neuronovo/neuronovo-7B-v0.3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "SanjiWatsuki/Lelantos-DPO-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "bardsai/jaskier-7b-dpo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "cookinai/OpenCM-14": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "bardsai/jaskier-7b-dpo-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "jan-hq/supermario-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    # MoErges
    "cloudyu/Yi-34Bx2-MoE-60B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "cloudyu/Mixtral_34Bx2_MoE_60B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "gagan3012/MetaModel_moe": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/SOLAR-math-2x10.7b-v0.2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "cloudyu/Mixtral_7Bx2_MoE": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/SOLAR-math-2x10.7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/Orca-SOLAR-4x10.7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/piccolo-8x7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "cloudyu/Mixtral_7Bx4_MOE_24B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/laser-dolphin-mixtral-2x7b-dpo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    "macadeliccc/polyglot-math-4x7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
    # Other - contamination mostly
    "DopeorNope/COKAL-v1-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/566",
    "CultriX/MistralTrix-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/556",
    "Contamination/contaminated_proof_7b_v1.0": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/664",
    "Contamination/contaminated_proof_7b_v1.0_safetensor": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/664",
}

# Models which have been requested by orgs to not be submitted on the leaderboard
DO_NOT_SUBMIT_MODELS = [
    "Voicelab/trurl-2-13b",  # trained on MMLU
    "TigerResearch/tigerbot-70b-chat",  # per authors request
    "TigerResearch/tigerbot-70b-chat-v2",  # per authors request
    "TigerResearch/tigerbot-70b-chat-v4-4k",  # per authors request
]


def flag_models(leaderboard_data: list[dict]):
    """Flags models based on external criteria or flagged status."""
    for model_data in leaderboard_data:
        # If a model is not flagged, use its "fullname" as a key
        if model_data[AutoEvalColumn.not_flagged.name]:
            flag_key = model_data[AutoEvalColumn.fullname.name]
        else:
            # Merges and moes are flagged
            flag_key = "merged"

        # Reverse the logic: Check for non-flagged models instead
        if flag_key in FLAGGED_MODELS:
            issue_num = FLAGGED_MODELS[flag_key].split("/")[-1]
            issue_link = model_hyperlink(
                FLAGGED_MODELS[flag_key],
                f"See discussion #{issue_num}",
            )
            model_data[
                AutoEvalColumn.model.name
            ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
            model_data[AutoEvalColumn.not_flagged.name] = False
        else:
            model_data[AutoEvalColumn.not_flagged.name] = True


def remove_forbidden_models(leaderboard_data: list[dict]):
    """Removes models from the leaderboard based on the DO_NOT_SUBMIT list."""
    indices_to_remove = []
    for ix, model in enumerate(leaderboard_data):
        if model[AutoEvalColumn.fullname.name] in DO_NOT_SUBMIT_MODELS:
            indices_to_remove.append(ix)

    # Remove the models from the list
    for ix in reversed(indices_to_remove):
        leaderboard_data.pop(ix)
    return leaderboard_data


def filter_models_flags(leaderboard_data: list[dict]):
    leaderboard_data = remove_forbidden_models(leaderboard_data)
    flag_models(leaderboard_data)
|
|
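For reference, the removed flagging helper is a plain dictionary lookup over the model's full name. A minimal sketch of that lookup, with the hyperlink formatting inlined and one entry taken from the FLAGGED_MODELS table above:

FLAGGED_MODELS = {
    "udkai/Turdus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540",
}

def flag_note(fullname: str) -> str:
    # Same lookup flag_models() performed: map the model's full name to its
    # moderation discussion and append a warning to the display name.
    url = FLAGGED_MODELS.get(fullname)
    if url is None:
        return fullname
    issue_num = url.split("/")[-1]
    return f"{fullname} has been flagged! See discussion #{issue_num} ({url})"

print(flag_note("udkai/Turdus"))
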
src/leaderboard/read_evals.py
DELETED
@@ -1,261 +0,0 @@
-import json
-from pathlib import Path
-from json import JSONDecodeError
-import logging
-import math
-
-from dataclasses import dataclass, field
-from typing import Optional, Dict, List
-
-from tqdm import tqdm
-from tqdm.contrib.logging import logging_redirect_tqdm
-
-import numpy as np
-
-from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, ModelType, Precision, Tasks, WeightType, parse_datetime
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
-
-
-@dataclass
-class EvalResult:
-    # Also see src.display.utils.AutoEvalColumn for what will be displayed.
-    eval_name: str  # org_model_precision (uid)
-    full_model: str  # org/model (path on hub)
-    org: Optional[str]
-    model: str
-    revision: str  # commit hash, "" if main
-    results: Dict[str, float]
-    precision: Precision = Precision.Unknown
-    model_type: ModelType = ModelType.Unknown  # Pretrained, fine tuned, ...
-    weight_type: WeightType = WeightType.Original
-    architecture: str = "Unknown"  # From config file
-    license: str = "?"
-    likes: int = 0
-    num_params: int = 0
-    date: str = ""  # submission date of request file
-    still_on_hub: bool = True
-    is_merge: bool = False
-    not_flagged: bool = False
-    status: str = "FINISHED"
-    # List of tags, initialized to a new empty list for each instance to avoid the pitfalls of mutable default arguments.
-    tags: List[str] = field(default_factory=list)
-
-    @classmethod
-    def init_from_json_file(cls, json_filepath: str) -> "EvalResult":
-        with open(json_filepath, "r") as fp:
-            data = json.load(fp)
-
-        config = data.get("config_general", {})
-        precision = Precision.from_str(config.get("model_dtype", "unknown"))
-        org_and_model = config.get("model_name", "").split("/", 1)
-        org = org_and_model[0] if len(org_and_model) > 1 else None
-        model = org_and_model[-1]
-        if len(org_and_model) == 1:
-            org = None
-            model = org_and_model[0]
-            result_key = f"{model}_{precision.value.name}"
-        else:
-            org = org_and_model[0]
-            model = org_and_model[1]
-            result_key = f"{org}_{model}_{precision.value.name}"
-        full_model = "/".join(org_and_model)
-
-        results = cls.extract_results(data)  # Properly call the method to extract results
-
-        return cls(
-            eval_name=result_key,
-            full_model=full_model,
-            org=org,
-            model=model,
-            results=results,
-            precision=precision,
-            revision=config.get("model_sha", ""),
-        )
-
-    @staticmethod
-    def extract_results(data: Dict) -> Dict[str, float]:
-        """
-        Extract and process benchmark results from a given dict.
-
-        Parameters:
-        - data (Dict): A dictionary containing benchmark data. This dictionary must
-          include 'versions' and 'results' keys with respective sub-data.
-
-        Returns:
-        - Dict[str, float]: A dictionary where keys are benchmark names and values
-          are the processed average scores as percentages.
-
-        Notes:
-        - The method specifically checks for certain benchmark names to skip outdated entries.
-        - Handles NaN values by setting the corresponding benchmark result to 0.0.
-        - Averages scores across metrics for benchmarks found in the data, in a percentage format.
-        """
-        results = {}
-        for task in Tasks:
-            task = task.value
-            # We skip old mmlu entries
-            if task.benchmark == "hendrycksTest":
-                for mmlu_k in ["harness|hendrycksTest-abstract_algebra|5", "hendrycksTest-abstract_algebra"]:
-                    if mmlu_k in data["versions"] and data["versions"][mmlu_k] == 0:
-                        continue
-
-            # Some benchmark values are NaNs, mostly truthfulQA
-            # Would be more optimal (avoiding the whole dict iteration) if the benchmark name matched the key in results,
-            # e.g. not harness|truthfulqa:mc|0 but truthfulqa:mc
-            for k, v in data["results"].items():
-                if task.benchmark in k:
-                    if math.isnan(float(v[task.metric])):
-                        results[task.benchmark] = 0.0
-                        continue
-
-            # We average all scores of a given metric (mostly for mmlu)
-            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark in k])
-            if accs.size == 0 or any([acc is None for acc in accs]):
-                continue
-
-            mean_acc = np.mean(accs) * 100.0
-            results[task.benchmark] = mean_acc
-
-        return results
-
-    def update_with_request_file(self, requests_path):
-        """Finds the relevant request file for the current model and updates info with it."""
-        try:
-            request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-            if request_file is None:
-                logging.warning(f"No request file for {self.org}/{self.model}")
-                self.status = "FAILED"
-                return
-
-            with open(request_file, "r") as f:
-                request = json.load(f)
-
-            self.model_type = ModelType.from_str(request.get("model_type", "Unknown"))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
-            self.num_params = int(request.get("params", 0))  # Ensuring type safety
-            self.date = request.get("submitted_time", "")
-            self.architecture = request.get("architectures", "Unknown")
-            self.status = request.get("status", "FAILED")
-
-        except FileNotFoundError:
-            self.status = "FAILED"
-            logging.error(f"Request file: {request_file} not found for {self.org}/{self.model}")
-        except JSONDecodeError:
-            self.status = "FAILED"
-            logging.error(f"Error decoding JSON from the request file for {self.org}/{self.model}")
-        except KeyError as e:
-            self.status = "FAILED"
-            logging.error(f"Key error {e} in processing request file for {self.org}/{self.model}")
-        except Exception as e:  # Catch-all for any other unexpected exceptions
-            self.status = "FAILED"
-            logging.error(f"Unexpected error {e} for {self.org}/{self.model}")
-
-    def update_with_dynamic_file_dict(self, file_dict):
-        """Update object attributes based on the provided dictionary, with error handling for missing keys and type validation."""
-        # Default values set for optional or potentially missing keys.
-        self.license = file_dict.get("license", "?")
-        self.likes = int(file_dict.get("likes", 0))  # Ensure likes is treated as an integer
-        self.still_on_hub = file_dict.get("still_on_hub", False)  # Default to False if key is missing
-        self.tags = file_dict.get("tags", [])
-
-        # Calculate `flagged` only if 'tags' is not empty and avoid calculating each time
-        self.not_flagged = not (any("flagged" in tag for tag in self.tags))
-
-    def to_dict(self):
-        """Converts the Eval Result to a dict compatible with our dataframe display"""
-        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
-        data_dict = {
-            "eval_name": self.eval_name,  # not a column, just a save name,
-            AutoEvalColumn.precision.name: self.precision.value.name,
-            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
-            AutoEvalColumn.architecture.name: self.architecture,
-            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-            AutoEvalColumn.fullname.name: self.full_model,
-            AutoEvalColumn.revision.name: self.revision,
-            AutoEvalColumn.average.name: average,
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
-            AutoEvalColumn.merged.name: not ("merge" in self.tags if self.tags else False),
-            AutoEvalColumn.moe.name: not (
-                ("moe" in self.tags if self.tags else False) or "moe" in self.full_model.lower()
-            ),
-            AutoEvalColumn.not_flagged.name: self.not_flagged,
-        }
-
-        for task in Tasks:
-            data_dict[task.value.col_name] = self.results[task.value.benchmark]
-
-        return data_dict
-
-
-def get_request_file_for_model(requests_path, model_name, precision):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-    requests_path = Path(requests_path)
-    pattern = f"{model_name}_eval_request_*.json"
-
-    # Using pathlib to find files matching the pattern
-    request_files = list(requests_path.glob(pattern))
-
-    # Sort the files by name in descending order to mimic 'reverse=True'
-    request_files.sort(reverse=True)
-
-    # Select the correct request file based on 'status' and 'precision'
-    request_file = None
-    for request_file in request_files:
-        with request_file.open("r") as f:
-            req_content = json.load(f)
-            if req_content["status"] == "FINISHED" and req_content["precision"] == precision.split(".")[-1]:
-                request_file = str(request_file)
-
-    # Return empty string if no file found that matches criteria
-    return request_file
-
-
-def get_raw_eval_results(results_path: str, requests_path: str, dynamic_path: str) -> list[EvalResult]:
-    """From the path of the results folder root, extract all needed info for results"""
-    with open(dynamic_path) as f:
-        dynamic_data = json.load(f)
-
-    results_path = Path(results_path)
-    model_files = list(results_path.rglob("results_*.json"))
-    model_files.sort(key=lambda file: parse_datetime(file.stem.removeprefix("results_")))
-
-    eval_results = {}
-    # Wrap model_files iteration with tqdm for progress display
-    for model_result_filepath in tqdm(model_files, desc="Processing model files"):
-        # Creation of result
-        eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        with logging_redirect_tqdm():
-            eval_result.update_with_request_file(requests_path)
-
-        if eval_result.full_model in dynamic_data:
-            eval_result.update_with_dynamic_file_dict(dynamic_data[eval_result.full_model])
-            # Hardcoding because of gating problem
-            if any([org in eval_result.full_model for org in ["meta-llama/", "google/", "tiiuae/"]]):
-                eval_result.still_on_hub = True
-
-        # Store results of same eval together
-        eval_name = eval_result.eval_name
-        if eval_name in eval_results.keys():
-            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
-        else:
-            eval_results[eval_name] = eval_result
-
-    results = []
-    for k, v in eval_results.items():
-        try:
-            if v.status == "FINISHED":
-                v.to_dict()  # we test if the dict version is complete
-                results.append(v)
-        except KeyError as e:
-            logging.error(f"Error while checking model {k} {v.date} json, no key: {e}")  # not all eval values present
-            continue
-
-    return results
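For illustration, this is the shape of a harness-style result file that the removed EvalResult.init_from_json_file parsed. The field names come from the code above; the model id, sha, and score are made up, and the benchmark/metric pair assumes a Task configured with benchmark "truthfulqa:mc" and metric "mc2":

# Minimal sketch of a results_*.json payload; values are illustrative.
sample = {
    "config_general": {
        "model_name": "org/model",        # split into org and model
        "model_dtype": "torch.float16",   # mapped through Precision.from_str
        "model_sha": "abc123",            # stored as the revision
    },
    "versions": {"harness|truthfulqa:mc|0": 1},
    "results": {"harness|truthfulqa:mc|0": {"mc2": 0.52}},
}
# For a Task whose benchmark is "truthfulqa:mc" and metric "mc2",
# extract_results(sample) would average the matching scores and
# return {"truthfulqa:mc": 52.0}.
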
src/populate.py
DELETED
@@ -1,52 +0,0 @@
-import pathlib
-import pandas as pd
-from src.display.formatting import has_no_nan_values, make_clickable_model
-from src.display.utils import AutoEvalColumn, EvalQueueColumn, baseline_row
-from src.leaderboard.filter_models import filter_models_flags
-from src.leaderboard.read_evals import get_raw_eval_results
-from src.display.utils import load_json_data
-
-
-def _process_model_data(entry, model_name_key="model", revision_key="revision"):
-    """Enrich model data with clickable links and revisions."""
-    entry[EvalQueueColumn.model.name] = make_clickable_model(entry.get(model_name_key, ""))
-    entry[EvalQueueColumn.revision.name] = entry.get(revision_key, "main")
-    return entry
-
-
-def get_evaluation_queue_df(save_path, cols):
-    """Generate dataframes for pending, running, and finished evaluation entries."""
-    save_path = pathlib.Path(save_path)
-    all_evals = []
-
-    for path in save_path.rglob("*.json"):
-        data = load_json_data(path)
-        if data:
-            all_evals.append(_process_model_data(data))
-
-    # Organizing data by status
-    status_map = {
-        "PENDING": ["PENDING", "RERUN"],
-        "RUNNING": ["RUNNING"],
-        "FINISHED": ["FINISHED", "PENDING_NEW_EVAL"],
-    }
-    status_dfs = {status: [] for status in status_map}
-    for eval_data in all_evals:
-        for status, extra_statuses in status_map.items():
-            if eval_data["status"] in extra_statuses:
-                status_dfs[status].append(eval_data)
-
-    return tuple(pd.DataFrame(status_dfs[status], columns=cols) for status in ["FINISHED", "RUNNING", "PENDING"])
-
-
-def get_leaderboard_df(results_path, requests_path, dynamic_path, cols, benchmark_cols):
-    """Retrieve and process leaderboard data."""
-    raw_data = get_raw_eval_results(results_path, requests_path, dynamic_path)
-    all_data_json = [model.to_dict() for model in raw_data] + [baseline_row]
-    filter_models_flags(all_data_json)
-
-    df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-    df = df[cols].round(decimals=2)
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    return raw_data, df
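The queue partitioning in the removed get_evaluation_queue_df is a plain status-to-bucket lookup. A small self-contained sketch of the same pattern, with invented entries for the demo:

import pandas as pd

status_map = {
    "PENDING": ["PENDING", "RERUN"],
    "RUNNING": ["RUNNING"],
    "FINISHED": ["FINISHED", "PENDING_NEW_EVAL"],
}
evals = [{"model": "org/a", "status": "RERUN"}, {"model": "org/b", "status": "FINISHED"}]

buckets = {status: [] for status in status_map}
for entry in evals:
    for status, aliases in status_map.items():
        if entry["status"] in aliases:
            buckets[status].append(entry)  # RERUN lands in PENDING, etc.

finished, running, pending = (pd.DataFrame(buckets[s]) for s in ["FINISHED", "RUNNING", "PENDING"])
print(len(pending), len(finished))  # 1 1
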
src/radial/radial.py
CHANGED
@@ -1,115 +1,126 @@
+import itertools as it
+import random
+
 import matplotlib.pyplot as plt
 import numpy as np
+import plotly.graph_objects as go
+
-from src.leaderboard.build_leaderboard import build_leadearboard_df
+from src.leaderboard.build_leaderboard import build_leaderboard_df
 
 
 def create_plot(selected_models):
     """
     Builds a bar chart comparing the selected models on the DeathMath metrics
+
     Args:
         selected_models: list of model names to show on the plot
+
     Returns:
         matplotlib.figure.Figure: figure to render in the interface
     """
     # Fetch model data from the leaderboard
-    models_df = build_leadearboard_df()
+    models_df = build_leaderboard_df()
+
     # If no models are selected or the data failed to load, return an empty figure
     if not selected_models or models_df.empty:
         fig, ax = plt.subplots(figsize=(10, 6))
+        ax.text(
+            0.5,
+            0.5,
+            "Нет данных для отображения",
+            horizontalalignment="center",
+            verticalalignment="center",
+            transform=ax.transAxes,
+            fontsize=14,
+        )
         ax.set_axis_off()
         return fig
+
     # Filter the DataFrame down to the selected models
+    models_to_show = models_df[models_df["model"].isin(selected_models)]
+
     if models_to_show.empty:
         fig, ax = plt.subplots(figsize=(10, 6))
+        ax.text(
+            0.5,
+            0.5,
+            "Выбранные модели не найдены в данных",
+            horizontalalignment="center",
+            verticalalignment="center",
+            transform=ax.transAxes,
+            fontsize=14,
+        )
         ax.set_axis_off()
         return fig
+
     # Set up the bar chart comparing the models
     fig, ax = plt.subplots(figsize=(12, 8))
+
     # Bar width
     bar_width = 0.25
+
     # Positions along the x axis
     models_count = len(models_to_show)
     indices = np.arange(models_count)
+
     # Colour palette
+    colors = ["#1f77b4", "#ff7f0e", "#2ca02c"]
+
     # Draw bars for each metric
+    ax.bar(indices - bar_width, models_to_show["math_score"], bar_width, label="RussianMath Score", color=colors[0])
+    ax.bar(indices, models_to_show["physics_score"], bar_width, label="RussianPhysics Score", color=colors[1])
+    ax.bar(indices + bar_width, models_to_show["score"], bar_width, label="Combined Score", color=colors[2])
+
     # Configure axes and labels
+    ax.set_xlabel("Модели")
+    ax.set_ylabel("Баллы")
+    ax.set_title("Сравнение производительности моделей на DeathMath benchmark")
     ax.set_xticks(indices)
+    ax.set_xticklabels(models_to_show["model"], rotation=45, ha="right")
     ax.legend()
+
     # Clamp the y axis to the 0-1 range
     ax.set_ylim(0, 1.0)
+
     # Add a grid for readability
+    ax.grid(axis="y", linestyle="--", alpha=0.7)
+
     # Make sure all labels fit
     plt.tight_layout()
+
     return fig
 
+
 def create_radar_plot(selected_models):
     """
     Builds a radar chart comparing the selected models
+
     Args:
         selected_models: list of model names to show on the plot
+
     Returns:
         plotly.graph_objects.Figure: interactive radar chart
     """
-    models = build_leadearboard_df()
+    models = build_leaderboard_df()
     metrics = ["math_score", "physics_score", "score"]
     metric_labels = ["RussianMath", "RussianPhysics", "Combined"]
+
     MIN_COLOUR_DISTANCE_BETWEEN_MODELS = 100
     seed = 42
+
     def generate_colours(min_distance, seed):
         colour_mapping = {}
         all_models = selected_models
+
         for i in it.count():
             min_colour_distance = min_distance - i
             retries_left = 10 * len(all_models)
+
             for model_id in all_models:
                 random.seed(hash(model_id) + i + seed)
                 r, g, b = 0, 0, 0
                 too_bright, similar_to_other_model = True, True
+
                 while (too_bright or similar_to_other_model) and retries_left > 0:
                     r, g, b = tuple(random.randint(0, 255) for _ in range(3))
                     too_bright = np.min([r, g, b]) > 200
@@ -118,44 +129,40 @@ def create_radar_plot(selected_models):
                         for colour in colour_mapping.values()
                     )
                     retries_left -= 1
+
                 colour_mapping[model_id] = (r, g, b)
                 if len(colour_mapping) == len(all_models):
                     break
+
         return colour_mapping
 
     colour_mapping = generate_colours(MIN_COLOUR_DISTANCE_BETWEEN_MODELS, seed)
     fig = go.Figure()
+
     for _, model_data in models.iterrows():
         model_name = model_data["model"]
         if model_name not in selected_models:
            continue
+
         values = [model_data[metric] for metric in metrics]
+        color = f"rgb{colour_mapping[model_name]}"
+
+        fig.add_trace(
+            go.Scatterpolar(
+                r=values,
+                theta=metric_labels,
+                name=model_name,
+                fill="toself",
+                fillcolor=f"rgba{colour_mapping[model_name] + (0.6,)}",
+                line=dict(color=color),
+            )
+        )
+
+    fig.update_layout(
+        polar=dict(radialaxis=dict(visible=True, range=[0, 1])),
         showlegend=True,
+        title="Сравнение моделей на DeathMath",
         template="plotly_dark",
    )
 
+    return fig
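These two helpers feed the Space's comparison tab. A minimal hedged sketch of how they can be wired into a Gradio UI, assuming the leaderboard data is available; the component layout and model names are illustrative, not taken from app.py:

import gradio as gr

from src.radial.radial import create_plot, create_radar_plot

with gr.Blocks() as demo:
    # Hypothetical model choices; the real app populates these from the leaderboard.
    models = gr.CheckboxGroup(choices=["org/model-a", "org/model-b"], label="Models")
    bar = gr.Plot()
    radar = gr.Plot()
    models.change(create_plot, inputs=models, outputs=bar)
    models.change(create_radar_plot, inputs=models, outputs=radar)

demo.launch()
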
src/scripts/create_request_file.py
DELETED
@@ -1,92 +0,0 @@
-import json
-import os
-import pprint
-from datetime import datetime, timezone
-
-import click
-from colorama import Fore
-from huggingface_hub import HfApi, snapshot_download
-
-from src.display.utils import ModelType, WeightType
-from src.submission.check_validity import get_model_size
-
-EVAL_REQUESTS_PATH = "eval-queue"
-QUEUE_REPO = "open-llm-leaderboard/requests"
-
-precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
-model_types = [e.name for e in ModelType]
-weight_types = [e.name for e in WeightType]
-
-
-def main():
-    api = HfApi()
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
-
-    model_name = click.prompt("Enter model name")
-    revision = click.prompt("Enter revision", default="main")
-    precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
-    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
-    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
-    base_model = click.prompt("Enter base model", default="")
-    status = click.prompt("Enter status", default="FINISHED")
-
-    try:
-        model_info = api.model_info(repo_id=model_name, revision=revision)
-    except Exception as e:
-        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
-        return 1
-
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        license = "?"
-
-    eval_entry = {
-        "model": model_name,
-        "base_model": base_model,
-        "revision": model_info.sha,  # force to use the exact model commit
-        "private": False,
-        "precision": precision,
-        "weight_type": weight_type,
-        "status": status,
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
-    }
-
-    user_name = ""
-    model_path = model_name
-    if "/" in model_name:
-        user_name = model_name.split("/")[0]
-        model_path = model_name.split("/")[1]
-
-    pprint.pprint(eval_entry)
-
-    if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
-        click.echo("continuing...")
-
-        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
-        os.makedirs(out_dir, exist_ok=True)
-        out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
-
-        with open(out_path, "w") as f:
-            f.write(json.dumps(eval_entry))
-
-        api.upload_file(
-            path_or_fileobj=out_path,
-            path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
-            repo_id=QUEUE_REPO,
-            repo_type="dataset",
-            commit_message=f"Add {model_name} to eval queue",
-        )
-    else:
-        click.echo("aborting...")
-
-
-if __name__ == "__main__":
-    main()
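The script above serializes one request per model; the filename embeds the private flag, precision, and weight type, per out_path. A sketch of the payload it produced, with made-up values throughout (the model id, sha, date, and sizes are illustrative):

import json

# Shape of the request file written by main(); values are illustrative.
eval_entry = {
    "model": "openchat/openchat-3.5",   # hypothetical model id
    "base_model": "",
    "revision": "abc123",               # pinned commit sha
    "private": False,
    "precision": "float16",
    "weight_type": "Original",
    "status": "FINISHED",
    "submitted_time": "2024-01-01T00:00:00Z",
    "model_type": "fine-tuned",         # real values come from the ModelType enum
    "likes": 0,
    "params": 7.242,
    "license": "apache-2.0",
}
path = "eval-queue/openchat/openchat-3.5_eval_request_False_float16_Original.json"
print(path, json.dumps(eval_entry, indent=2))
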
src/scripts/update_all_request_files.py
DELETED
@@ -1,96 +0,0 @@
-import json
-import os
-import subprocess
-
-from src.envs import EVAL_REQUESTS_PATH, H4_TOKEN
-from src.submission.check_validity import check_model_card, get_model_tags, is_model_on_hub
-
-
-def update_one_model(model_id, data, models_on_the_hub):
-    # Model no longer on the hub at all
-    if model_id not in models_on_the_hub:
-        data["still_on_hub"] = False
-        data["likes"] = 0
-        data["downloads"] = 0
-        data["created_at"] = ""
-        data["tags"] = []
-        return data
-
-    # Grabbing model parameters
-    model_cfg = models_on_the_hub[model_id]
-    data["likes"] = model_cfg.likes
-    data["downloads"] = model_cfg.downloads
-    data["created_at"] = str(model_cfg.created_at)
-    data["license"] = model_cfg.card_data.license if model_cfg.card_data is not None else ""
-
-    # Grabbing model details
-    model_name = model_id
-    if model_cfg.card_data is not None and model_cfg.card_data.base_model is not None:
-        if isinstance(model_cfg.card_data.base_model, str):
-            model_name = model_cfg.card_data.base_model  # for adapters, we look at the parent model
-    still_on_hub, _, _ = is_model_on_hub(
-        model_name=model_name,
-        revision=data.get("revision"),
-        trust_remote_code=True,
-        test_tokenizer=False,
-        token=H4_TOKEN,
-    )
-    # If the model doesn't have a model card or a license, we consider it deleted
-    if still_on_hub:
-        try:
-            status, _, model_card = check_model_card(model_id)
-            if status is False:
-                still_on_hub = False
-        except Exception:
-            model_card = None
-            still_on_hub = False
-    data["still_on_hub"] = still_on_hub
-
-    tags = get_model_tags(model_card, model_id) if still_on_hub else []
-
-    data["tags"] = tags
-    return data
-
-
-def update_models(file_path, models_on_the_hub):
-    """
-    Search through all JSON files in the specified root folder and its subfolders,
-    and update the likes key in each JSON dict from the value of the input dict
-    """
-    seen_models = []
-    with open(file_path, "r") as f:
-        model_infos = json.load(f)
-        for model_id in model_infos.keys():
-            seen_models.append(model_id)
-            model_infos[model_id] = update_one_model(
-                model_id=model_id, data=model_infos[model_id], models_on_the_hub=models_on_the_hub
-            )
-
-    # If new requests files have been created since we started all this,
-    # we grab them
-    all_models = []
-    try:
-        for ix, (root, _, files) in enumerate(os.walk(EVAL_REQUESTS_PATH)):
-            if ix == 0:
-                continue
-            for file in files:
-                if "eval_request" in file:
-                    path = root.split("/")[-1] + "/" + file.split("_eval_request")[0]
-                    all_models.append(path)
-    except Exception as e:
-        print(e)
-        pass
-
-    for model_id in all_models:
-        if model_id not in seen_models:
-            model_infos[model_id] = update_one_model(model_id=model_id, data={}, models_on_the_hub=models_on_the_hub)
-
-    with open(file_path, "w") as f:
-        json.dump(model_infos, f, indent=2)
-
-
-def update_dynamic_files():
-    # from gen import gen_answer, gen_judgment
-    subprocess.Popen("python3 ../gen/gen_judgement.py")
-
-    subprocess.Popen("python3 ../gen/show_result.py --output")
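update_one_model above maintains one dict per model in the dynamic-info file. A sketch of an entry after a successful refresh; the keys are taken from the assignments above, the values are illustrative:

# Illustrative dynamic-info entry produced by update_one_model().
dynamic_info_entry = {
    "still_on_hub": True,
    "likes": 128,                                # illustrative
    "downloads": 40000,                          # illustrative
    "created_at": "2024-01-01 00:00:00+00:00",
    "license": "apache-2.0",
    "tags": ["merge"],                           # produced by get_model_tags()
}
print(dynamic_info_entry)
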
src/submission/check_validity.py
DELETED
@@ -1,178 +0,0 @@
-import json
-import os
-import re
-from collections import defaultdict
-from datetime import datetime, timedelta, timezone
-
-import huggingface_hub
-from huggingface_hub import ModelCard
-from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata
-from transformers import AutoConfig, AutoTokenizer
-
-from src.envs import HAS_HIGHER_RATE_LIMIT
-
-
-# ht to @Wauplin, thank you for the snippet!
-# See https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/317
-def check_model_card(repo_id: str) -> tuple[bool, str]:
-    # Returns operation status, and error message
-    try:
-        card = ModelCard.load(repo_id)
-    except huggingface_hub.utils.EntryNotFoundError:
-        return False, "Please add a model card to your model to explain how you trained/fine-tuned it.", None
-
-    # Enforce license metadata
-    if card.data.license is None:
-        if not ("license_name" in card.data and "license_link" in card.data):
-            return (
-                False,
-                (
-                    "License not found. Please add a license to your model card using the `license` metadata or a"
-                    " `license_name`/`license_link` pair."
-                ),
-                None,
-            )
-
-    # Enforce card content
-    if len(card.text) < 200:
-        return False, "Please add a description to your model card, it is too short.", None
-
-    return True, "", card
-
-
-def is_model_on_hub(
-    model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False
-) -> tuple[bool, str, AutoConfig]:
-    try:
-        config = AutoConfig.from_pretrained(
-            model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
-        )  # , force_download=True)
-        if test_tokenizer:
-            try:
-                AutoTokenizer.from_pretrained(
-                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
-                )
-            except ValueError as e:
-                return (False, f"uses a tokenizer which is not in a transformers release: {e}", None)
-            except Exception:
-                return (
-                    False,
-                    "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
-                    None,
-                )
-        return True, None, config
-
-    except ValueError:
-        return (
-            False,
-            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-            None,
-        )
-
-    except Exception as e:
-        if "You are trying to access a gated repo." in str(e):
-            return True, "uses a gated model.", None
-        return False, f"was not found or misconfigured on the hub! Error raised was {e.args[0]}", None
-
-
-def get_model_size(model_info: ModelInfo, precision: str):
-    size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")
-    safetensors = None
-    try:
-        safetensors = get_safetensors_metadata(model_info.id)
-    except Exception as e:
-        print(e)
-
-    if safetensors is not None:
-        model_size = round(sum(safetensors.parameter_count.values()) / 1e9, 3)
-    else:
-        try:
-            size_match = re.search(size_pattern, model_info.id.lower())
-            model_size = size_match.group(0)
-            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
-        except AttributeError:
-            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1
-    model_size = size_factor * model_size
-    return model_size
-
-
-def get_model_arch(model_info: ModelInfo):
-    return model_info.config.get("architectures", "Unknown")
-
-
-def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
-    if org_or_user not in users_to_submission_dates:
-        return True, ""
-    submission_dates = sorted(users_to_submission_dates[org_or_user])
-
-    time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
-    submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
-
-    num_models_submitted_in_period = len(submissions_after_timelimit)
-    if org_or_user in HAS_HIGHER_RATE_LIMIT:
-        rate_limit_quota = 2 * rate_limit_quota
-
-    if num_models_submitted_in_period > rate_limit_quota:
-        error_msg = f"Organisation or user `{org_or_user}` "
-        error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
-        error_msg += f"in the last {rate_limit_period} days.\n"
-        error_msg += (
-            "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
-        )
-        return False, error_msg
-    return True, ""
-
-
-def already_submitted_models(requested_models_dir: str) -> set[str]:
-    depth = 1
-    file_names = []
-    users_to_submission_dates = defaultdict(list)
-
-    for root, _, files in os.walk(requested_models_dir):
-        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-        if current_depth == depth:
-            for file in files:
-                if not file.endswith(".json"):
-                    continue
-                with open(os.path.join(root, file), "r") as f:
-                    info = json.load(f)
-                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-                    # Select organisation
-                    if info["model"].count("/") == 0 or "submitted_time" not in info:
-                        continue
-                    organisation, _ = info["model"].split("/")
-                    users_to_submission_dates[organisation].append(info["submitted_time"])
-
-    return set(file_names), users_to_submission_dates
-
-
-def get_model_tags(model_card, model: str):
-    is_merge_from_metadata = False
-    is_moe_from_metadata = False
-
-    tags = []
-    if model_card is None:
-        return tags
-    if model_card.data.tags:
-        is_merge_from_metadata = any(
-            [tag in model_card.data.tags for tag in ["merge", "moerge", "mergekit", "lazymergekit"]]
-        )
-        is_moe_from_metadata = any([tag in model_card.data.tags for tag in ["moe", "moerge"]])
-
-    is_merge_from_model_card = any(
-        keyword in model_card.text.lower() for keyword in ["merged model", "merge model", "moerge"]
-    )
-    if is_merge_from_model_card or is_merge_from_metadata:
-        tags.append("merge")
-    is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in ["moe", "mixtral"])
-    # Hardcoding because of gating problem
-    if "Qwen/Qwen1.5-32B" in model:
-        is_moe_from_model_card = False
-    is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
-    if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
-        tags.append("moe")
-
-    return tags
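When safetensors metadata is unavailable, get_model_size falls back to the size regex above, reading the parameter count out of the repo id. A quick standalone illustration of that fallback; the repo id is made up:

import re

size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")

repo_id = "examplelab/example-7b-chat"  # hypothetical repo id
match = re.search(size_pattern, repo_id.lower())
size = match.group(0)  # "7b"
# "b" suffix means billions already; "m" means millions, so divide by 1e3.
params_in_billions = float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3
print(round(params_in_billions, 3))  # 7.0
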
src/submission/submit.py
DELETED
@@ -1,171 +0,0 @@
-from src.display.formatting import styled_message
-# from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS
-# from src.submission.check_validity import (
-#     already_submitted_models,
-#     check_model_card,
-#     get_model_size,
-#     get_model_tags,
-#     is_model_on_hub,
-#     user_submission_permission,
-# )
-
-REQUESTED_MODELS = None
-USERS_TO_SUBMISSION_DATES = None
-
-
-def add_new_eval(
-    model: str,
-):
-    # global REQUESTED_MODELS
-    # global USERS_TO_SUBMISSION_DATES
-    # if not REQUESTED_MODELS:
-    #     REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-
-    # user_name = ""
-    # model_path = model
-    # if "/" in model:
-    #     user_name = model.split("/")[0]
-    #     model_path = model.split("/")[1]
-
-    # # precision = precision.split(" ")[0]
-    # current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-
-    # if model_type is None or model_type == "":
-    #     return styled_error("Please select a model type.")
-
-    # # Is the user rate limited?
-    # if user_name != "":
-    #     user_can_submit, error_msg = user_submission_permission(
-    #         user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
-    #     )
-    #     if not user_can_submit:
-    #         return styled_error(error_msg)
-
-    # Did the model authors forbid its submission to the leaderboard?
-    # if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
-    #     return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
-
-    # if model == "CohereForAI/c4ai-command-r-plus":
-    #     return styled_warning(
-    #         "This model cannot be submitted manually on the leaderboard before the transformers release."
-    #     )
-
-    # # Does the model actually exist?
-    # if revision == "":
-    #     revision = "main"
-
-    # # Is the model on the hub?
-    # if weight_type in ["Delta", "Adapter"]:
-    #     base_model_on_hub, error, _ = is_model_on_hub(
-    #         model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=True
-    #     )
-    #     if not base_model_on_hub:
-    #         return styled_error(f'Base model "{base_model}" {error}')
-
-    # architecture = "?"
-    # downloads = 0
-    # created_at = ""
-    # if not weight_type == "Adapter":
-    #     model_on_hub, error, model_config = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=True)
-    #     if not model_on_hub or model_config is None:
-    #         return styled_error(f'Model "{model}" {error}')
-    #     if model_config is not None:
-    #         architectures = getattr(model_config, "architectures", None)
-    #         if architectures:
-    #             architecture = ";".join(architectures)
-    #         downloads = getattr(model_config, "downloads", 0)
-    #         created_at = getattr(model_config, "created_at", "")
-
-    # Is the model info correctly filled?
-    # try:
-    #     model_info = API.model_info(repo_id=model, revision=revision)
-    # except Exception:
-    #     return styled_error("Could not get your model information. Please fill it up properly.")
-
-    # model_size = get_model_size(model_info=model_info, precision=precision)
-
-    # Were the model card and license filled?
-    # try:
-    #     license = model_info.cardData["license"]
-    # except Exception:
-    #     return styled_error("Please select a license for your model")
-
-    # modelcard_OK, error_msg, model_card = check_model_card(model)
-    # if not modelcard_OK:
-    #     return styled_error(error_msg)
-
-    # tags = get_model_tags(model_card, model)
-
-    # # Seems good, creating the eval
-    # print("Adding new eval")
-
-    # eval_entry = {
-    #     "model": model,
-    #     # "base_model": base_model,
-    #     # "revision": model_info.sha,  # force to use the exact model commit
-    #     # "private": private,
-    #     # "precision": precision,
-    #     # "params": model_size,
-    #     # "architectures": architecture,
-    #     # "weight_type": weight_type,
-    #     "status": "PENDING",
-    #     # "submitted_time": current_time,
-    #     # "model_type": model_type,
-    #     "job_id": -1,
-    #     "job_start_time": None,
-    # }
-
-    # supplementary_info = {
-    #     "likes": model_info.likes,
-    #     "license": license,
-    #     "still_on_hub": True,
-    #     "tags": tags,
-    #     "downloads": downloads,
-    #     "created_at": created_at,
-    # }
-
-    # # Check for duplicate submission
-    # if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
-    #     return styled_warning("This model has been already submitted.")
-
-    # print("Creating eval file")
-    # OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-    # os.makedirs(OUT_DIR, exist_ok=True)
-    # out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
-
-    # with open(out_path, "w") as f:
-    #     f.write(json.dumps(eval_entry))
-
-    # print("Uploading eval file")
-    # API.upload_file(
-    #     path_or_fileobj=out_path,
-    #     path_in_repo=out_path.split("eval-queue/")[1],
-    #     repo_id=QUEUE_REPO,
-    #     repo_type="dataset",
-    #     commit_message=f"Add {model} to eval queue",
-    # )
-
-    # We want to grab the latest version of the submission file to not accidentally overwrite it
-    # snapshot_download(
-    #     repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
-    # )
-
-    # with open(DYNAMIC_INFO_FILE_PATH) as f:
-    #     all_supplementary_info = json.load(f)
-
-    # # all_supplementary_info[model] = supplementary_info
-    # with open(DYNAMIC_INFO_FILE_PATH, "w") as f:
-    #     json.dump(all_supplementary_info, f, indent=2)
-
-    # API.upload_file(
-    #     path_or_fileobj=DYNAMIC_INFO_FILE_PATH,
-    #     path_in_repo=DYNAMIC_INFO_FILE_PATH.split("/")[-1],
-    #     repo_id=DYNAMIC_INFO_REPO,
-    #     repo_type="dataset",
-    #     commit_message=f"Add {model} to dynamic info queue",
-    # )
-
-    # # Remove the local file
-    # os.remove(out_path)
-
-    return styled_message("Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/tools/collections.py
DELETED
|
@@ -1,76 +0,0 @@
|
|
| 1 |
-
import pandas as pd
|
| 2 |
-
from huggingface_hub import add_collection_item, delete_collection_item, get_collection, update_collection_item
|
| 3 |
-
from huggingface_hub.utils._errors import HfHubHTTPError
|
| 4 |
-
from pandas import DataFrame
|
| 5 |
-
|
| 6 |
-
from src.display.utils import AutoEvalColumn, ModelType
|
| 7 |
-
from src.envs import H4_TOKEN, PATH_TO_COLLECTION
|
| 8 |
-
|
| 9 |
-
# Specific intervals for the collections
|
| 10 |
-
intervals = {
|
| 11 |
-
"1B": pd.Interval(0, 1.5, closed="right"),
|
| 12 |
-
"3B": pd.Interval(2.5, 3.5, closed="neither"),
|
| 13 |
-
"7B": pd.Interval(6, 8, closed="neither"),
|
| 14 |
-
"13B": pd.Interval(10, 14, closed="neither"),
|
| 15 |
-
"30B": pd.Interval(25, 35, closed="neither"),
|
| 16 |
-
"65B": pd.Interval(60, 70, closed="neither"),
|
| 17 |
-
}
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
def _filter_by_type_and_size(df, model_type, size_interval):
|
| 21 |
-
"""Filter DataFrame by model type and parameter size interval."""
|
| 22 |
-
type_emoji = model_type.value.symbol[0]
|
| 23 |
-
filtered_df = df[df[AutoEvalColumn.model_type_symbol.name] == type_emoji]
|
| 24 |
-
params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
|
| 25 |
-
mask = params_column.apply(lambda x: x in size_interval)
|
| 26 |
-
return filtered_df.loc[mask]
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def _add_models_to_collection(collection, models, model_type, size):
|
| 30 |
-
"""Add best models to the collection and update positions."""
|
| 31 |
-
cur_len_collection = len(collection.items)
|
| 32 |
-
for ix, model in enumerate(models, start=1):
|
| 33 |
-
try:
|
| 34 |
-
collection = add_collection_item(
|
| 35 |
-
PATH_TO_COLLECTION,
|
| 36 |
-
item_id=model,
|
| 37 |
-
item_type="model",
|
| 38 |
-
exists_ok=True,
|
| 39 |
-
note=f"Best {model_type.to_str(' ')} model of around {size} on the leaderboard today!",
|
| 40 |
-
token=H4_TOKEN,
|
| 41 |
-
)
|
| 42 |
-
# Ensure position is correct if item was added
|
| 43 |
-
if len(collection.items) > cur_len_collection:
|
| 44 |
-
item_object_id = collection.items[-1].item_object_id
|
| 45 |
-
update_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_object_id, position=ix)
|
| 46 |
-
cur_len_collection = len(collection.items)
|
| 47 |
-
break # assuming we only add the top model
|
| 48 |
-
except HfHubHTTPError:
|
| 49 |
-
continue
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
def update_collections(df: DataFrame):
|
| 53 |
-
"""Update collections by filtering and adding the best models."""
|
| 54 |
-
collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)
|
| 55 |
-
cur_best_models = []
|
| 56 |
-
|
| 57 |
-
for model_type in ModelType:
|
| 58 |
-
if not model_type.value.name:
|
| 59 |
-
continue
|
| 60 |
-
for size, interval in intervals.items():
|
| 61 |
-
filtered_df = _filter_by_type_and_size(df, model_type, interval)
|
| 62 |
-
best_models = list(
|
| 63 |
-
filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)[AutoEvalColumn.fullname.name][:10]
|
| 64 |
-
)
|
| 65 |
-
print(model_type.value.symbol, size, best_models)
|
| 66 |
-
_add_models_to_collection(collection, best_models, model_type, size)
|
| 67 |
-
cur_best_models.extend(best_models)
|
| 68 |
-
|
| 69 |
-
# Cleanup
|
| 70 |
-
existing_models = {item.item_id for item in collection.items}
|
| 71 |
-
to_remove = existing_models - set(cur_best_models)
|
| 72 |
-
for item_id in to_remove:
|
| 73 |
-
try:
|
| 74 |
-
delete_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_id, token=H4_TOKEN)
|
| 75 |
-
except HfHubHTTPError:
|
| 76 |
-
continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/tools/model_backlinks.py
DELETED
|
@@ -1,1309 +0,0 @@
|
|
| 1 |
-
models = [
|
| 2 |
-
"uni-tianyan/Uni-TianYan",
|
| 3 |
-
"fangloveskari/ORCA_LLaMA_70B_QLoRA",
|
| 4 |
-
"garage-bAInd/Platypus2-70B-instruct",
|
| 5 |
-
"upstage/Llama-2-70b-instruct-v2",
|
| 6 |
-
"fangloveskari/Platypus_QLoRA_LLaMA_70b",
|
| 7 |
-
"yeontaek/llama-2-70B-ensemble-v5",
|
| 8 |
-
"TheBloke/Genz-70b-GPTQ",
|
| 9 |
-
"TheBloke/Platypus2-70B-Instruct-GPTQ",
|
| 10 |
-
"psmathur/model_007",
|
| 11 |
-
"yeontaek/llama-2-70B-ensemble-v4",
|
| 12 |
-
"psmathur/orca_mini_v3_70b",
|
| 13 |
-
"ehartford/Samantha-1.11-70b",
|
| 14 |
-
"MayaPH/GodziLLa2-70B",
|
| 15 |
-
"psmathur/model_007_v2",
|
| 16 |
-
"chargoddard/MelangeA-70b",
|
| 17 |
-
"ehartford/Samantha-1.1-70b",
|
| 18 |
-
"psmathur/model_009",
|
| 19 |
-
"upstage/Llama-2-70b-instruct",
|
| 20 |
-
"yeontaek/llama-2-70B-ensemble-v7",
|
| 21 |
-
"yeontaek/llama-2-70B-ensemble-v6",
|
| 22 |
-
"chargoddard/MelangeB-70b",
|
| 23 |
-
"yeontaek/llama-2-70B-ensemble-v3",
|
| 24 |
-
"chargoddard/MelangeC-70b",
|
| 25 |
-
"garage-bAInd/Camel-Platypus2-70B",
|
| 26 |
-
"yeontaek/llama-2-70B-ensemble-v2",
|
| 27 |
-
"garage-bAInd/Camel-Platypus2-70B",
|
| 28 |
-
"migtissera/Synthia-70B-v1.2",
|
| 29 |
-
"v2ray/LLaMA-2-Wizard-70B-QLoRA",
|
| 30 |
-
"quantumaikr/llama-2-70b-fb16-orca-chat-10k",
|
| 31 |
-
"v2ray/LLaMA-2-Wizard-70B-QLoRA",
|
| 32 |
-
"stabilityai/StableBeluga2",
|
| 33 |
-
"quantumaikr/llama-2-70b-fb16-guanaco-1k",
|
| 34 |
-
"garage-bAInd/Camel-Platypus2-70B",
|
| 35 |
-
"migtissera/Synthia-70B-v1.1",
|
| 36 |
-
"migtissera/Synthia-70B",
|
| 37 |
-
"psmathur/model_101",
|
| 38 |
-
"augtoma/qCammel70",
|
| 39 |
-
"augtoma/qCammel-70",
|
| 40 |
-
"augtoma/qCammel-70v1",
|
| 41 |
-
"augtoma/qCammel-70x",
|
| 42 |
-
"augtoma/qCammel-70-x",
|
| 43 |
-
"jondurbin/airoboros-l2-70b-gpt4-1.4.1",
|
| 44 |
-
"dfurman/llama-2-70b-dolphin-peft",
|
| 45 |
-
"jondurbin/airoboros-l2-70b-2.1",
|
| 46 |
-
"TheBloke/llama-2-70b-Guanaco-QLoRA-fp16",
|
| 47 |
-
"quantumaikr/QuantumLM-llama2-70B-Korean-LoRA",
|
| 48 |
-
"quantumaikr/quantumairk-llama-2-70B-instruct",
|
| 49 |
-
"psmathur/model_420",
|
| 50 |
-
"psmathur/model_51",
|
| 51 |
-
"garage-bAInd/Camel-Platypus2-70B",
|
| 52 |
-
"TheBloke/Airoboros-L2-70B-2.1-GPTQ",
|
| 53 |
-
"OpenAssistant/llama2-70b-oasst-sft-v10",
|
| 54 |
-
"garage-bAInd/Platypus2-70B",
|
| 55 |
-
"liuxiang886/llama2-70B-qlora-gpt4",
|
| 56 |
-
"upstage/llama-65b-instruct",
|
| 57 |
-
"quantumaikr/llama-2-70b-fb16-korean",
|
| 58 |
-
"NousResearch/Nous-Hermes-Llama2-70b",
|
| 59 |
-
"v2ray/LLaMA-2-Jannie-70B-QLoRA",
|
| 60 |
-
"jondurbin/airoboros-l2-70b-gpt4-m2.0",
|
| 61 |
-
"jondurbin/airoboros-l2-70b-gpt4-m2.0",
|
| 62 |
-
"OpenAssistant/llama2-70b-oasst-sft-v10",
|
| 63 |
-
"yeontaek/llama-2-70B-ensemble-v8",
|
| 64 |
-
"jondurbin/airoboros-l2-70b-gpt4-2.0",
|
| 65 |
-
"jarradh/llama2_70b_chat_uncensored",
|
| 66 |
-
"WizardLM/WizardMath-70B-V1.0",
|
| 67 |
-
"jordiclive/Llama-2-70b-oasst-1-200",
|
| 68 |
-
"WizardLM/WizardMath-70B-V1.0",
|
| 69 |
-
"jondurbin/airoboros-l2-70b-gpt4-2.0",
|
| 70 |
-
"OpenLemur/lemur-70b-chat-v1",
|
| 71 |
-
"tiiuae/falcon-180B",
|
| 72 |
-
"tiiuae/falcon-180B",
|
| 73 |
-
"stabilityai/StableBeluga1-Delta",
|
| 74 |
-
"psmathur/model_42_70b",
|
| 75 |
-
"psmathur/test_42_70b",
|
| 76 |
-
"TheBloke/fiction.live-Kimiko-V2-70B-fp16",
|
| 77 |
-
"tiiuae/falcon-180B",
|
| 78 |
-
"WizardLM/WizardMath-70B-V1.0",
|
| 79 |
-
"tiiuae/falcon-180B-chat",
|
| 80 |
-
"jondurbin/airoboros-l2-70b-gpt4-2.0",
|
| 81 |
-
"ehartford/samantha-1.1-llama-33b",
|
| 82 |
-
"ajibawa-2023/scarlett-33b",
|
| 83 |
-
"ddobokki/Llama-2-70b-orca-200k",
|
| 84 |
-
"TheBloke/gpt4-alpaca-lora_mlp-65B-HF",
|
| 85 |
-
"tiiuae/falcon-180B-chat",
|
| 86 |
-
"tiiuae/falcon-180B-chat",
|
| 87 |
-
"tiiuae/falcon-180B",
|
| 88 |
-
"TheBloke/Lemur-70B-Chat-v1-GPTQ",
|
| 89 |
-
"NousResearch/Nous-Puffin-70B",
|
| 90 |
-
"WizardLM/WizardLM-70B-V1.0",
|
| 91 |
-
"WizardLM/WizardMath-70B-V1.0",
|
| 92 |
-
"meta-llama/Llama-2-70b-hf",
|
| 93 |
-
"TheBloke/Llama-2-70B-fp16",
|
| 94 |
-
"Weyaxi/llama-2-alpacagpt4-1000step",
|
| 95 |
-
"WizardLM/WizardLM-70B-V1.0",
|
| 96 |
-
"simsim314/WizardLM-70B-V1.0-HF",
|
| 97 |
-
"simsim314/WizardLM-70B-V1.0-HF",
|
| 98 |
-
"WizardLM/WizardLM-70B-V1.0",
|
| 99 |
-
"openbmb/UltraLM-65b",
|
| 100 |
-
"psmathur/model_420_preview",
|
| 101 |
-
"WizardLM/WizardLM-70B-V1.0",
|
| 102 |
-
"simsim314/WizardLM-70B-V1.0-HF",
|
| 103 |
-
"OpenBuddy/openbuddy-llama2-70b-v10.1-bf16",
|
| 104 |
-
"upstage/llama-30b-instruct-2048",
|
| 105 |
-
"jondurbin/airoboros-65b-gpt4-1.2",
|
| 106 |
-
"TheBloke/guanaco-65B-HF",
|
| 107 |
-
"jondurbin/airoboros-65b-gpt4-1.3",
|
| 108 |
-
"meta-llama/Llama-2-70b-chat-hf",
|
| 109 |
-
"ValiantLabs/ShiningValiant",
|
| 110 |
-
"Faradaylab/Aria-70B",
|
| 111 |
-
"lilloukas/GPlatty-30B",
|
| 112 |
-
"TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16",
|
| 113 |
-
"jondurbin/airoboros-65b-gpt4-1.4-peft",
|
| 114 |
-
"jondurbin/airoboros-65b-gpt4-1.4",
|
| 115 |
-
"jondurbin/airoboros-65b-gpt4-2.0",
|
| 116 |
-
"TheBloke/WizardLM-70B-V1.0-GPTQ",
|
| 117 |
-
"TheBloke/WizardLM-70B-V1.0-GPTQ",
|
| 118 |
-
"ariellee/SuperPlatty-30B",
|
| 119 |
-
"jondurbin/airoboros-65b-gpt4-1.4",
|
| 120 |
-
"jondurbin/airoboros-65b-gpt4-2.0",
|
| 121 |
-
"yeontaek/llama-2-70b-IA3-guanaco",
|
| 122 |
-
"CalderaAI/30B-Lazarus",
|
| 123 |
-
"Aspik101/trurl-2-13b-pl-instruct_unload",
|
| 124 |
-
"ehartford/WizardLM-33B-V1.0-Uncensored",
|
| 125 |
-
"ehartford/WizardLM-33B-V1.0-Uncensored",
|
| 126 |
-
"OpenBuddy/openbuddy-llama-65b-v8-bf16",
|
| 127 |
-
"Aspik101/llama-30b-instruct-2048-PL-lora",
|
| 128 |
-
"h2oai/h2ogpt-research-oasst1-llama-65b",
|
| 129 |
-
"Aspik101/llama-30b-instruct-2048-PL-lora",
|
| 130 |
-
"CalderaAI/30B-Epsilon",
|
| 131 |
-
"Aspik101/llama-30b-2048-instruct-PL-lora_unload",
|
| 132 |
-
"jondurbin/airoboros-65b-gpt4-m2.0",
|
| 133 |
-
"jondurbin/airoboros-65b-gpt4-m2.0",
|
| 134 |
-
"Aeala/Alpaca-elina-65b",
|
| 135 |
-
"TheBloke/robin-65b-v2-fp16",
|
| 136 |
-
"TheBloke/gpt4-alpaca-lora-30b-HF",
|
| 137 |
-
"TheBloke/Llama-2-70B-chat-GPTQ",
|
| 138 |
-
"upstage/llama-30b-instruct",
|
| 139 |
-
"OpenLemur/lemur-70b-v1",
|
| 140 |
-
"lmsys/vicuna-33b-v1.3",
|
| 141 |
-
"ausboss/llama-30b-supercot",
|
| 142 |
-
"ai-business/Luban-13B",
|
| 143 |
-
"Henk717/airochronos-33B",
|
| 144 |
-
"lmsys/vicuna-33b-v1.3",
|
| 145 |
-
"Henk717/airochronos-33B",
|
| 146 |
-
"bavest/fin-llama-33b-merged",
|
| 147 |
-
"jondurbin/airoboros-33b-gpt4-1.4",
|
| 148 |
-
"YeungNLP/firefly-llama-30b",
|
| 149 |
-
"Aspik101/30B-Lazarus-instruct-PL-lora_unload",
|
| 150 |
-
"uukuguy/speechless-llama2-luban-orca-platypus-13b",
|
| 151 |
-
"xxyyy123/test_merge_p_ov1_w0.66_w0.5_n1",
|
| 152 |
-
"jondurbin/airoboros-33b-gpt4-1.2",
|
| 153 |
-
"TheBloke/alpaca-lora-65B-HF",
|
| 154 |
-
"bofenghuang/vigogne-33b-instruct",
|
| 155 |
-
"yeontaek/llama-2-13B-ensemble-v5",
|
| 156 |
-
"garage-bAInd/Platypus-30B",
|
| 157 |
-
"Open-Orca/OpenOrca-Platypus2-13B",
|
| 158 |
-
"kajdun/viwaai-30b_v4",
|
| 159 |
-
"lilloukas/Platypus-30B",
|
| 160 |
-
"Open-Orca/OpenOrca-Platypus2-13B",
|
| 161 |
-
"Henk717/chronoboros-33B",
|
| 162 |
-
"jondurbin/airoboros-33b-2.1",
|
| 163 |
-
"HiTZ/alpaca-lora-65b-en-pt-es-ca",
|
| 164 |
-
"quantumaikr/QuantumLM-70B-hf",
|
| 165 |
-
"uukuguy/speechless-llama2-13b",
|
| 166 |
-
"uukuguy/speechless-llama2-hermes-orca-platypus-13b",
|
| 167 |
-
"openaccess-ai-collective/manticore-30b-chat-pyg-alpha",
|
| 168 |
-
"LLMs/WizardLM-30B-V1.0",
|
| 169 |
-
"TheBloke/WizardLM-30B-fp16",
|
| 170 |
-
"openaccess-ai-collective/hippogriff-30b-chat",
|
| 171 |
-
"concedo/Vicuzard-30B-Uncensored",
|
| 172 |
-
"TFLai/OpenOrca-Platypus2-13B-QLoRA-0.80-epoch",
|
| 173 |
-
"huggingface/llama-65b",
|
| 174 |
-
"huggyllama/llama-65b",
|
| 175 |
-
"gaodrew/gaodrew-llama-30b-instruct-2048-Open-Platypus-100steps",
|
| 176 |
-
"uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b",
|
| 177 |
-
"Sao10K/Mythical-Destroyer-V2-L2-13B",
|
| 178 |
-
"camel-ai/CAMEL-33B-Combined-Data",
|
| 179 |
-
"dsvv-cair/alpaca-cleaned-llama-30b-bf16",
|
| 180 |
-
"MetaIX/GPT4-X-Alpasta-30b",
|
| 181 |
-
"garage-bAInd/Stable-Platypus2-13B",
|
| 182 |
-
"TFLai/Luban-Platypus2-13B-QLora-0.80-epoch",
|
| 183 |
-
"TheBloke/OpenOrca-Platypus2-13B-GPTQ",
|
| 184 |
-
"IkariDev/Athena-tmp",
|
| 185 |
-
"OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16",
|
| 186 |
-
"OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16",
|
| 187 |
-
"Open-Orca/OpenOrcaxOpenChat-Preview2-13B",
|
| 188 |
-
"psmathur/model_007_13b_v2",
|
| 189 |
-
"Aspik101/Vicuzard-30B-Uncensored-instruct-PL-lora_unload",
|
| 190 |
-
"jondurbin/airoboros-33b-gpt4-m2.0",
|
| 191 |
-
"Sao10K/Mythical-Destroyer-L2-13B",
|
| 192 |
-
"TheBloke/Wizard-Vicuna-30B-Uncensored-fp16",
|
| 193 |
-
"ehartford/Wizard-Vicuna-30B-Uncensored",
|
| 194 |
-
"TFLai/Nova-13B",
|
| 195 |
-
"TheBloke/robin-33B-v2-fp16",
|
| 196 |
-
"totally-not-an-llm/PuddleJumper-13b",
|
| 197 |
-
"Aeala/VicUnlocked-alpaca-30b",
|
| 198 |
-
"Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf",
|
| 199 |
-
"jondurbin/airoboros-33b-gpt4",
|
| 200 |
-
"jondurbin/airoboros-33b-gpt4-m2.0",
|
| 201 |
-
"tiiuae/falcon-40b-instruct",
|
| 202 |
-
"psmathur/orca_mini_v3_13b",
|
| 203 |
-
"Aeala/GPT4-x-AlpacaDente-30b",
|
| 204 |
-
"MayaPH/GodziLLa-30B",
|
| 205 |
-
"jondurbin/airoboros-33b-gpt4-m2.0",
|
| 206 |
-
"TFLai/SpeechlessV1-Nova-13B",
|
| 207 |
-
"yeontaek/llama-2-13B-ensemble-v4",
|
| 208 |
-
"ajibawa-2023/carl-33b",
|
| 209 |
-
"jondurbin/airoboros-33b-gpt4-2.0",
|
| 210 |
-
"TFLai/Stable-Platypus2-13B-QLoRA-0.80-epoch",
|
| 211 |
-
"jondurbin/airoboros-33b-gpt4-1.3",
|
| 212 |
-
"TehVenom/oasst-sft-6-llama-33b-xor-MERGED-16bit",
|
| 213 |
-
"TFLai/OrcaMini-Platypus2-13B-QLoRA-0.80-epoch",
|
| 214 |
-
"jondurbin/airoboros-33b-gpt4-2.0",
|
| 215 |
-
"chargoddard/Chronorctypus-Limarobormes-13b",
|
| 216 |
-
"jondurbin/airoboros-33b-gpt4-1.3",
|
| 217 |
-
"Open-Orca/OpenOrca-Platypus2-13B",
|
| 218 |
-
"FelixChao/vicuna-33b-coder",
|
| 219 |
-
"FelixChao/vicuna-33b-coder",
|
| 220 |
-
"Gryphe/MythoMix-L2-13b",
|
| 221 |
-
"Aeala/Enterredaas-33b",
|
| 222 |
-
"yeontaek/llama-2-13B-ensemble-v1",
|
| 223 |
-
"TFLai/OpenOrcaPlatypus2-Platypus2-13B-QLora-0.80-epoch",
|
| 224 |
-
"TFLai/Ensemble5-Platypus2-13B-QLora-0.80-epoch",
|
| 225 |
-
"yeontaek/llama-2-13B-ensemble-v3",
|
| 226 |
-
"TFLai/MythoMix-Platypus2-13B-QLoRA-0.80-epoch",
|
| 227 |
-
"yihan6324/llama2-13b-instructmining-40k-sharegpt",
|
| 228 |
-
"timdettmers/guanaco-33b-merged",
|
| 229 |
-
"TFLai/EnsembleV5-Nova-13B",
|
| 230 |
-
"circulus/Llama-2-13b-orca-v1",
|
| 231 |
-
"Undi95/ReMM-SLERP-L2-13B",
|
| 232 |
-
"Gryphe/MythoMax-L2-13b",
|
| 233 |
-
"stabilityai/StableBeluga-13B",
|
| 234 |
-
"circulus/Llama-2-13b-orca-v1",
|
| 235 |
-
"ehartford/WizardLM-30B-Uncensored",
|
| 236 |
-
"The-Face-Of-Goonery/huginnv1.2",
|
| 237 |
-
"TheBloke/OpenOrcaxOpenChat-Preview2-13B-GPTQ",
|
| 238 |
-
"Sao10K/Stheno-L2-13B",
|
| 239 |
-
"bofenghuang/vigogne-2-13b-instruct",
|
| 240 |
-
"The-Face-Of-Goonery/Huginn-13b-FP16",
|
| 241 |
-
"grimpep/L2-MythoMax22b-instruct-Falseblock",
|
| 242 |
-
"TFLai/Nous-Hermes-Platypus2-13B-QLoRA-0.80-epoch",
|
| 243 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3-v4",
|
| 244 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3",
|
| 245 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3-ensemble",
|
| 246 |
-
"Open-Orca/LlongOrca-13B-16k",
|
| 247 |
-
"Sao10K/Stheno-Inverted-L2-13B",
|
| 248 |
-
"garage-bAInd/Camel-Platypus2-13B",
|
| 249 |
-
"digitous/Alpacino30b",
|
| 250 |
-
"NousResearch/Nous-Hermes-Llama2-13b",
|
| 251 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3-v3",
|
| 252 |
-
"TFLai/MythicalDestroyerV2-Platypus2-13B-QLora-0.80-epoch",
|
| 253 |
-
"TheBloke/VicUnlocked-30B-LoRA-HF",
|
| 254 |
-
"Undi95/Nous-Hermes-13B-Code",
|
| 255 |
-
"The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16",
|
| 256 |
-
"NousResearch/Nous-Hermes-Llama2-13b",
|
| 257 |
-
"Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b",
|
| 258 |
-
"TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ",
|
| 259 |
-
"Open-Orca/OpenOrcaxOpenChat-Preview2-13B",
|
| 260 |
-
"Austism/chronos-hermes-13b-v2",
|
| 261 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1",
|
| 262 |
-
"yeontaek/Platypus2xOpenOrca-13B-IA3-v2",
|
| 263 |
-
"Gryphe/MythoLogic-L2-13b",
|
| 264 |
-
"augtoma/qCammel-13",
|
| 265 |
-
"YeungNLP/firefly-llama2-13b-v1.2",
|
| 266 |
-
"Aspik101/StableBeluga-13B-instruct-PL-lora_unload",
|
| 267 |
-
"andreaskoepf/llama2-13b-megacode2_min100",
|
| 268 |
-
"rombodawg/LosslessMegaCoder-llama2-13b-mini",
|
| 269 |
-
"yulan-team/YuLan-Chat-2-13b-fp16",
|
| 270 |
-
"elinas/chronos-33b",
|
| 271 |
-
"YeungNLP/firefly-llama2-13b",
|
| 272 |
-
"Sao10K/Medusa-13b",
|
| 273 |
-
"OptimalScale/robin-65b-v2-delta",
|
| 274 |
-
"minlik/chinese-alpaca-33b-merged",
|
| 275 |
-
"OpenAssistant/llama2-13b-megacode2-oasst",
|
| 276 |
-
"TheBloke/OpenAssistant-SFT-7-Llama-30B-HF",
|
| 277 |
-
"Undi95/UndiMix-v1-13b",
|
| 278 |
-
"ehartford/Samantha-1.11-13b",
|
| 279 |
-
"beaugogh/Llama2-13b-sharegpt4",
|
| 280 |
-
"Aeala/GPT4-x-AlpacaDente2-30b",
|
| 281 |
-
"luffycodes/nash-vicuna-13b-v1dot5-ep2-w-rag-w-simple",
|
| 282 |
-
"WizardLM/WizardLM-13B-V1.1",
|
| 283 |
-
"uukuguy/speechless-orca-platypus-coig-lite-2k-0.6e-13b",
|
| 284 |
-
"huggyllama/llama-30b",
|
| 285 |
-
"Undi95/ReMM-L2-13B-PIPPA",
|
| 286 |
-
"Undi95/ReMM-L2-13B",
|
| 287 |
-
"gaodrew/gaodrew-gorgonzola-13b",
|
| 288 |
-
"lmsys/vicuna-13b-v1.5",
|
| 289 |
-
"yeontaek/Platypus2xOpenOrca-13B-LoRa",
|
| 290 |
-
"Yhyu13/llama-30B-hf-openassitant",
|
| 291 |
-
"huggingface/llama-30b",
|
| 292 |
-
"lmsys/vicuna-13b-v1.5",
|
| 293 |
-
"TFLai/Athena-Platypus2-13B-QLora-0.80-epoch",
|
| 294 |
-
"TheBloke/dromedary-65b-lora-HF",
|
| 295 |
-
"yeontaek/llama-2-13b-Beluga-QLoRA",
|
| 296 |
-
"The-Face-Of-Goonery/Huginn-13b-V4",
|
| 297 |
-
"The-Face-Of-Goonery/Huginn-13b-v4.5",
|
| 298 |
-
"The-Face-Of-Goonery/Huginn-v3-13b",
|
| 299 |
-
"tiiuae/falcon-40b",
|
| 300 |
-
"WhoTookMyAmogusNickname/NewHope_HF_not_official",
|
| 301 |
-
"gaodrew/OpenOrca-Platypus2-13B-thera-1250",
|
| 302 |
-
"SLAM-group/NewHope",
|
| 303 |
-
"garage-bAInd/Platypus2-13B",
|
| 304 |
-
"migtissera/Synthia-13B",
|
| 305 |
-
"elinas/chronos-13b-v2",
|
| 306 |
-
"mosaicml/mpt-30b-chat",
|
| 307 |
-
"CHIH-HUNG/llama-2-13b-OpenOrca_5w",
|
| 308 |
-
"uukuguy/speechless-hermes-coig-lite-13b",
|
| 309 |
-
"TheBloke/tulu-30B-fp16",
|
| 310 |
-
"uukuguy/speechless-hermes-coig-lite-13b",
|
| 311 |
-
"xDAN-AI/xDAN_13b_l2_lora",
|
| 312 |
-
"lmsys/vicuna-13b-v1.5-16k",
|
| 313 |
-
"openchat/openchat_v3.1",
|
| 314 |
-
"CHIH-HUNG/llama-2-13b-dolphin_5w",
|
| 315 |
-
"Aspik101/vicuna-13b-v1.5-PL-lora_unload",
|
| 316 |
-
"Undi95/MLewd-L2-13B",
|
| 317 |
-
"ehartford/minotaur-llama2-13b-qlora",
|
| 318 |
-
"kajdun/iubaris-13b-v3",
|
| 319 |
-
"TFLai/Limarp-Platypus2-13B-QLoRA-0.80-epoch",
|
| 320 |
-
"openchat/openchat_v3.1",
|
| 321 |
-
"uukuguy/speechless-orca-platypus-coig-lite-4k-0.6e-13b",
|
| 322 |
-
"ziqingyang/chinese-alpaca-2-13b",
|
| 323 |
-
"TFLai/Airboros2.1-Platypus2-13B-QLora-0.80-epoch",
|
| 324 |
-
"yeontaek/llama-2-13b-Guanaco-QLoRA",
|
| 325 |
-
"lmsys/vicuna-13b-v1.5-16k",
|
| 326 |
-
"ehartford/based-30b",
|
| 327 |
-
"kingbri/airolima-chronos-grad-l2-13B",
|
| 328 |
-
"openchat/openchat_v3.2",
|
| 329 |
-
"uukuguy/speechless-orca-platypus-coig-lite-4k-0.5e-13b",
|
| 330 |
-
"yeontaek/Platypus2-13B-LoRa",
|
| 331 |
-
"kingbri/chronolima-airo-grad-l2-13B",
|
| 332 |
-
"openchat/openchat_v3.2",
|
| 333 |
-
"TFLai/PuddleJumper-Platypus2-13B-QLoRA-0.80-epoch",
|
| 334 |
-
"shareAI/llama2-13b-Chinese-chat",
|
| 335 |
-
"ehartford/WizardLM-1.0-Uncensored-Llama2-13b",
|
| 336 |
-
"Aspik101/Redmond-Puffin-13B-instruct-PL-lora_unload",
|
| 337 |
-
"yeontaek/llama-2-13B-ensemble-v6",
|
| 338 |
-
"WizardLM/WizardLM-13B-V1.2",
|
| 339 |
-
"TheBloke/WizardLM-13B-V1.1-GPTQ",
|
| 340 |
-
"bhenrym14/airophin-13b-pntk-16k-fp16",
|
| 341 |
-
"ehartford/WizardLM-1.0-Uncensored-Llama2-13b",
|
| 342 |
-
"Mikael110/llama-2-13b-guanaco-fp16",
|
| 343 |
-
"yeontaek/airoboros-2.1-llama-2-13B-QLoRa",
|
| 344 |
-
"CalderaAI/13B-Legerdemain-L2",
|
| 345 |
-
"grimpep/llama2-22b-wizard_vicuna",
|
| 346 |
-
"grimpep/llama2-22B-GPLATTY",
|
| 347 |
-
"bhenrym14/airophin-13b-pntk-16k-fp16",
|
| 348 |
-
"yeontaek/llama-2-13b-QLoRA",
|
| 349 |
-
"OpenAssistant/llama2-13b-orca-8k-3319",
|
| 350 |
-
"TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16",
|
| 351 |
-
"duliadotio/dulia-13b-8k-alpha",
|
| 352 |
-
"Undi95/LewdEngine",
|
| 353 |
-
"OpenBuddy/openbuddy-llama2-13b-v8.1-fp16",
|
| 354 |
-
"CHIH-HUNG/llama-2-13b-open_orca_20w",
|
| 355 |
-
"bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16",
|
| 356 |
-
"FlagAlpha/Llama2-Chinese-13b-Chat",
|
| 357 |
-
"LLMs/WizardLM-13B-V1.0",
|
| 358 |
-
"chansung/gpt4-alpaca-lora-13b-decapoda-1024",
|
| 359 |
-
"TheBloke/wizardLM-13B-1.0-fp16",
|
| 360 |
-
"digitous/13B-Chimera",
|
| 361 |
-
"yeontaek/Platypus2xOpenOrcaxGuanaco-13B-LoRa",
|
| 362 |
-
"jondurbin/airoboros-l2-13b-2.1",
|
| 363 |
-
"Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b",
|
| 364 |
-
"TheBloke/UltraLM-13B-fp16",
|
| 365 |
-
"openaccess-ai-collective/minotaur-13b-fixed",
|
| 366 |
-
"NousResearch/Redmond-Puffin-13B",
|
| 367 |
-
"KoboldAI/LLaMA2-13B-Holomax",
|
| 368 |
-
"Lajonbot/WizardLM-13B-V1.2-PL-lora_unload",
|
| 369 |
-
"yeontaek/Platypus2-13B-LoRa-v2",
|
| 370 |
-
"TheBloke/airoboros-13B-HF",
|
| 371 |
-
"jondurbin/airoboros-13b",
|
| 372 |
-
"jjaaaww/posi_13b",
|
| 373 |
-
"CoolWP/llama-2-13b-guanaco-fp16",
|
| 374 |
-
"yeontaek/Platypus2-13B-QLoRa",
|
| 375 |
-
"h2oai/h2ogpt-research-oig-oasst1-512-30b",
|
| 376 |
-
"dfurman/llama-2-13b-guanaco-peft",
|
| 377 |
-
"NousResearch/Redmond-Puffin-13B",
|
| 378 |
-
"pe-nlp/llama-2-13b-platypus-vicuna-wizard",
|
| 379 |
-
"CHIH-HUNG/llama-2-13b-dolphin_20w",
|
| 380 |
-
"NousResearch/Nous-Hermes-13b",
|
| 381 |
-
"NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEconsE4",
|
| 382 |
-
"ehartford/Wizard-Vicuna-13B-Uncensored",
|
| 383 |
-
"TheBloke/Wizard-Vicuna-13B-Uncensored-HF",
|
| 384 |
-
"openchat/openchat_v3.2_super",
|
| 385 |
-
"bhenrym14/airophin-v2-13b-PI-8k-fp16",
|
| 386 |
-
"openaccess-ai-collective/manticore-13b",
|
| 387 |
-
"The-Face-Of-Goonery/Huginn-22b-Prototype",
|
| 388 |
-
"jphme/Llama-2-13b-chat-german",
|
| 389 |
-
"grimpep/llama2-28B-Airo03",
|
| 390 |
-
"TheBloke/Kimiko-v2-13B-fp16",
|
| 391 |
-
"FPHam/Free_Sydney_13b_HF",
|
| 392 |
-
"lmsys/vicuna-13b-v1.3",
|
| 393 |
-
"FelixChao/llama2-13b-math1.1",
|
| 394 |
-
"CalderaAI/13B-BlueMethod",
|
| 395 |
-
"meta-llama/Llama-2-13b-chat-hf",
|
| 396 |
-
"deepse/CodeUp-Llama-2-13b-chat-hf",
|
| 397 |
-
"WizardLM/WizardMath-13B-V1.0",
|
| 398 |
-
"WizardLM/WizardMath-13B-V1.0",
|
| 399 |
-
"HyperbeeAI/Tulpar-7b-v0",
|
| 400 |
-
"xxyyy123/test_qkvo_adptor",
|
| 401 |
-
"xxyyy123/mc_data_30k_from_platpus_orca_7b_10k_v1_lora_qkvo_rank14_v2",
|
| 402 |
-
"openchat/openchat_v2_w",
|
| 403 |
-
"FelixChao/llama2-13b-math1.1",
|
| 404 |
-
"psmathur/orca_mini_v3_7b",
|
| 405 |
-
"TehVenom/Metharme-13b-Merged",
|
| 406 |
-
"xxyyy123/10k_v1_lora_qkvo_rank14_v3",
|
| 407 |
-
"OpenAssistant/llama2-13b-orca-v2-8k-3166",
|
| 408 |
-
"openaccess-ai-collective/wizard-mega-13b",
|
| 409 |
-
"jondurbin/airoboros-13b-gpt4-1.4",
|
| 410 |
-
"jondurbin/airoboros-13b-gpt4-1.4-fp16",
|
| 411 |
-
"Monero/Manticore-13b-Chat-Pyg-Guanaco",
|
| 412 |
-
"FelixChao/llama2-13b-math1.2",
|
| 413 |
-
"chargoddard/platypus-2-22b-relora",
|
| 414 |
-
"FelixChao/llama2-13b-math1.2",
|
| 415 |
-
"Gryphe/MythoBoros-13b",
|
| 416 |
-
"CalderaAI/13B-Ouroboros",
|
| 417 |
-
"OpenAssistant/llama2-13b-orca-v2-8k-3166",
|
| 418 |
-
"heegyu/LIMA2-13b-hf",
|
| 419 |
-
"digitous/13B-HyperMantis",
|
| 420 |
-
"Gryphe/MythoLogic-13b",
|
| 421 |
-
"TheBloke/Airoboros-L2-13B-2.1-GPTQ",
|
| 422 |
-
"chargoddard/platypus2-22b-relora",
|
| 423 |
-
"openchat/openchat_v2",
|
| 424 |
-
"yeontaek/Platypus2-13B-IA3",
|
| 425 |
-
"stabilityai/StableBeluga-7B",
|
| 426 |
-
"circulus/Llama-2-7b-orca-v1",
|
| 427 |
-
"budecosystem/genz-13b-v2",
|
| 428 |
-
"TheBloke/gpt4-x-vicuna-13B-HF",
|
| 429 |
-
"NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEcons",
|
| 430 |
-
"zarakiquemparte/zarafusionex-1.1-l2-7b",
|
| 431 |
-
"Lajonbot/tableBeluga-7B-instruct-pl-lora_unload",
|
| 432 |
-
"jondurbin/airoboros-13b-gpt4",
|
| 433 |
-
"gaodrew/gaodrew-gorgonzola-13b",
|
| 434 |
-
"jondurbin/airoboros-13b-gpt4-1.1",
|
| 435 |
-
"TheBloke/gpt4-alpaca-lora-13B-HF",
|
| 436 |
-
"zarakiquemparte/zarablendex-vq-l2-7b",
|
| 437 |
-
"openaccess-ai-collective/manticore-13b-chat-pyg",
|
| 438 |
-
"Lajonbot/Llama-2-13b-hf-instruct-pl-lora_unload",
|
| 439 |
-
"NobodyExistsOnTheInternet/PuffedLIMA13bQLORA",
|
| 440 |
-
"xxyyy123/10k_v1_lora_qkvo_rank28_v2",
|
| 441 |
-
"jondurbin/airoboros-l2-13b-gpt4-1.4.1",
|
| 442 |
-
"dhmeltzer/Llama-2-13b-hf-eli5-wiki-1024_r_64_alpha_16",
|
| 443 |
-
"NobodyExistsOnTheInternet/PuffedConvo13bLoraE4",
|
| 444 |
-
"yihan6324/llama2-7b-instructmining-40k-sharegpt",
|
| 445 |
-
"CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w",
|
| 446 |
-
"Aeala/GPT4-x-Alpasta-13b",
|
| 447 |
-
"psmathur/orca_mini_v2_13b",
|
| 448 |
-
"YeungNLP/firefly-llama-13b",
|
| 449 |
-
"psmathur/orca_mini_v2_13b",
|
| 450 |
-
"zarakiquemparte/zarafusionix-l2-7b",
|
| 451 |
-
"yihan6324/llama2-7b-instructmining-60k-sharegpt",
|
| 452 |
-
"yihan6324/llama-2-7b-instructmining-60k-sharegpt",
|
| 453 |
-
"layoric/llama-2-13b-code-alpaca",
|
| 454 |
-
"bofenghuang/vigogne-13b-instruct",
|
| 455 |
-
"Lajonbot/vicuna-13b-v1.3-PL-lora_unload",
|
| 456 |
-
"lvkaokao/llama2-7b-hf-chat-lora-v3",
|
| 457 |
-
"ehartford/dolphin-llama-13b",
|
| 458 |
-
"YeungNLP/firefly-llama-13b-v1.2",
|
| 459 |
-
"TheBloke/Kimiko-13B-fp16",
|
| 460 |
-
"kevinpro/Vicuna-13B-CoT",
|
| 461 |
-
"eachadea/vicuna-13b-1.1",
|
| 462 |
-
"pillowtalks-ai/delta13b",
|
| 463 |
-
"TheBloke/vicuna-13B-1.1-HF",
|
| 464 |
-
"TheBloke/Vicuna-13B-CoT-fp16",
|
| 465 |
-
"lmsys/vicuna-13b-delta-v1.1",
|
| 466 |
-
"lmsys/vicuna-13b-v1.1",
|
| 467 |
-
"xxyyy123/20k_v1_lora_qkvo_rank14_v2",
|
| 468 |
-
"TheBloke/guanaco-13B-HF",
|
| 469 |
-
"TheBloke/vicuna-13b-v1.3.0-GPTQ",
|
| 470 |
-
"edor/Stable-Platypus2-mini-7B",
|
| 471 |
-
"totally-not-an-llm/EverythingLM-13b-V2-16k",
|
| 472 |
-
"zarakiquemparte/zaraxe-l2-7b",
|
| 473 |
-
"beaugogh/Llama2-7b-openorca-mc-v2",
|
| 474 |
-
"TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16",
|
| 475 |
-
"quantumaikr/QuantumLM",
|
| 476 |
-
"jondurbin/airoboros-13b-gpt4-1.2",
|
| 477 |
-
"TheBloke/robin-13B-v2-fp16",
|
| 478 |
-
"TFLai/llama-2-13b-4bit-alpaca-gpt4",
|
| 479 |
-
"yihan6324/llama2-7b-instructmining-orca-40k",
|
| 480 |
-
"dvruette/oasst-llama-13b-2-epochs",
|
| 481 |
-
"Open-Orca/LlongOrca-7B-16k",
|
| 482 |
-
"Aspik101/Nous-Hermes-13b-pl-lora_unload",
|
| 483 |
-
"ehartford/Samantha-1.11-CodeLlama-34b",
|
| 484 |
-
"nkpz/llama2-22b-chat-wizard-uncensored",
|
| 485 |
-
"bofenghuang/vigogne-13b-chat",
|
| 486 |
-
"beaugogh/Llama2-7b-openorca-mc-v1",
|
| 487 |
-
"OptimalScale/robin-13b-v2-delta",
|
| 488 |
-
"pe-nlp/llama-2-13b-vicuna-wizard",
|
| 489 |
-
"chargoddard/llama2-22b",
|
| 490 |
-
"gywy/llama2-13b-chinese-v1",
|
| 491 |
-
"frank098/Wizard-Vicuna-13B-juniper",
|
| 492 |
-
"IGeniusDev/llama13B-quant8-testv1-openorca-customdataset",
|
| 493 |
-
"CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-gate_up_down_proj",
|
| 494 |
-
"eachadea/vicuna-13b",
|
| 495 |
-
"yihan6324/llama2-7b-instructmining-orca-90k",
|
| 496 |
-
"chargoddard/llama2-22b-blocktriangular",
|
| 497 |
-
"luffycodes/mcq-vicuna-13b-v1.5",
|
| 498 |
-
"Yhyu13/chimera-inst-chat-13b-hf",
|
| 499 |
-
"luffycodes/mcq-vicuna-13b-v1.5",
|
| 500 |
-
"chargoddard/ypotryll-22b-epoch2-qlora",
|
| 501 |
-
"totally-not-an-llm/EverythingLM-13b-16k",
|
| 502 |
-
"luffycodes/mcq-hal-vicuna-13b-v1.5",
|
| 503 |
-
"openaccess-ai-collective/minotaur-13b",
|
| 504 |
-
"IGeniusDev/llama13B-quant8-testv1-openorca-customdataset",
|
| 505 |
-
"chargoddard/llama2-22b-blocktriangular",
|
| 506 |
-
"TFLai/Platypus2-13B-QLoRA-0.80-epoch",
|
| 507 |
-
"meta-llama/Llama-2-13b-hf",
|
| 508 |
-
"CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-gate_up_down_proj",
|
| 509 |
-
"luffycodes/mcq-hal-vicuna-13b-v1.5",
|
| 510 |
-
"TheBloke/Llama-2-13B-fp16",
|
| 511 |
-
"TaylorAI/Flash-Llama-13B",
|
| 512 |
-
"shareAI/bimoGPT-llama2-13b",
|
| 513 |
-
"wahaha1987/llama_13b_sharegpt94k_fastchat",
|
| 514 |
-
"openchat/openchat_8192",
|
| 515 |
-
"CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-q_k_v_o_proj",
|
| 516 |
-
"dvruette/llama-13b-pretrained-sft-do2",
|
| 517 |
-
"CHIH-HUNG/llama-2-13b-alpaca-test",
|
| 518 |
-
"OpenBuddy/openbuddy-llama2-13b-v11.1-bf16",
|
| 519 |
-
"CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w",
|
| 520 |
-
"project-baize/baize-v2-13b",
|
| 521 |
-
"jondurbin/airoboros-l2-13b-gpt4-m2.0",
|
| 522 |
-
"yeontaek/Platypus2xOpenOrca-13B-LoRa-v2",
|
| 523 |
-
"CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w",
|
| 524 |
-
"xzuyn/Alpacino-SuperCOT-13B",
|
| 525 |
-
"jondurbin/airoboros-l2-13b-gpt4-2.0",
|
| 526 |
-
"aiplanet/effi-13b",
|
| 527 |
-
"clibrain/Llama-2-13b-ft-instruct-es",
|
| 528 |
-
"CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w",
|
| 529 |
-
"bofenghuang/vigogne-2-7b-instruct",
|
| 530 |
-
"CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-q_k_v_o_proj",
|
| 531 |
-
"bofenghuang/vigogne-2-7b-chat",
|
| 532 |
-
"aiplanet/effi-13b",
|
| 533 |
-
"haonan-li/bactrian-x-llama-13b-merged",
|
| 534 |
-
"beaugogh/Llama2-7b-sharegpt4",
|
| 535 |
-
"HWERI/Llama2-7b-sharegpt4",
|
| 536 |
-
"jondurbin/airoboros-13b-gpt4-1.3",
|
| 537 |
-
"jondurbin/airoboros-c34b-2.1",
|
| 538 |
-
"junelee/wizard-vicuna-13b",
|
| 539 |
-
"TheBloke/wizard-vicuna-13B-HF",
|
| 540 |
-
"Open-Orca/OpenOrca-Preview1-13B",
|
| 541 |
-
"TheBloke/h2ogpt-oasst1-512-30B-HF",
|
| 542 |
-
"TheBloke/Llama-2-13B-GPTQ",
|
| 543 |
-
"camel-ai/CAMEL-13B-Combined-Data",
|
| 544 |
-
"lmsys/vicuna-7b-v1.5",
|
| 545 |
-
"lmsys/vicuna-7b-v1.5-16k",
|
| 546 |
-
"lmsys/vicuna-7b-v1.5",
|
| 547 |
-
"ausboss/llama-13b-supercot",
|
| 548 |
-
"TheBloke/tulu-13B-fp16",
|
| 549 |
-
"NousResearch/Nous-Hermes-llama-2-7b",
|
| 550 |
-
"jlevin/guanaco-13b-llama-2",
|
| 551 |
-
"lmsys/vicuna-7b-v1.5-16k",
|
| 552 |
-
"dvruette/llama-13b-pretrained",
|
| 553 |
-
"nkpz/llama2-22b-daydreamer-v3",
|
| 554 |
-
"dvruette/llama-13b-pretrained-dropout",
|
| 555 |
-
"jondurbin/airoboros-l2-13b-2.1",
|
| 556 |
-
"LLMs/Stable-Vicuna-13B",
|
| 557 |
-
"64bits/LexPodLM-13B",
|
| 558 |
-
"lizhuang144/llama_mirror_13b_v1.0",
|
| 559 |
-
"TheBloke/stable-vicuna-13B-HF",
|
| 560 |
-
"zarakiquemparte/zaraxls-l2-7b",
|
| 561 |
-
"TheBloke/Llama-2-13B-GPTQ",
|
| 562 |
-
"Kiddyz/testlm-3",
|
| 563 |
-
"migtissera/Synthia-7B",
|
| 564 |
-
"zarakiquemparte/zarablend-l2-7b",
|
| 565 |
-
"mosaicml/mpt-30b-instruct",
|
| 566 |
-
"PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged",
|
| 567 |
-
"vonjack/Qwen-LLaMAfied-HFTok-7B-Chat",
|
| 568 |
-
"l3utterfly/llama2-7b-layla",
|
| 569 |
-
"Lajonbot/vicuna-7b-v1.5-PL-lora_unload",
|
| 570 |
-
"heegyu/LIMA-13b-hf",
|
| 571 |
-
"frank098/WizardLM_13B_juniper",
|
| 572 |
-
"ashercn97/manatee-7b",
|
| 573 |
-
"chavinlo/gpt4-x-alpaca",
|
| 574 |
-
"PocketDoc/Dans-PersonalityEngine-13b",
|
| 575 |
-
"ehartford/WizardLM-1.0-Uncensored-CodeLlama-34b",
|
| 576 |
-
"digitous/Alpacino13b",
|
| 577 |
-
"edor/Hermes-Platypus2-mini-7B",
|
| 578 |
-
"lvkaokao/llama2-7b-hf-chat-lora-v2",
|
| 579 |
-
"Kiddyz/testlm-1-1",
|
| 580 |
-
"Kiddyz/testlm",
|
| 581 |
-
"Kiddyz/testlm-1",
|
| 582 |
-
"Kiddyz/testlm2",
|
| 583 |
-
"radm/Philosophy-Platypus2-13b",
|
| 584 |
-
"aiplanet/effi-13b",
|
| 585 |
-
"Harshvir/Llama-2-7B-physics",
|
| 586 |
-
"YeungNLP/firefly-ziya-13b",
|
| 587 |
-
"LinkSoul/Chinese-Llama-2-7b",
|
| 588 |
-
"PeanutJar/LLaMa-2-PeanutButter_v10-7B",
|
| 589 |
-
"OpenBuddy/openbuddy-llama2-13b-v11-bf16",
|
| 590 |
-
"StudentLLM/Alpagasus-2-13B-QLoRA-pipeline",
|
| 591 |
-
"meta-llama/Llama-2-13b-hf",
|
| 592 |
-
"WizardLM/WizardCoder-Python-34B-V1.0",
|
| 593 |
-
"dvruette/llama-13b-pretrained-sft-epoch-1",
|
| 594 |
-
"camel-ai/CAMEL-13B-Role-Playing-Data",
|
| 595 |
-
"ziqingyang/chinese-llama-2-13b",
|
| 596 |
-
"rombodawg/LosslessMegaCoder-llama2-7b-mini",
|
| 597 |
-
"TheBloke/koala-13B-HF",
|
| 598 |
-
"lmsys/vicuna-7b-delta-v1.1",
|
| 599 |
-
"eachadea/vicuna-7b-1.1",
|
| 600 |
-
"Ejafa/vicuna_7B_vanilla_1.1",
|
| 601 |
-
"lvkaokao/llama2-7b-hf-chat-lora",
|
| 602 |
-
"OpenBuddy/openbuddy-atom-13b-v9-bf16",
|
| 603 |
-
"Norquinal/llama-2-7b-claude-chat-rp",
|
| 604 |
-
"Danielbrdz/Barcenas-7b",
|
| 605 |
-
"heegyu/WizardVicuna2-13b-hf",
|
| 606 |
-
"meta-llama/Llama-2-7b-chat-hf",
|
| 607 |
-
"PeanutJar/LLaMa-2-PeanutButter_v14-7B",
|
| 608 |
-
"PeanutJar/LLaMa-2-PeanutButter_v4-7B",
|
| 609 |
-
"davzoku/cria-llama2-7b-v1.3",
|
| 610 |
-
"OpenBuddy/openbuddy-atom-13b-v9-bf16",
|
| 611 |
-
"lvkaokao/llama2-7b-hf-instruction-lora",
|
| 612 |
-
"Tap-M/Luna-AI-Llama2-Uncensored",
|
| 613 |
-
"ehartford/Samantha-1.11-7b",
|
| 614 |
-
"WizardLM/WizardCoder-Python-34B-V1.0",
|
| 615 |
-
"TheBloke/Manticore-13B-Chat-Pyg-Guanaco-SuperHOT-8K-GPTQ",
|
| 616 |
-
"Mikael110/llama-2-7b-guanaco-fp16",
|
| 617 |
-
"garage-bAInd/Platypus2-7B",
|
| 618 |
-
"PeanutJar/LLaMa-2-PeanutButter_v18_B-7B",
|
| 619 |
-
"mosaicml/mpt-30b",
|
| 620 |
-
"garage-bAInd/Platypus2-7B",
|
| 621 |
-
"huggingface/llama-13b",
|
| 622 |
-
"dvruette/oasst-llama-13b-1000-steps",
|
| 623 |
-
"jordiclive/gpt4all-alpaca-oa-codealpaca-lora-13b",
|
| 624 |
-
"huggyllama/llama-13b",
|
| 625 |
-
"Voicelab/trurl-2-7b",
|
| 626 |
-
"TFLai/llama-13b-4bit-alpaca",
|
| 627 |
-
"gywy/llama2-13b-chinese-v2",
|
| 628 |
-
"lmsys/longchat-13b-16k",
|
| 629 |
-
"Aspik101/trurl-2-7b-pl-instruct_unload",
|
| 630 |
-
"WizardLM/WizardMath-7B-V1.0",
|
| 631 |
-
"Norquinal/llama-2-7b-claude-chat",
|
| 632 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-dpo",
|
| 633 |
-
"HuggingFaceH4/starchat-beta",
|
| 634 |
-
"joehuangx/spatial-vicuna-7b-v1.5-LoRA",
|
| 635 |
-
"conceptofmind/LLongMA-2-13b-16k",
|
| 636 |
-
"tianyil1/denas-llama2",
|
| 637 |
-
"lmsys/vicuna-7b-v1.3",
|
| 638 |
-
"conceptofmind/LLongMA-2-13b-16k",
|
| 639 |
-
"openchat/opencoderplus",
|
| 640 |
-
"ajibawa-2023/scarlett-7b",
|
| 641 |
-
"dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged",
|
| 642 |
-
"psyche/kollama2-7b-v2",
|
| 643 |
-
"heegyu/LIMA2-7b-hf",
|
| 644 |
-
"dhmeltzer/llama-7b-SFT-qlora-eli5-wiki_DPO_ds_RM_top_2_1024_r_64_alpha_16",
|
| 645 |
-
"abhishek/llama2guanacotest",
|
| 646 |
-
"jondurbin/airoboros-l2-7b-2.1",
|
| 647 |
-
"llama-anon/instruct-13b",
|
| 648 |
-
"FelixChao/vicuna-7B-physics",
|
| 649 |
-
"Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload",
|
| 650 |
-
"shibing624/chinese-alpaca-plus-13b-hf",
|
| 651 |
-
"davzoku/cria-llama2-7b-v1.3_peft",
|
| 652 |
-
"quantumaikr/llama-2-7b-hf-guanaco-1k",
|
| 653 |
-
"togethercomputer/Llama-2-7B-32K-Instruct",
|
| 654 |
-
"sia-ai/llama-2-7b-1-percent-open-orca-1000-steps-v0",
|
| 655 |
-
"TheTravellingEngineer/llama2-7b-hf-guanaco",
|
| 656 |
-
"Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload",
|
| 657 |
-
"jondurbin/airoboros-l2-7b-gpt4-1.4.1",
|
| 658 |
-
"wahaha1987/llama_7b_sharegpt94k_fastchat",
|
| 659 |
-
"FelixChao/vicuna-7B-chemical",
|
| 660 |
-
"TinyPixel/llama2-7b-oa",
|
| 661 |
-
"chaoyi-wu/MedLLaMA_13B",
|
| 662 |
-
"edor/Platypus2-mini-7B",
|
| 663 |
-
"RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT",
|
| 664 |
-
"venkycs/llama-v2-7b-32kC-Security",
|
| 665 |
-
"psyche/kollama2-7b",
|
| 666 |
-
"Fredithefish/Guanaco-7B-Uncensored",
|
| 667 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-guanaco",
|
| 668 |
-
"ehartford/WizardLM-13B-Uncensored",
|
| 669 |
-
"PocketDoc/Dans-CreepingSenseOfDoom",
|
| 670 |
-
"wenge-research/yayi-7b-llama2",
|
| 671 |
-
"georgesung/llama2_7b_chat_uncensored",
|
| 672 |
-
"TinyPixel/llama2-7b-instruct",
|
| 673 |
-
"quantumaikr/QuantumLM-7B",
|
| 674 |
-
"xzuyn/MedicWizard-7B",
|
| 675 |
-
"wenge-research/yayi-7b-llama2",
|
| 676 |
-
"TinyPixel/lima-test",
|
| 677 |
-
"elyza/ELYZA-japanese-Llama-2-7b-instruct",
|
| 678 |
-
"lgaalves/llama-2-7b-hf_open-platypus",
|
| 679 |
-
"ziqingyang/chinese-alpaca-2-7b",
|
| 680 |
-
"TehVenom/Pygmalion-Vicuna-1.1-7b",
|
| 681 |
-
"meta-llama/Llama-2-7b-hf",
|
| 682 |
-
"bongchoi/test-llama2-7b",
|
| 683 |
-
"TaylorAI/Flash-Llama-7B",
|
| 684 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v2",
|
| 685 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v4",
|
| 686 |
-
"kashif/stack-llama-2",
|
| 687 |
-
"PeanutJar/LLaMa-2-PeanutButter_v18_A-7B",
|
| 688 |
-
"ToolBench/ToolLLaMA-7b-LoRA",
|
| 689 |
-
"Monero/WizardLM-13b-OpenAssistant-Uncensored",
|
| 690 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v2",
|
| 691 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v4",
|
| 692 |
-
"mrm8488/llama-2-coder-7b",
|
| 693 |
-
"elyza/ELYZA-japanese-Llama-2-7b-fast-instruct",
|
| 694 |
-
"clibrain/Llama-2-7b-ft-instruct-es",
|
| 695 |
-
"medalpaca/medalpaca-7b",
|
| 696 |
-
"TheBloke/tulu-7B-fp16",
|
| 697 |
-
"OpenBuddy/openbuddy-openllama-13b-v7-fp16",
|
| 698 |
-
"TaylorAI/FLAN-Llama-7B-2_Llama2-7B-Flash_868_full_model",
|
| 699 |
-
"Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload",
|
| 700 |
-
"jondurbin/airoboros-l2-7b-gpt4-2.0",
|
| 701 |
-
"dhmeltzer/llama-7b-SFT_ds_eli5_1024_r_64_alpha_16_merged",
|
| 702 |
-
"GOAT-AI/GOAT-7B-Community",
|
| 703 |
-
"AtomEchoAI/AtomGPT_56k",
|
| 704 |
-
"julianweng/Llama-2-7b-chat-orcah",
|
| 705 |
-
"TehVenom/Pygmalion-13b-Merged",
|
| 706 |
-
"jondurbin/airoboros-7b-gpt4-1.1",
|
| 707 |
-
"dhmeltzer/llama-7b-SFT_ds_wiki65k_1024_r_64_alpha_16_merged",
|
| 708 |
-
"bofenghuang/vigogne-7b-chat",
|
| 709 |
-
"lmsys/longchat-7b-v1.5-32k",
|
| 710 |
-
"jondurbin/airoboros-l2-7b-gpt4-m2.0",
|
| 711 |
-
"synapsoft/Llama-2-7b-chat-hf-flan2022-1.2M",
|
| 712 |
-
"jondurbin/airoboros-7b-gpt4-1.4",
|
| 713 |
-
"Charlie911/vicuna-7b-v1.5-lora-mctaco",
|
| 714 |
-
"yihan6324/instructmining-platypus-15k",
|
| 715 |
-
"meta-llama/Llama-2-7b-hf",
|
| 716 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v3",
|
| 717 |
-
"quantumaikr/KoreanLM-hf",
|
| 718 |
-
"openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ckpt-hf",
|
| 719 |
-
"TheBloke/Llama-2-7B-GPTQ",
|
| 720 |
-
"TheBloke/Llama-2-7B-GPTQ",
|
| 721 |
-
"LLMs/AlpacaGPT4-7B-elina",
|
| 722 |
-
"ehartford/Wizard-Vicuna-7B-Uncensored",
|
| 723 |
-
"TheBloke/Wizard-Vicuna-7B-Uncensored-HF",
|
| 724 |
-
"TheTravellingEngineer/llama2-7b-chat-hf-v3",
|
| 725 |
-
"golaxy/gowizardlm",
|
| 726 |
-
"ehartford/dolphin-llama2-7b",
|
| 727 |
-
"CHIH-HUNG/llama-2-7b-dolphin_10w-test",
|
| 728 |
-
"mncai/chatdoctor",
|
| 729 |
-
"psyche/kollama2-7b-v3",
|
| 730 |
-
"jondurbin/airoboros-7b-gpt4",
|
| 731 |
-
"jondurbin/airoboros-7b",
|
| 732 |
-
"TheBloke/airoboros-7b-gpt4-fp16",
|
| 733 |
-
"mosaicml/mpt-7b-8k-chat",
|
| 734 |
-
"elyza/ELYZA-japanese-Llama-2-7b",
|
| 735 |
-
"bofenghuang/vigogne-7b-instruct",
|
| 736 |
-
"jxhong/CAlign-alpaca-7b",
|
| 737 |
-
"golaxy/goims",
|
| 738 |
-
"jondurbin/airoboros-7b-gpt4-1.2",
|
| 739 |
-
"jphme/orca_mini_v2_ger_7b",
|
| 740 |
-
"psmathur/orca_mini_v2_7b",
|
| 741 |
-
"notstoic/PygmalionCoT-7b",
|
| 742 |
-
"golaxy/gogpt2-13b",
|
| 743 |
-
"golaxy/gogpt2-13b-chat",
|
| 744 |
-
"togethercomputer/LLaMA-2-7B-32K",
|
| 745 |
-
"TheBloke/wizardLM-7B-HF",
|
| 746 |
-
"keyfan/vicuna-chinese-replication-v1.1",
|
| 747 |
-
"golaxy/gogpt2-7b",
|
| 748 |
-
"aiplanet/effi-7b",
|
| 749 |
-
"arver/llama7b-qlora",
|
| 750 |
-
"titan087/OpenLlama13B-Guanaco",
|
| 751 |
-
"chavinlo/alpaca-native",
|
| 752 |
-
"project-baize/baize-healthcare-lora-7B",
|
| 753 |
-
"AlpinDale/pygmalion-instruct",
|
| 754 |
-
"openlm-research/open_llama_13b",
|
| 755 |
-
"jondurbin/airoboros-7b-gpt4-1.3",
|
| 756 |
-
"elyza/ELYZA-japanese-Llama-2-7b-fast",
|
| 757 |
-
"jondurbin/airoboros-gpt-3.5-turbo-100k-7b",
|
| 758 |
-
"uukuguy/speechless-codellama-orca-13b",
|
| 759 |
-
"bigcode/starcoderplus",
|
| 760 |
-
"TheBloke/guanaco-7B-HF",
|
| 761 |
-
"Neko-Institute-of-Science/metharme-7b",
|
| 762 |
-
"TigerResearch/tigerbot-7b-base",
|
| 763 |
-
"golaxy/gogpt-7b",
|
| 764 |
-
"togethercomputer/LLaMA-2-7B-32K",
|
| 765 |
-
"yhyhy3/open_llama_7b_v2_med_instruct",
|
| 766 |
-
"ajibawa-2023/carl-7b",
|
| 767 |
-
"stabilityai/stablelm-base-alpha-7b-v2",
|
| 768 |
-
"conceptofmind/LLongMA-2-7b-16k",
|
| 769 |
-
"TehVenom/Pygmalion_AlpacaLora-7b",
|
| 770 |
-
"jondurbin/airoboros-7b-gpt4-1.4.1-qlora",
|
| 771 |
-
"wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard",
|
| 772 |
-
"ausboss/llama7b-wizardlm-unfiltered",
|
| 773 |
-
"project-baize/baize-v2-7b",
|
| 774 |
-
"LMFlow/Robin-v2",
|
| 775 |
-
"HanningZhang/Robin-v2",
|
| 776 |
-
"LMFlow/Robin-7b-v2",
|
| 777 |
-
"OptimalScale/robin-7b-v2-delta",
|
| 778 |
-
"uukuguy/speechless-codellama-platypus-13b",
|
| 779 |
-
"jerryjalapeno/nart-100k-7b",
|
| 780 |
-
"wenge-research/yayi-13b-llama2",
|
| 781 |
-
"fireballoon/baichuan-vicuna-chinese-7b",
|
| 782 |
-
"jlevin/guanaco-unchained-llama-2-7b",
|
| 783 |
-
"csitfun/llama-7b-logicot",
|
| 784 |
-
"DevaMalla/llama7b_alpaca_1gpu_bf16",
|
| 785 |
-
"WeOpenML/PandaLM-Alpaca-7B-v1",
|
| 786 |
-
"illuin/test-custom-llama",
|
| 787 |
-
"yeontaek/WizardCoder-Python-13B-LoRa",
|
| 788 |
-
"ashercn97/giraffe-7b",
|
| 789 |
-
"mosaicml/mpt-7b-chat",
|
| 790 |
-
"abhishek/autotrain-llama-alpaca-peft-52508123785",
|
| 791 |
-
"Neko-Institute-of-Science/pygmalion-7b",
|
| 792 |
-
"TFLai/llama-7b-4bit-alpaca",
|
| 793 |
-
"huggingface/llama-7b",
|
| 794 |
-
"TheBloke/Planner-7B-fp16",
|
| 795 |
-
"shibing624/chinese-llama-plus-13b-hf",
|
| 796 |
-
"AGI-inc/lora_moe_7b_baseline",
|
| 797 |
-
"DevaMalla/llama-base-7b",
|
| 798 |
-
"AGI-inc/lora_moe_7b",
|
| 799 |
-
"togethercomputer/GPT-JT-6B-v0",
|
| 800 |
-
"ehartford/WizardLM-7B-Uncensored",
|
| 801 |
-
"shibing624/chinese-alpaca-plus-7b-hf",
|
| 802 |
-
"beomi/llama-2-ko-7b",
|
| 803 |
-
"mosaicml/mpt-7b-8k-instruct",
|
| 804 |
-
"Enno-Ai/ennodata-7b",
|
| 805 |
-
"mosaicml/mpt-7b-instruct",
|
| 806 |
-
"facebook/opt-iml-max-30b",
|
| 807 |
-
"WeOpenML/Alpaca-7B-v1",
|
| 808 |
-
"TheBloke/Project-Baize-v2-7B-GPTQ",
|
| 809 |
-
"codellama/CodeLlama-13b-Instruct-hf",
|
| 810 |
-
"TheBloke/CodeLlama-13B-Instruct-fp16",
|
| 811 |
-
"facebook/galactica-30b",
|
| 812 |
-
"FreedomIntelligence/phoenix-inst-chat-7b",
|
| 813 |
-
"openlm-research/open_llama_7b_v2",
|
| 814 |
-
"GeorgiaTechResearchInstitute/galpaca-30b",
|
| 815 |
-
"THUDM/chatglm2-6b",
|
| 816 |
-
"togethercomputer/GPT-JT-6B-v1",
|
| 817 |
-
"TheBloke/koala-7B-HF",
|
| 818 |
-
"nathan0/mpt_delta_tuned_model_v3",
|
| 819 |
-
"nathan0/mpt_delta_tuned_model_v2",
|
| 820 |
-
"GeorgiaTechResearchInstitute/galpaca-30b",
|
| 821 |
-
"JosephusCheung/Guanaco",
|
| 822 |
-
"shareAI/CodeLLaMA-chat-13b-Chinese",
|
| 823 |
-
"TigerResearch/tigerbot-7b-sft",
|
| 824 |
-
"Writer/InstructPalmyra-20b",
|
| 825 |
-
"OpenAssistant/codellama-13b-oasst-sft-v10",
|
| 826 |
-
"bigscience/bloomz-7b1-mt",
|
| 827 |
-
"nathan0/mpt_delta_tuned_model_v3",
|
| 828 |
-
"VMware/open-llama-7b-open-instruct",
|
| 829 |
-
"baichuan-inc/Baichuan-7B",
|
| 830 |
-
"anas-awadalla/mpt-7b",
|
| 831 |
-
"mosaicml/mpt-7b",
|
| 832 |
-
"bigscience/bloomz-7b1",
|
| 833 |
-
"ziqingyang/chinese-llama-2-7b",
|
| 834 |
-
"OpenAssistant/codellama-13b-oasst-sft-v10",
|
| 835 |
-
"wenge-research/yayi-7b",
|
| 836 |
-
"tiiuae/falcon-7b",
|
| 837 |
-
"togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1",
|
| 838 |
-
"togethercomputer/RedPajama-INCITE-7B-Instruct",
|
| 839 |
-
"TheBloke/landmark-attention-llama7b-fp16",
|
| 840 |
-
"togethercomputer/GPT-JT-Moderation-6B",
|
| 841 |
-
"h2oai/h2ogpt-gm-oasst1-en-1024-20b",
|
| 842 |
-
"dvruette/gpt-neox-20b-full-precision",
|
| 843 |
-
"TehVenom/Moderator-Chan_GPT-JT-6b",
|
| 844 |
-
"dvruette/oasst-gpt-neox-20b-1000-steps",
|
| 845 |
-
"AlekseyKorshuk/pygmalion-6b-vicuna-chatml",
|
| 846 |
-
"facebook/opt-66b",
|
| 847 |
-
"Salesforce/codegen-16B-nl",
|
| 848 |
-
"Vmware/open-llama-7b-v2-open-instruct",
|
| 849 |
-
"mosaicml/mpt-7b-storywriter",
|
| 850 |
-
"acrastt/Marx-3B-V2",
|
| 851 |
-
"openlm-research/open_llama_7b",
|
| 852 |
-
"Fredithefish/ReasonixPajama-3B-HF",
|
| 853 |
-
"togethercomputer/GPT-NeoXT-Chat-Base-20B",
|
| 854 |
-
"psmathur/orca_mini_13b",
|
| 855 |
-
"RWKV/rwkv-raven-14b",
|
| 856 |
-
"h2oai/h2ogpt-oasst1-512-20b",
|
| 857 |
-
"acrastt/Marx-3B",
|
| 858 |
-
"klosax/open_llama_13b_600bt_preview",
|
| 859 |
-
"synapsoft/Llama-2-7b-hf-flan2022-1.2M",
|
| 860 |
-
"OpenAssistant/oasst-sft-1-pythia-12b",
|
| 861 |
-
"golaxy/gogpt-7b-bloom",
|
| 862 |
-
"Writer/palmyra-large",
|
| 863 |
-
"psmathur/orca_mini_7b",
|
| 864 |
-
"dvruette/oasst-pythia-12b-6000-steps",
|
| 865 |
-
"NousResearch/CodeLlama-13b-hf",
|
| 866 |
-
"codellama/CodeLlama-13b-hf",
|
| 867 |
-
"h2oai/h2ogpt-gm-oasst1-multilang-1024-20b",
|
| 868 |
-
"VMware/open-llama-0.7T-7B-open-instruct-v1.1",
|
| 869 |
-
"dvruette/oasst-pythia-12b-flash-attn-5000-steps",
|
| 870 |
-
"dvruette/oasst-gpt-neox-20b-3000-steps",
|
| 871 |
-
"RobbeD/OpenLlama-Platypus-3B",
|
| 872 |
-
"facebook/opt-30b",
|
| 873 |
-
"acrastt/Puma-3B",
|
| 874 |
-
"OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
|
| 875 |
-
"dvruette/oasst-pythia-12b-pretrained-sft",
|
| 876 |
-
"digitous/GPT-R",
|
| 877 |
-
"acrastt/Griffin-3B",
|
| 878 |
-
"togethercomputer/RedPajama-INCITE-Base-7B-v0.1",
|
| 879 |
-
"togethercomputer/RedPajama-INCITE-7B-Base",
|
| 880 |
-
"CobraMamba/mamba-gpt-3b-v3",
|
| 881 |
-
"Danielbrdz/CodeBarcenas-7b",
|
| 882 |
-
"l3utterfly/open-llama-3b-v2-layla",
|
| 883 |
-
"CobraMamba/mamba-gpt-3b-v2",
|
| 884 |
-
"OpenAssistant/pythia-12b-sft-v8-7k-steps",
|
| 885 |
-
"KoboldAI/GPT-NeoX-20B-Erebus",
|
| 886 |
-
"RobbeD/Orca-Platypus-3B",
|
| 887 |
-
"h2oai/h2ogpt-gm-oasst1-en-1024-12b",
|
| 888 |
-
"OpenAssistant/pythia-12b-sft-v8-2.5k-steps",
|
| 889 |
-
"AlekseyKorshuk/chatml-pyg-v1",
|
| 890 |
-
"togethercomputer/RedPajama-INCITE-Chat-7B-v0.1",
|
| 891 |
-
"togethercomputer/RedPajama-INCITE-7B-Chat",
|
| 892 |
-
"digitous/Javelin-R",
|
| 893 |
-
"dvruette/oasst-pythia-12b-reference",
|
| 894 |
-
"EleutherAI/gpt-neox-20b",
|
| 895 |
-
"KoboldAI/fairseq-dense-13B",
|
| 896 |
-
"OpenAssistant/pythia-12b-sft-v8-rlhf-2k-steps",
|
| 897 |
-
"codellama/CodeLlama-7b-Instruct-hf",
|
| 898 |
-
"digitous/Javelin-GPTJ",
|
| 899 |
-
"KoboldAI/GPT-NeoX-20B-Skein",
|
| 900 |
-
"digitous/Javalion-R",
|
| 901 |
-
"h2oai/h2ogpt-oasst1-512-12b",
|
| 902 |
-
"acrastt/Bean-3B",
|
| 903 |
-
"KoboldAI/GPT-J-6B-Skein",
|
| 904 |
-
"nomic-ai/gpt4all-j",
|
| 905 |
-
"databricks/dolly-v2-12b",
|
| 906 |
-
"TehVenom/Dolly_Shygmalion-6b-Dev_V8P2",
|
| 907 |
-
"databricks/dolly-v2-7b",
|
| 908 |
-
"Aspik101/WizardVicuna-Uncensored-3B-instruct-PL-lora_unload",
|
| 909 |
-
"digitous/Adventien-GPTJ",
|
| 910 |
-
"openlm-research/open_llama_3b_v2",
|
| 911 |
-
"RWKV/rwkv-4-14b-pile",
|
| 912 |
-
"Lazycuber/Janemalion-6B",
|
| 913 |
-
"OpenAssistant/pythia-12b-pre-v8-12.5k-steps",
|
| 914 |
-
"digitous/Janin-R",
|
| 915 |
-
"kfkas/Llama-2-ko-7b-Chat",
|
| 916 |
-
"heegyu/WizardVicuna-Uncensored-3B-0719",
|
| 917 |
-
"h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt",
|
| 918 |
-
"TaylorAI/Flash-Llama-3B",
|
| 919 |
-
"kfkas/Llama-2-ko-7b-Chat",
|
| 920 |
-
"digitous/Skegma-GPTJ",
|
| 921 |
-
"digitous/Javalion-GPTJ",
|
| 922 |
-
"Pirr/pythia-13b-deduped-green_devil",
|
| 923 |
-
"TehVenom/PPO_Shygmalion-V8p4_Dev-6b",
|
| 924 |
-
"dvruette/oasst-pythia-6.9b-4000-steps",
|
| 925 |
-
"heegyu/WizardVicuna-3B-0719",
|
| 926 |
-
"psmathur/orca_mini_3b",
|
| 927 |
-
"OpenAssistant/galactica-6.7b-finetuned",
|
| 928 |
-
"frank098/orca_mini_3b_juniper",
|
| 929 |
-
"PygmalionAI/pygmalion-6b",
|
| 930 |
-
"TehVenom/PPO_Pygway-V8p4_Dev-6b",
|
| 931 |
-
"TFLai/gpt-neox-20b-4bit-alpaca",
|
| 932 |
-
"Corianas/gpt-j-6B-Dolly",
|
| 933 |
-
"TehVenom/Dolly_Shygmalion-6b",
|
| 934 |
-
"digitous/Janin-GPTJ",
|
| 935 |
-
"TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4",
|
| 936 |
-
"EleutherAI/gpt-j-6b",
|
| 937 |
-
"KoboldAI/GPT-J-6B-Shinen",
|
| 938 |
-
"TehVenom/Dolly_Malion-6b",
|
| 939 |
-
"TehVenom/ChanMalion",
|
| 940 |
-
"Salesforce/codegen-6B-nl",
|
| 941 |
-
"Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4",
|
| 942 |
-
"KoboldAI/GPT-J-6B-Janeway",
|
| 943 |
-
"togethercomputer/RedPajama-INCITE-Chat-3B-v1",
|
| 944 |
-
"togethercomputer/Pythia-Chat-Base-7B",
|
| 945 |
-
"heegyu/RedTulu-Uncensored-3B-0719",
|
| 946 |
-
"KoboldAI/PPO_Pygway-6b-Mix",
|
| 947 |
-
"KoboldAI/OPT-13B-Erebus",
|
| 948 |
-
"KoboldAI/fairseq-dense-6.7B",
|
| 949 |
-
"EleutherAI/pythia-12b-deduped",
|
| 950 |
-
"pszemraj/pythia-6.9b-HC3",
|
| 951 |
-
"Fredithefish/Guanaco-3B-Uncensored-v2",
|
| 952 |
-
"facebook/opt-13b",
|
| 953 |
-
"TehVenom/GPT-J-Pyg_PPO-6B",
|
| 954 |
-
"EleutherAI/pythia-6.9b-deduped",
|
| 955 |
-
"Devio/test-1400",
|
| 956 |
-
"Fredithefish/Guanaco-3B-Uncensored",
|
| 957 |
-
"codellama/CodeLlama-7b-hf",
|
| 958 |
-
"acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1",
|
| 959 |
-
"Fredithefish/ScarletPajama-3B-HF",
|
| 960 |
-
"KoboldAI/OPT-13B-Nerybus-Mix",
|
| 961 |
-
"YeungNLP/firefly-bloom-7b1",
|
| 962 |
-
"DanielSc4/RedPajama-INCITE-Chat-3B-v1-RL-LoRA-8bit-test1",
|
| 963 |
-
"klosax/open_llama_7b_400bt_preview",
|
| 964 |
-
"KoboldAI/OPT-13B-Nerys-v2",
|
| 965 |
-
"TehVenom/PPO_Shygmalion-6b",
|
| 966 |
-
"amazon/LightGPT",
|
| 967 |
-
"KnutJaegersberg/black_goo_recipe_c",
|
| 968 |
-
"NousResearch/CodeLlama-7b-hf",
|
| 969 |
-
"togethercomputer/RedPajama-INCITE-Instruct-3B-v1",
|
| 970 |
-
"heegyu/WizardVicuna-open-llama-3b-v2",
|
| 971 |
-
"bigscience/bloom-7b1",
|
| 972 |
-
"Devio/test-22B",
|
| 973 |
-
"RWKV/rwkv-raven-7b",
|
| 974 |
-
"hakurei/instruct-12b",
|
| 975 |
-
"CobraMamba/mamba-gpt-3b",
|
| 976 |
-
"KnutJaegersberg/black_goo_recipe_a",
|
| 977 |
-
"acrastt/OmegLLaMA-3B",
|
| 978 |
-
"codellama/CodeLlama-7b-Instruct-hf",
|
| 979 |
-
"h2oai/h2ogpt-oig-oasst1-512-6_9b",
|
| 980 |
-
"KoboldAI/OPT-6.7B-Erebus",
|
| 981 |
-
"facebook/opt-6.7b",
|
| 982 |
-
"KnutJaegersberg/black_goo_recipe_d",
|
| 983 |
-
"KnutJaegersberg/LLongMA-3b-LIMA",
|
| 984 |
-
"KnutJaegersberg/black_goo_recipe_b",
|
| 985 |
-
"KoboldAI/OPT-6.7B-Nerybus-Mix",
|
| 986 |
-
"health360/Healix-3B",
|
| 987 |
-
"EleutherAI/pythia-12b",
|
| 988 |
-
"Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K",
|
| 989 |
-
"GeorgiaTechResearchInstitute/galactica-6.7b-evol-instruct-70k",
|
| 990 |
-
"h2oai/h2ogpt-oig-oasst1-256-6_9b",
|
| 991 |
-
"ikala/bloom-zh-3b-chat",
|
| 992 |
-
"Taekyoon/llama2-ko-7b-test",
|
| 993 |
-
"anhnv125/pygmalion-6b-roleplay",
|
| 994 |
-
"TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4",
|
| 995 |
-
"KoboldAI/OPT-6B-nerys-v2",
|
| 996 |
-
"Lazycuber/pyg-instruct-wizardlm",
|
| 997 |
-
"Devio/testC",
|
| 998 |
-
"KoboldAI/OPT-30B-Erebus",
|
| 999 |
-
"Fredithefish/CrimsonPajama",
|
| 1000 |
-
"togethercomputer/RedPajama-INCITE-Base-3B-v1",
|
| 1001 |
-
"bigscience/bloomz-3b",
|
| 1002 |
-
"conceptofmind/Open-LLongMA-3b",
|
| 1003 |
-
"RWKV/rwkv-4-7b-pile",
|
| 1004 |
-
"openlm-research/open_llama_3b",
|
| 1005 |
-
"ewof/koishi-instruct-3b",
|
| 1006 |
-
"DanielSc4/RedPajama-INCITE-Chat-3B-v1-FT-LoRA-8bit-test1",
|
| 1007 |
-
"cerebras/Cerebras-GPT-13B",
|
| 1008 |
-
"EleutherAI/pythia-6.7b",
|
| 1009 |
-
"aisquared/chopt-2_7b",
|
| 1010 |
-
"Azure99/blossom-v1-3b",
|
| 1011 |
-
"PSanni/Deer-3b",
|
| 1012 |
-
"bertin-project/bertin-gpt-j-6B-alpaca",
|
| 1013 |
-
"OpenBuddy/openbuddy-openllama-3b-v10-bf16",
|
| 1014 |
-
"KoboldAI/fairseq-dense-2.7B",
|
| 1015 |
-
"ehartford/CodeLlama-34b-Instruct-hf",
|
| 1016 |
-
"codellama/CodeLlama-34b-Instruct-hf",
|
| 1017 |
-
"TheBloke/CodeLlama-34B-Instruct-fp16",
|
| 1018 |
-
"h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2",
|
| 1019 |
-
"openlm-research/open_llama_7b_700bt_preview",
|
| 1020 |
-
"NbAiLab/nb-gpt-j-6B-alpaca",
|
| 1021 |
-
"KoboldAI/OPT-2.7B-Erebus",
|
| 1022 |
-
"Writer/camel-5b-hf",
|
| 1023 |
-
"EleutherAI/pythia-2.7b",
|
| 1024 |
-
"facebook/xglm-7.5B",
|
| 1025 |
-
"EleutherAI/pythia-2.8b-deduped",
|
| 1026 |
-
"klosax/open_llama_3b_350bt_preview",
|
| 1027 |
-
"klosax/openllama-3b-350bt",
|
| 1028 |
-
"KoboldAI/OPT-2.7B-Nerybus-Mix",
|
| 1029 |
-
"KoboldAI/GPT-J-6B-Adventure",
|
| 1030 |
-
"cerebras/Cerebras-GPT-6.7B",
|
| 1031 |
-
"TFLai/pythia-2.8b-4bit-alpaca",
|
| 1032 |
-
"facebook/opt-2.7b",
|
| 1033 |
-
"KoboldAI/OPT-2.7B-Nerys-v2",
|
| 1034 |
-
"bigscience/bloom-3b",
|
| 1035 |
-
"Devio/test100",
|
| 1036 |
-
"RWKV/rwkv-raven-3b",
|
| 1037 |
-
"Azure99/blossom-v2-3b",
|
| 1038 |
-
"codellama/CodeLlama-34b-Python-hf",
|
| 1039 |
-
"bhenrym14/airoboros-33b-gpt4-1.4.1-PI-8192-fp16",
|
| 1040 |
-
"EleutherAI/gpt-neo-2.7B",
|
| 1041 |
-
"danielhanchen/open_llama_3b_600bt_preview",
|
| 1042 |
-
"HuggingFaceH4/starchat-alpha",
|
| 1043 |
-
"pythainlp/wangchanglm-7.5B-sft-en-sharded",
|
| 1044 |
-
"beaugogh/pythia-1.4b-deduped-sharegpt",
|
| 1045 |
-
"HWERI/pythia-1.4b-deduped-sharegpt",
|
| 1046 |
-
"OpenAssistant/stablelm-7b-sft-v7-epoch-3",
|
| 1047 |
-
"codellama/CodeLlama-7b-Python-hf",
|
| 1048 |
-
"aisquared/chopt-1_3b",
|
| 1049 |
-
"PygmalionAI/metharme-1.3b",
|
| 1050 |
-
"Linly-AI/Chinese-LLaMA-2-13B-hf",
|
| 1051 |
-
"chargoddard/llama-2-34b-uncode",
|
| 1052 |
-
"RWKV/rwkv-4-3b-pile",
|
| 1053 |
-
"pythainlp/wangchanglm-7.5B-sft-enth",
|
| 1054 |
-
"MBZUAI/LaMini-GPT-1.5B",
|
| 1055 |
-
"Writer/palmyra-base",
|
| 1056 |
-
"KoboldAI/fairseq-dense-1.3B",
|
| 1057 |
-
"EleutherAI/pythia-1.4b-deduped",
|
| 1058 |
-
"MBZUAI/lamini-neo-1.3b",
|
| 1059 |
-
"h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt",
|
| 1060 |
-
"sartmis1/starcoder-finetune-openapi",
|
| 1061 |
-
"MayaPH/opt-flan-iml-6.7b",
|
| 1062 |
-
"facebook/xglm-4.5B",
|
| 1063 |
-
"WizardLM/WizardCoder-15B-V1.0",
|
| 1064 |
-
"facebook/opt-iml-max-1.3b",
|
| 1065 |
-
"stabilityai/stablelm-tuned-alpha-7b",
|
| 1066 |
-
"aisquared/dlite-v2-1_5b",
|
| 1067 |
-
"stabilityai/stablelm-base-alpha-7b",
|
| 1068 |
-
"sartmis1/starcoder-finetune-selfinstruct",
|
| 1069 |
-
"lizhuang144/starcoder_mirror",
|
| 1070 |
-
"bigcode/starcoder",
|
| 1071 |
-
"TheBloke/CodeLlama-34B-Python-fp16",
|
| 1072 |
-
"open-llm-leaderboard/bloomz-1b7-4bit-alpaca-auto-eval-adapter-applied",
|
| 1073 |
-
"ehartford/CodeLlama-34b-Python-hf",
|
| 1074 |
-
"codellama/CodeLlama-7b-Python-hf",
|
| 1075 |
-
"GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct",
|
| 1076 |
-
"LoupGarou/WizardCoder-Guanaco-15B-V1.0",
|
| 1077 |
-
"golaxy/gogpt-3b-bloom",
|
| 1078 |
-
"EleutherAI/pythia-1.3b",
|
| 1079 |
-
"codellama/CodeLlama-13b-Python-hf",
|
| 1080 |
-
"hakurei/lotus-12B",
|
| 1081 |
-
"NYTK/PULI-GPTrio",
|
| 1082 |
-
"facebook/opt-1.3b",
|
| 1083 |
-
"TheBloke/CodeLlama-13B-Python-fp16",
|
| 1084 |
-
"codellama/CodeLlama-13b-Python-hf",
|
| 1085 |
-
"RWKV/rwkv-raven-1b5",
|
| 1086 |
-
"PygmalionAI/pygmalion-2.7b",
|
| 1087 |
-
"bigscience/bloom-1b7",
|
| 1088 |
-
"gpt2-xl",
|
| 1089 |
-
"LoupGarou/WizardCoder-Guanaco-15B-V1.1",
|
| 1090 |
-
"RWKV/rwkv-4-1b5-pile",
|
| 1091 |
-
"codellama/CodeLlama-34b-hf",
|
| 1092 |
-
"NousResearch/CodeLlama-34b-hf",
|
| 1093 |
-
"rinna/bilingual-gpt-neox-4b-8k",
|
| 1094 |
-
"lxe/Cerebras-GPT-2.7B-Alpaca-SP",
|
| 1095 |
-
"cerebras/Cerebras-GPT-2.7B",
|
| 1096 |
-
"jzjiao/opt-1.3b-rlhf",
|
| 1097 |
-
"EleutherAI/gpt-neo-1.3B",
|
| 1098 |
-
"aisquared/dlite-v1-1_5b",
|
| 1099 |
-
"Corianas/Quokka_2.7b",
|
| 1100 |
-
"MrNJK/gpt2-xl-sft",
|
| 1101 |
-
"facebook/galactica-1.3b",
|
| 1102 |
-
"aisquared/dlite-v2-774m",
|
| 1103 |
-
"EleutherAI/pythia-1b-deduped",
|
| 1104 |
-
"Kunhao/pile-7b-250b-tokens",
|
| 1105 |
-
"w601sxs/b1ade-1b",
|
| 1106 |
-
"rinna/bilingual-gpt-neox-4b",
|
| 1107 |
-
"shaohang/SparseOPT-1.3B",
|
| 1108 |
-
"shaohang/Sparse0.5_OPT-1.3",
|
| 1109 |
-
"EleutherAI/polyglot-ko-12.8b",
|
| 1110 |
-
"Salesforce/codegen-6B-multi",
|
| 1111 |
-
"bigscience/bloom-1b1",
|
| 1112 |
-
"TFLai/gpt-neo-1.3B-4bit-alpaca",
|
| 1113 |
-
"FabbriSimo01/Bloom_1b_Quantized",
|
| 1114 |
-
"MBZUAI/LaMini-GPT-774M",
|
| 1115 |
-
"Locutusque/gpt2-large-conversational",
|
| 1116 |
-
"Devio/test-3b",
|
| 1117 |
-
"stabilityai/stablelm-tuned-alpha-3b",
|
| 1118 |
-
"PygmalionAI/pygmalion-1.3b",
|
| 1119 |
-
"KoboldAI/fairseq-dense-355M",
|
| 1120 |
-
"Rachneet/gpt2-xl-alpaca",
|
| 1121 |
-
"gpt2-large",
|
| 1122 |
-
"Mikivis/gpt2-large-lora-sft",
|
| 1123 |
-
"stabilityai/stablelm-base-alpha-3b",
|
| 1124 |
-
"gpt2-medium",
|
| 1125 |
-
"Kunhao/pile-7b",
|
| 1126 |
-
"aisquared/dlite-v1-774m",
|
| 1127 |
-
"aisquared/dlite-v2-355m",
|
| 1128 |
-
"YeungNLP/firefly-bloom-2b6-v2",
|
| 1129 |
-
"KnutJaegersberg/gpt-2-xl-EvolInstruct",
|
| 1130 |
-
"KnutJaegersberg/galactica-orca-wizardlm-1.3b",
|
| 1131 |
-
"cerebras/Cerebras-GPT-1.3B",
|
| 1132 |
-
"FabbriSimo01/Cerebras_1.3b_Quantized",
|
| 1133 |
-
"facebook/xglm-1.7B",
|
| 1134 |
-
"EleutherAI/pythia-410m-deduped",
|
| 1135 |
-
"TheBloke/GPlatty-30B-SuperHOT-8K-fp16",
|
| 1136 |
-
"DataLinguistic/DataLinguistic-34B-V1.0",
|
| 1137 |
-
"Corianas/Quokka_1.3b",
|
| 1138 |
-
"TheTravellingEngineer/bloom-560m-RLHF-v2",
|
| 1139 |
-
"Corianas/1.3b",
|
| 1140 |
-
"RWKV/rwkv-4-430m-pile",
|
| 1141 |
-
"porkorbeef/Llama-2-13b-sf",
|
| 1142 |
-
"xhyi/PT_GPTNEO350_ATG",
|
| 1143 |
-
"TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ",
|
| 1144 |
-
"bigscience/bloomz-560m",
|
| 1145 |
-
"TheBloke/medalpaca-13B-GPTQ-4bit",
|
| 1146 |
-
"TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16",
|
| 1147 |
-
"aisquared/dlite-v1-355m",
|
| 1148 |
-
"uukuguy/speechless-codellama-orca-airoboros-13b-0.10e",
|
| 1149 |
-
"yhyhy3/med-orca-instruct-33b",
|
| 1150 |
-
"TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16",
|
| 1151 |
-
"TheTravellingEngineer/bloom-1b1-RLHF",
|
| 1152 |
-
"MBZUAI/lamini-cerebras-1.3b",
|
| 1153 |
-
"IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1",
|
| 1154 |
-
"TheBloke/WizardLM-7B-uncensored-GPTQ",
|
| 1155 |
-
"TheBloke/EverythingLM-13B-16K-GPTQ",
|
| 1156 |
-
"quantumaikr/open_llama_7b_hf",
|
| 1157 |
-
"TheBloke/chronos-wizardlm-uc-scot-st-13B-GPTQ",
|
| 1158 |
-
"TheBloke/WizardLM-30B-Uncensored-GPTQ",
|
| 1159 |
-
"IDEA-CCNL/Ziya-LLaMA-13B-v1",
|
| 1160 |
-
"Phind/Phind-CodeLlama-34B-v1",
|
| 1161 |
-
"robowaifudev/megatron-gpt2-345m",
|
| 1162 |
-
"MayaPH/GodziLLa-30B-instruct",
|
| 1163 |
-
"TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16",
|
| 1164 |
-
"uukuguy/speechless-codellama-orca-platypus-13b-0.10e",
|
| 1165 |
-
"doas/test2",
|
| 1166 |
-
"BreadAi/PM_modelV2",
|
| 1167 |
-
"bigcode/santacoder",
|
| 1168 |
-
"TheBloke/wizard-vicuna-13B-GPTQ",
|
| 1169 |
-
"porkorbeef/Llama-2-13b",
|
| 1170 |
-
"TehVenom/DiffMerge-DollyGPT-Pygmalion",
|
| 1171 |
-
"PygmalionAI/pygmalion-350m",
|
| 1172 |
-
"TheBloke/orca_mini_v3_7B-GPTQ",
|
| 1173 |
-
"TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GPTQ",
|
| 1174 |
-
"TheBloke/WizardLM-30B-GPTQ",
|
| 1175 |
-
"bigscience/bloom-560m",
|
| 1176 |
-
"TFLai/gpt2-turkish-uncased",
|
| 1177 |
-
"TheBloke/guanaco-33B-GPTQ",
|
| 1178 |
-
"TheBloke/openchat_v2_openorca_preview-GPTQ",
|
| 1179 |
-
"porkorbeef/Llama-2-13b-public",
|
| 1180 |
-
"TheBloke/LongChat-13B-GPTQ",
|
| 1181 |
-
"yhyhy3/med-orca-instruct-33b",
|
| 1182 |
-
"TheBloke/airoboros-33B-gpt4-1-4-SuperHOT-8K-fp16",
|
| 1183 |
-
"TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16",
|
| 1184 |
-
"MayaPH/FinOPT-Franklin",
|
| 1185 |
-
"TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ",
|
| 1186 |
-
"TheBloke/Project-Baize-v2-13B-GPTQ",
|
| 1187 |
-
"malhajar/Platypus2-70B-instruct-4bit-gptq",
|
| 1188 |
-
"KoboldAI/OPT-350M-Erebus",
|
| 1189 |
-
"rishiraj/bloom-560m-guanaco",
|
| 1190 |
-
"Panchovix/WizardLM-33B-V1.0-Uncensored-SuperHOT-8k",
|
| 1191 |
-
"doas/test5",
|
| 1192 |
-
"vicgalle/alpaca-7b",
|
| 1193 |
-
"beomi/KoAlpaca-Polyglot-5.8B",
|
| 1194 |
-
"Phind/Phind-CodeLlama-34B-Python-v1",
|
| 1195 |
-
"timdettmers/guanaco-65b-merged",
|
| 1196 |
-
"TheBloke/wizard-mega-13B-GPTQ",
|
| 1197 |
-
"MayaPH/GodziLLa-30B-plus",
|
| 1198 |
-
"TheBloke/Platypus-30B-SuperHOT-8K-fp16",
|
| 1199 |
-
"facebook/opt-350m",
|
| 1200 |
-
"KoboldAI/OPT-350M-Nerys-v2",
|
| 1201 |
-
"TheBloke/robin-33B-v2-GPTQ",
|
| 1202 |
-
"jaspercatapang/Echidna-30B",
|
| 1203 |
-
"TheBloke/llama-30b-supercot-SuperHOT-8K-fp16",
|
| 1204 |
-
"marcchew/test1",
|
| 1205 |
-
"Harshvir/LaMini-Neo-1.3B-Mental-Health_lora",
|
| 1206 |
-
"golaxy/gogpt-560m",
|
| 1207 |
-
"TheBloke/orca_mini_13B-GPTQ",
|
| 1208 |
-
"Panchovix/airoboros-33b-gpt4-1.2-SuperHOT-8k",
|
| 1209 |
-
"Aspik101/tulu-7b-instruct-pl-lora_unload",
|
| 1210 |
-
"Phind/Phind-CodeLlama-34B-v2",
|
| 1211 |
-
"BreadAi/MusePy-1-2",
|
| 1212 |
-
"cerebras/Cerebras-GPT-590M",
|
| 1213 |
-
"microsoft/CodeGPT-small-py",
|
| 1214 |
-
"victor123/WizardLM-13B-1.0",
|
| 1215 |
-
"OptimalScale/robin-65b-v2-delta",
|
| 1216 |
-
"voidful/changpt-bart",
|
| 1217 |
-
"FabbriSimo01/GPT_Large_Quantized",
|
| 1218 |
-
"MayaPH/FinOPT-Lincoln",
|
| 1219 |
-
"KoboldAI/fairseq-dense-125M",
|
| 1220 |
-
"SebastianSchramm/Cerebras-GPT-111M-instruction",
|
| 1221 |
-
"TheTravellingEngineer/bloom-560m-RLHF",
|
| 1222 |
-
"breadlicker45/dough-instruct-base-001",
|
| 1223 |
-
"WizardLM/WizardLM-30B-V1.0",
|
| 1224 |
-
"WizardLM/WizardLM-30B-V1.0",
|
| 1225 |
-
"WizardLM/WizardLM-30B-V1.0",
|
| 1226 |
-
"TaylorAI/Flash-Llama-30M-20001",
|
| 1227 |
-
"porkorbeef/Llama-2-13b-12_153950",
|
| 1228 |
-
"huggingtweets/bladeecity-jerma985",
|
| 1229 |
-
"KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct",
|
| 1230 |
-
"bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16",
|
| 1231 |
-
"microsoft/DialoGPT-small",
|
| 1232 |
-
"Corianas/590m",
|
| 1233 |
-
"facebook/xglm-564M",
|
| 1234 |
-
"EleutherAI/gpt-neo-125m",
|
| 1235 |
-
"EleutherAI/pythia-160m-deduped",
|
| 1236 |
-
"klosax/pythia-160m-deduped-step92k-193bt",
|
| 1237 |
-
"MBZUAI/lamini-neo-125m",
|
| 1238 |
-
"bigcode/tiny_starcoder_py",
|
| 1239 |
-
"concedo/OPT-19M-ChatSalad",
|
| 1240 |
-
"anton-l/gpt-j-tiny-random",
|
| 1241 |
-
"grantprice/Cerebras-GPT-590M-finetuned-DND",
|
| 1242 |
-
"deepnight-research/zsc-text",
|
| 1243 |
-
"WangZeJun/bloom-820m-chat",
|
| 1244 |
-
"cerebras/Cerebras-GPT-256M",
|
| 1245 |
-
"ai-forever/rugpt3large_based_on_gpt2",
|
| 1246 |
-
"alibidaran/medical_transcription_generator",
|
| 1247 |
-
"Deci/DeciCoder-1b",
|
| 1248 |
-
"microsoft/DialoGPT-medium",
|
| 1249 |
-
"ogimgio/gpt-neo-125m-neurallinguisticpioneers",
|
| 1250 |
-
"open-llm-leaderboard/bloom-560m-4bit-alpaca-auto-eval-adapter-applied",
|
| 1251 |
-
"BreadAi/gpt-YA-1-1_160M",
|
| 1252 |
-
"microsoft/DialoGPT-large",
|
| 1253 |
-
"facebook/opt-125m",
|
| 1254 |
-
"huggingtweets/jerma985",
|
| 1255 |
-
"Locutusque/gpt2-conversational-or-qa",
|
| 1256 |
-
"concedo/Pythia-70M-ChatSalad",
|
| 1257 |
-
"roneneldan/TinyStories-1M",
|
| 1258 |
-
"BreadAi/DiscordPy",
|
| 1259 |
-
"bigcode/gpt_bigcode-santacoder",
|
| 1260 |
-
"Tincando/fiction_story_generator",
|
| 1261 |
-
"klosax/pythia-70m-deduped-step44k-92bt",
|
| 1262 |
-
"Quake24/easyTermsSummerizer",
|
| 1263 |
-
"BreadAi/gpt-YA-1-1_70M",
|
| 1264 |
-
"EleutherAI/pythia-160m",
|
| 1265 |
-
"euclaise/gpt-neox-122m-minipile-digits",
|
| 1266 |
-
"MBZUAI/lamini-cerebras-590m",
|
| 1267 |
-
"nicholasKluge/Aira-124M",
|
| 1268 |
-
"MayaPH/FinOPT-Washington",
|
| 1269 |
-
"cyberagent/open-calm-large",
|
| 1270 |
-
"BreadAi/StoryPy",
|
| 1271 |
-
"EleutherAI/pythia-70m",
|
| 1272 |
-
"BreadAi/gpt-Youtube",
|
| 1273 |
-
"roneneldan/TinyStories-33M",
|
| 1274 |
-
"EleutherAI/pythia-70m-deduped",
|
| 1275 |
-
"lgaalves/gpt2_guanaco-dolly-platypus",
|
| 1276 |
-
"Corianas/Quokka_590m",
|
| 1277 |
-
"lgaalves/gpt2_platypus-dolly-guanaco",
|
| 1278 |
-
"cyberagent/open-calm-7b",
|
| 1279 |
-
"RWKV/rwkv-4-169m-pile",
|
| 1280 |
-
"gpt2",
|
| 1281 |
-
"roneneldan/TinyStories-28M",
|
| 1282 |
-
"lgaalves/gpt2_open-platypus",
|
| 1283 |
-
"gpt2",
|
| 1284 |
-
"SaylorTwift/gpt2_test",
|
| 1285 |
-
"roneneldan/TinyStories-3M",
|
| 1286 |
-
"nthngdy/pythia-owt2-70m-50k",
|
| 1287 |
-
"Corianas/256_5epoch",
|
| 1288 |
-
"roneneldan/TinyStories-8M",
|
| 1289 |
-
"lgaalves/gpt2-dolly",
|
| 1290 |
-
"nthngdy/pythia-owt2-70m-100k",
|
| 1291 |
-
"aisquared/dlite-v2-124m",
|
| 1292 |
-
"mncai/SGPT-1.3B-insurance-epoch10",
|
| 1293 |
-
"huggingtweets/gladosystem",
|
| 1294 |
-
"abhiramtirumala/DialoGPT-sarcastic-medium",
|
| 1295 |
-
"MBZUAI/lamini-cerebras-256m",
|
| 1296 |
-
"cerebras/Cerebras-GPT-111M",
|
| 1297 |
-
"uberkie/metharme-1.3b-finetuned",
|
| 1298 |
-
"MBZUAI/lamini-cerebras-111m",
|
| 1299 |
-
"psyche/kogpt",
|
| 1300 |
-
"Corianas/Quokka_256m",
|
| 1301 |
-
"vicgalle/gpt2-alpaca-gpt4",
|
| 1302 |
-
"aisquared/dlite-v1-124m",
|
| 1303 |
-
"Mikivis/xuanxuan",
|
| 1304 |
-
"MBZUAI/LaMini-GPT-124M",
|
| 1305 |
-
"vicgalle/gpt2-alpaca",
|
| 1306 |
-
"huashiyiqike/testmodel",
|
| 1307 |
-
"Corianas/111m",
|
| 1308 |
-
"baseline",
|
| 1309 |
-
]
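
For context, a flat model list like the one deleted above is typically consumed as a set-membership filter over leaderboard rows. A minimal sketch, assuming a hypothetical `models` list and a DataFrame with a `model_name` column (both names are illustrative, not this repo's actual API):

import pandas as pd

# Hypothetical stand-in for the full curated list deleted above.
models = ["EleutherAI/pythia-6.7b", "gpt2", "baseline"]

def filter_known_models(df: pd.DataFrame) -> pd.DataFrame:
    """Keep only leaderboard rows whose model id appears in the curated list."""
    known = set(models)  # a set gives O(1) membership checks for ~1300 entries
    return df[df["model_name"].isin(known)]

# Example: only the first of these two rows survives the filter.
df = pd.DataFrame({"model_name": ["gpt2", "unknown/model"], "score": [41.2, 10.0]})
print(filter_known_models(df))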
src/tools/plots.py
DELETED
|
@@ -1,158 +0,0 @@
|
|
| 1 |
-
import numpy as np
|
| 2 |
-
import pandas as pd
|
| 3 |
-
import plotly.express as px
|
| 4 |
-
from plotly.graph_objs import Figure
|
| 5 |
-
|
| 6 |
-
from src.display.utils import AutoEvalColumn, Task, Tasks
|
| 7 |
-
from src.display.utils import human_baseline_row as HUMAN_BASELINE
|
| 8 |
-
from src.leaderboard.filter_models import FLAGGED_MODELS
|
| 9 |
-
from src.leaderboard.read_evals import EvalResult
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
def create_scores_df(raw_data: list[EvalResult]) -> pd.DataFrame:
|
| 13 |
-
"""
|
| 14 |
-
Generates a DataFrame containing the maximum scores until each date.
|
| 15 |
-
|
| 16 |
-
:param results_df: A DataFrame containing result information including metric scores and dates.
|
| 17 |
-
:return: A new DataFrame containing the maximum scores until each date for every metric.
|
| 18 |
-
"""
|
| 19 |
-
# Step 1: Ensure 'date' is in datetime format and sort the DataFrame by it
|
| 20 |
-
results_df = pd.DataFrame(raw_data)
|
| 21 |
-
# results_df["date"] = pd.to_datetime(results_df["date"], format="mixed", utc=True)
|
| 22 |
-
results_df.sort_values(by="date", inplace=True)
|
| 23 |
-
|
| 24 |
-
# Step 2: Initialize the scores dictionary
|
| 25 |
-
scores = {k: [] for k in BENCHMARK_COLS + [AutoEvalColumn.average.name]}
|
| 26 |
-
|
| 27 |
-
# Step 3: Iterate over the rows of the DataFrame and update the scores dictionary
|
| 28 |
-
for task in [t.value for t in Tasks] + [Task("Average", "avg", AutoEvalColumn.average.name)]:
|
| 29 |
-
current_max = 0
|
| 30 |
-
last_date = ""
|
| 31 |
-
column = task.col_name
|
| 32 |
-
for _, row in results_df.iterrows():
|
| 33 |
-
current_model = row["full_model"]
|
| 34 |
-
# We ignore models that are flagged/no longer on the hub/not finished
|
| 35 |
-
to_ignore = (
|
| 36 |
-
not row["still_on_hub"]
|
| 37 |
-
or not row["not_flagged"]
|
| 38 |
-
or current_model in FLAGGED_MODELS
|
| 39 |
-
or row["status"] != "FINISHED"
|
| 40 |
-
)
|
| 41 |
-
if to_ignore:
|
| 42 |
-
continue
|
| 43 |
-
|
| 44 |
-
current_date = row["date"]
|
| 45 |
-
if task.benchmark == "Average":
|
| 46 |
-
current_score = np.mean(list(row["results"].values()))
|
| 47 |
-
else:
|
| 48 |
-
current_score = row["results"][task.benchmark]
|
| 49 |
-
|
| 50 |
-
if current_score > current_max:
|
| 51 |
-
if current_date == last_date and len(scores[column]) > 0:
|
| 52 |
-
scores[column][-1] = {"model": current_model, "date": current_date, "score": current_score}
|
| 53 |
-
else:
|
| 54 |
-
scores[column].append({"model": current_model, "date": current_date, "score": current_score})
|
| 55 |
-
current_max = current_score
|
| 56 |
-
last_date = current_date
|
| 57 |
-
|
| 58 |
-
# Step 4: Return all dictionaries as DataFrames
|
| 59 |
-
return {k: pd.DataFrame(v) for k, v in scores.items()}
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
def create_plot_df(scores_df: dict[str : pd.DataFrame]) -> pd.DataFrame:
|
| 63 |
-
"""
|
| 64 |
-
Transforms the scores DataFrame into a new format suitable for plotting.
|
| 65 |
-
|
| 66 |
-
:param scores_df: A DataFrame containing metric scores and dates.
|
| 67 |
-
:return: A new DataFrame reshaped for plotting purposes.
|
| 68 |
-
"""
|
| 69 |
-
# Initialize the list to store DataFrames
|
| 70 |
-
dfs = []
|
| 71 |
-
# Iterate over the cols and create a new DataFrame for each column
|
| 72 |
-
for col in BENCHMARK_COLS + [AutoEvalColumn.average.name]:
|
| 73 |
-
d = scores_df[col].reset_index(drop=True)
|
| 74 |
-
d["task"] = col
|
| 75 |
-
dfs.append(d)
|
| 76 |
-
|
| 77 |
-
# Concatenate all the created DataFrames
|
| 78 |
-
concat_df = pd.concat(dfs, ignore_index=True)
|
| 79 |
-
|
| 80 |
-
# Sort values by 'date'
|
| 81 |
-
concat_df.sort_values(by="date", inplace=True)
|
| 82 |
-
concat_df.reset_index(drop=True, inplace=True)
|
| 83 |
-
return concat_df
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
def create_metric_plot_obj(df: pd.DataFrame, metrics: list[str], title: str) -> Figure:
|
| 87 |
-
"""
|
| 88 |
-
Create a Plotly figure object with lines representing different metrics
|
| 89 |
-
and horizontal dotted lines representing human baselines.
|
| 90 |
-
|
| 91 |
-
:param df: The DataFrame containing the metric values, names, and dates.
|
| 92 |
-
:param metrics: A list of strings representing the names of the metrics
|
| 93 |
-
to be included in the plot.
|
| 94 |
-
:param title: A string representing the title of the plot.
|
| 95 |
-
:return: A Plotly figure object with lines representing metrics and
|
| 96 |
-
horizontal dotted lines representing human baselines.
|
| 97 |
-
"""
|
| 98 |
-
|
| 99 |
-
# Filter the DataFrame based on the specified metrics
|
| 100 |
-
df = df[df["task"].isin(metrics)]
|
| 101 |
-
|
| 102 |
-
# Filter the human baselines based on the specified metrics
|
| 103 |
-
filtered_human_baselines = {k: v for k, v in HUMAN_BASELINE.items() if k in metrics}
|
| 104 |
-
|
| 105 |
-
# Create a line figure using plotly express with specified markers and custom data
|
| 106 |
-
fig = px.line(
|
| 107 |
-
df,
|
| 108 |
-
x="date",
|
| 109 |
-
y="score",
|
| 110 |
-
color="task",
|
| 111 |
-
markers=True,
|
| 112 |
-
custom_data=["task", "score", "model"],
|
| 113 |
-
title=title,
|
| 114 |
-
)
|
| 115 |
-
|
| 116 |
-
# Update hovertemplate for better hover interaction experience
|
| 117 |
-
fig.update_traces(
|
| 118 |
-
hovertemplate="<br>".join(
|
| 119 |
-
[
|
| 120 |
-
"Model Name: %{customdata[2]}",
|
| 121 |
-
"Metric Name: %{customdata[0]}",
|
| 122 |
-
"Date: %{x}",
|
| 123 |
-
"Metric Value: %{y}",
|
| 124 |
-
]
|
| 125 |
-
)
|
| 126 |
-
)
|
| 127 |
-
|
| 128 |
-
# Update the range of the y-axis
|
| 129 |
-
fig.update_layout(yaxis_range=[0, 100])
|
| 130 |
-
|
| 131 |
-
# Create a dictionary to hold the color mapping for each metric
|
| 132 |
-
metric_color_mapping = {}
|
| 133 |
-
|
| 134 |
-
# Map each metric name to its color in the figure
|
| 135 |
-
for trace in fig.data:
|
| 136 |
-
metric_color_mapping[trace.name] = trace.line.color
|
| 137 |
-
|
| 138 |
-
# Iterate over filtered human baselines and add horizontal lines to the figure
|
| 139 |
-
for metric, value in filtered_human_baselines.items():
|
| 140 |
-
color = metric_color_mapping.get(metric, "blue") # Retrieve color from mapping; default to blue if not found
|
| 141 |
-
location = "top left" if metric == "HellaSwag" else "bottom left" # Set annotation position
|
| 142 |
-
# Add horizontal line with matched color and positioned annotation
|
| 143 |
-
fig.add_hline(
|
| 144 |
-
y=value,
|
| 145 |
-
line_dash="dot",
|
| 146 |
-
annotation_text=f"{metric} human baseline",
|
| 147 |
-
annotation_position=location,
|
| 148 |
-
annotation_font_size=10,
|
| 149 |
-
annotation_font_color=color,
|
| 150 |
-
line_color=color,
|
| 151 |
-
)
|
| 152 |
-
|
| 153 |
-
return fig
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
# Example Usage:
|
| 157 |
-
# human_baselines dictionary is defined.
|
| 158 |
-
# chart = create_metric_plot_obj(scores_df, ["ARC", "HellaSwag", "MMLU", "TruthfulQA"], human_baselines, "Graph Title")
|
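
For reference, a minimal sketch of how the three deleted helpers chained together, reconstructed from the file's own trailing "Example Usage" note. Note that the comment is stale: the final signature of create_metric_plot_obj is (df, metrics, title) and reads human baselines from HUMAN_BASELINE internally, so no human_baselines argument is passed here. `raw_data` (a list[EvalResult]) is assumed to be loaded elsewhere.

scores_per_metric = create_scores_df(raw_data)   # dict of per-metric running maxima over time
plot_df = create_plot_df(scores_per_metric)      # long format: date / score / model / task
fig = create_metric_plot_obj(
    plot_df,
    ["ARC", "HellaSwag", "MMLU", "TruthfulQA"],  # metric columns to chart
    title="Top scores over time",
)
fig.show()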
style.css
DELETED
@@ -1,28 +0,0 @@
-body {
-  padding: 2rem;
-  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
-  font-size: 16px;
-  margin-top: 0;
-}
-
-p {
-  color: rgb(107, 114, 128);
-  font-size: 15px;
-  margin-bottom: 10px;
-  margin-top: 5px;
-}
-
-.card {
-  max-width: 620px;
-  margin: 0 auto;
-  padding: 16px;
-  border: 1px solid lightgray;
-  border-radius: 16px;
-}
-
-.card p:last-child {
-  margin-bottom: 0;
-}
uv.lock
ADDED
The diff for this file is too large to render. See raw diff.