Tags: Text Generation, Transformers, Safetensors, qwen2, llama-factory, full, Generated from Trainer, conversational, text-generation-inference
Instructions for using pepoo20/WordProblem with libraries and local apps.
- Libraries
- Transformers
How to use pepoo20/WordProblem with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="pepoo20/WordProblem")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("pepoo20/WordProblem")
model = AutoModelForCausalLM.from_pretrained("pepoo20/WordProblem")

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
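Since this checkpoint is fine-tuned for word problems, a math-style prompt is more representative than the placeholder above. Below is a minimal sketch reusing the pipeline API; the prompt text is illustrative only, and extracting the reply via `generated_text[-1]["content"]` assumes a recent Transformers version that returns chat-format output for message inputs.

```python
# Minimal sketch: ask the model an actual word problem (prompt is illustrative only).
from transformers import pipeline

pipe = pipeline("text-generation", model="pepoo20/WordProblem")

problem = "A farmer has 3 baskets with 24 apples each. He sells 20 apples. How many apples are left?"
result = pipe([{"role": "user", "content": problem}], max_new_tokens=256)

# With chat-format input, recent Transformers versions return the full conversation;
# the last message is the model's reply.
print(result[0]["generated_text"][-1]["content"])
```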
- Local Apps
- vLLM
How to use pepoo20/WordProblem with vLLM:
Install from pip and serve the model:
```bash
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "pepoo20/WordProblem"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "pepoo20/WordProblem",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
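Because the vLLM server above exposes an OpenAI-compatible API, it can also be queried from Python. The sketch below uses the `openai` client pointed at the local server; the base URL, the placeholder API key, and the example prompt are assumptions for a default local setup, not part of the original instructions.

```python
# Minimal sketch: query the local vLLM server via its OpenAI-compatible API.
# Assumes the server started above is running on localhost:8000.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # key is unused locally

response = client.chat.completions.create(
    model="pepoo20/WordProblem",
    messages=[
        {"role": "user", "content": "A book costs 12 dollars and a pen costs 3 dollars. How much do 2 books and 4 pens cost?"}
    ],
)
print(response.choices[0].message.content)
```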
- SGLang
How to use pepoo20/WordProblem with SGLang:
Install from pip and serve the model:
```bash
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "pepoo20/WordProblem" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "pepoo20/WordProblem",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```

Use Docker images:
```bash
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "pepoo20/WordProblem" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "pepoo20/WordProblem",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```

- Docker Model Runner
How to use pepoo20/WordProblem with Docker Model Runner:
```bash
docker model run hf.co/pepoo20/WordProblem
```
Trainer state recorded during fine-tuning:

```json
{
  "best_metric": 0.1676628440618515,
  "best_model_checkpoint": "saves/Qwen1.5-1.8B/WordProblem/checkpoint-9000",
  "epoch": 0.9999725884707108,
  "eval_steps": 1500,
  "global_step": 9120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03289383514706285,
      "grad_norm": 4.8125,
      "learning_rate": 3e-05,
      "loss": 0.7143,
      "step": 300
    },
    {
      "epoch": 0.0657876702941257,
      "grad_norm": 3.5,
      "learning_rate": 4.998339850669331e-05,
      "loss": 0.2219,
      "step": 600
    },
    {
      "epoch": 0.09868150544118856,
      "grad_norm": 4.40625,
      "learning_rate": 4.9734816848192624e-05,
      "loss": 0.2074,
      "step": 900
    },
    {
      "epoch": 0.1315753405882514,
      "grad_norm": 4.03125,
      "learning_rate": 4.9190839785031474e-05,
      "loss": 0.1906,
      "step": 1200
    },
    {
      "epoch": 0.16446917573531428,
      "grad_norm": 3.78125,
      "learning_rate": 4.835796376008569e-05,
      "loss": 0.1923,
      "step": 1500
    },
    {
      "epoch": 0.16446917573531428,
      "eval_loss": 0.1849033087491989,
      "eval_runtime": 79.6037,
      "eval_samples_per_second": 37.423,
      "eval_steps_per_second": 9.359,
      "step": 1500
    },
    {
      "epoch": 0.19736301088237712,
      "grad_norm": 4.15625,
      "learning_rate": 4.7246135390382216e-05,
      "loss": 0.1839,
      "step": 1800
    },
    {
      "epoch": 0.23025684602944,
      "grad_norm": 4.5,
      "learning_rate": 4.586863267968384e-05,
      "loss": 0.1938,
      "step": 2100
    },
    {
      "epoch": 0.2631506811765028,
      "grad_norm": 3.1875,
      "learning_rate": 4.4241906446007296e-05,
      "loss": 0.1863,
      "step": 2400
    },
    {
      "epoch": 0.2960445163235657,
      "grad_norm": 2.71875,
      "learning_rate": 4.238538385782601e-05,
      "loss": 0.1797,
      "step": 2700
    },
    {
      "epoch": 0.32893835147062855,
      "grad_norm": 3.65625,
      "learning_rate": 4.032123642522486e-05,
      "loss": 0.176,
      "step": 3000
    },
    {
      "epoch": 0.32893835147062855,
      "eval_loss": 0.1760552078485489,
      "eval_runtime": 79.6333,
      "eval_samples_per_second": 37.409,
      "eval_steps_per_second": 9.355,
      "step": 3000
    },
    {
      "epoch": 0.3618321866176914,
      "grad_norm": 4.0,
      "learning_rate": 3.8074115216771435e-05,
      "loss": 0.1791,
      "step": 3300
    },
    {
      "epoch": 0.39472602176475424,
      "grad_norm": 3.90625,
      "learning_rate": 3.567085646427478e-05,
      "loss": 0.1808,
      "step": 3600
    },
    {
      "epoch": 0.4276198569118171,
      "grad_norm": 3.421875,
      "learning_rate": 3.3140161071244915e-05,
      "loss": 0.1805,
      "step": 3900
    },
    {
      "epoch": 0.46051369205888,
      "grad_norm": 2.640625,
      "learning_rate": 3.05122518525215e-05,
      "loss": 0.1738,
      "step": 4200
    },
    {
      "epoch": 0.49340752720594283,
      "grad_norm": 4.5,
      "learning_rate": 2.781851259848554e-05,
      "loss": 0.1736,
      "step": 4500
    },
    {
      "epoch": 0.49340752720594283,
      "eval_loss": 0.17090687155723572,
      "eval_runtime": 79.6329,
      "eval_samples_per_second": 37.409,
      "eval_steps_per_second": 9.355,
      "step": 4500
    },
    {
      "epoch": 0.5263013623530056,
      "grad_norm": 3.578125,
      "learning_rate": 2.509111327432736e-05,
      "loss": 0.1709,
      "step": 4800
    },
    {
      "epoch": 0.5591951975000685,
      "grad_norm": 3.515625,
      "learning_rate": 2.236262583042668e-05,
      "loss": 0.1775,
      "step": 5100
    },
    {
      "epoch": 0.5920890326471314,
      "grad_norm": 4.9375,
      "learning_rate": 1.966563521202681e-05,
      "loss": 0.1759,
      "step": 5400
    },
    {
      "epoch": 0.6249828677941942,
      "grad_norm": 3.640625,
      "learning_rate": 1.7032350213717874e-05,
      "loss": 0.1754,
      "step": 5700
    },
    {
      "epoch": 0.6578767029412571,
      "grad_norm": 3.578125,
      "learning_rate": 1.4494218826096939e-05,
      "loss": 0.1688,
      "step": 6000
    },
    {
      "epoch": 0.6578767029412571,
      "eval_loss": 0.16823573410511017,
      "eval_runtime": 79.6163,
      "eval_samples_per_second": 37.417,
      "eval_steps_per_second": 9.357,
      "step": 6000
    },
    {
      "epoch": 0.6907705380883199,
      "grad_norm": 3.6875,
      "learning_rate": 1.2081552668325321e-05,
      "loss": 0.1707,
      "step": 6300
    },
    {
      "epoch": 0.7236643732353828,
      "grad_norm": 4.0625,
      "learning_rate": 9.82316499179518e-06,
      "loss": 0.171,
      "step": 6600
    },
    {
      "epoch": 0.7565582083824457,
      "grad_norm": 3.984375,
      "learning_rate": 7.74602657804425e-06,
      "loss": 0.1702,
      "step": 6900
    },
    {
      "epoch": 0.7894520435295085,
      "grad_norm": 3.421875,
      "learning_rate": 5.874943640356082e-06,
      "loss": 0.1718,
      "step": 7200
    },
    {
      "epoch": 0.8223458786765714,
      "grad_norm": 4.1875,
      "learning_rate": 4.232261575703861e-06,
      "loss": 0.1689,
      "step": 7500
    },
    {
      "epoch": 0.8223458786765714,
      "eval_loss": 0.16773280501365662,
      "eval_runtime": 79.6198,
      "eval_samples_per_second": 37.415,
      "eval_steps_per_second": 9.357,
      "step": 7500
    },
    {
      "epoch": 0.8552397138236342,
      "grad_norm": 2.703125,
      "learning_rate": 2.83759810497852e-06,
      "loss": 0.1692,
      "step": 7800
    },
    {
      "epoch": 0.8881335489706971,
      "grad_norm": 4.0625,
      "learning_rate": 1.70760898847247e-06,
      "loss": 0.1787,
      "step": 8100
    },
    {
      "epoch": 0.92102738411776,
      "grad_norm": 4.71875,
      "learning_rate": 8.557891145603042e-07,
      "loss": 0.1733,
      "step": 8400
    },
    {
      "epoch": 0.9539212192648228,
      "grad_norm": 3.6875,
      "learning_rate": 2.923113370737779e-07,
      "loss": 0.1741,
      "step": 8700
    },
    {
      "epoch": 0.9868150544118857,
      "grad_norm": 4.3125,
      "learning_rate": 2.3904986054812396e-08,
      "loss": 0.168,
      "step": 9000
    },
    {
      "epoch": 0.9868150544118857,
      "eval_loss": 0.1676628440618515,
      "eval_runtime": 79.7085,
      "eval_samples_per_second": 37.374,
      "eval_steps_per_second": 9.347,
      "step": 9000
    },
    {
      "epoch": 0.9999725884707108,
      "step": 9120,
      "total_flos": 3.6976201313039155e+17,
      "train_loss": 0.19690959160788019,
      "train_runtime": 20589.0374,
      "train_samples_per_second": 7.087,
      "train_steps_per_second": 0.443
    }
  ],
  "logging_steps": 300,
  "max_steps": 9120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3000,
  "total_flos": 3.6976201313039155e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
```
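The log history above carries both training-loss entries (keyed by `loss`) and evaluation entries (keyed by `eval_loss`), so the loss curve can be reconstructed directly from it. Below is a minimal sketch under the assumptions that the JSON is saved locally as `trainer_state.json` (the usual Trainer output name, used here as a hypothetical path) and that matplotlib is installed.

```python
# Minimal sketch: plot the train and eval loss recorded in the trainer state above.
# Assumes a local copy saved as "trainer_state.json" (hypothetical path).
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.title("WordProblem fine-tuning loss")
plt.show()
```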