# Page-scrape artifact (Hugging Face Space status header), not part of the program:
# Spaces: Sleeping / Sleeping
import os
import random
import re

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Pull optional overrides from a .env file if one exists (no-op otherwise).
load_dotenv()

# Local mode is active only when BOTH a base URL and a token are configured;
# otherwise the app talks to the HuggingFace Inference API.
BASE_URL = os.getenv('BASE_URL')
LOCAL_TOKEN = os.getenv('TOKEN')
LOCAL_MODE = bool(BASE_URL and LOCAL_TOKEN)
MODEL_NAME = os.getenv('MODEL_NAME', 'meta-llama/Llama-3.2-3B-Instruct')
# List of countries for the game. NOTE: get_system_message_with_country()
# removes each pick from this list so rounds in one process don't repeat.
COUNTRIES = [
    "Afghanistan", "Albania", "Algeria", "Angola", "Argentina", "Armenia", "Australia", "Austria",
    "Bangladesh", "Belgium", "Belize", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil",
    "Bulgaria", "Burma", "Burundi", "Cambodia", "Canada", "Central African Republic", "Chad", "Chile",
    "China", "Colombia", "Costa Rica", "Croatia", "Cuba", "Czech Republic", "Democratic Republic of the Congo",
    "Denmark", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Estonia", "Ethiopia", "Fiji",
    "Finland", "France", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea",
    "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland",
    "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kenya", "Kuwait", "Kyrgyzstan", "Laos", "Latvia",
    "Lebanon", "Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg", "Macedonia", "Madagascar",
    "Malaysia", "Mali", "Malta", "Mexico", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco",
    "Mozambique", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "North Korea",
    "Norway", "Oman", "Pakistan", "Palestine", "Panama", "Papua New Guinea", "Paraguay", "Peru",
    "Philippines", "Poland", "Portugal", "Qatar", "Republic of the Congo", "Romania", "Russia", "Rwanda",
    "Samoa", "Saudi Arabia", "Serbia", "Singapore", "Slovakia", "South Korea", "Slovenia", "Somalia",
    "South Africa", "Spain", "Sri Lanka", "Sudan", "Suriname", "Swaziland", "Sweden", "Switzerland",
    "Syria", "Tajikistan", "Tanzania", "Thailand", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey",
    "Turkmenistan", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States",
    "Uruguay", "Uzbekistan", "Vanuatu", "Vatican City", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"
]


def get_system_message_with_country():
    """Pick a random country and build the 20-questions system prompt for it.

    Side effects:
      - Sets the module-level ``selected_country``.
      - Removes the pick from ``COUNTRIES`` so it isn't chosen again — but
        always leaves at least one entry, because draining the list to empty
        would make the next ``random.choice(COUNTRIES)`` raise IndexError
        (the previous implementation crashed once every country had been used).

    Returns:
        The full system prompt string embedding the secret country.
    """
    global selected_country
    selected_country = random.choice(COUNTRIES)
    # Avoid repeats across rounds, but never empty the pool.
    if len(COUNTRIES) > 1:
        COUNTRIES.remove(selected_country)
    print(selected_country)  # server-side debug only: the answer appears in logs, not in the UI
    return f"""You are a friendly geography game host playing 20 questions with students. You are thinking of the country: {selected_country}
RULES:
1. NEVER reveal the country name ({selected_country}) in your responses
2. Answer only 'Yes' or 'No' to their questions
3. Keep track of how many questions they've asked
4. When they correctly guess {selected_country}, respond with: 'Congratulations! The country was <<{selected_country}>>'
5. If they reach 20 questions without guessing correctly, respond with: 'Game over! The country was <<{selected_country}>>'
6. Be encouraging and give helpful hints through your yes/no answers
7. If they want to play again tell them they need to reload the page.
8. IMPORTANT: Only accept the country name "{selected_country}" as correct; minor spelling mistakes are acceptable. Do NOT accept neighboring countries, similar countries, or regions that contain this country.
9. If they guess a neighboring country or similar country, respond with "No" and continue the game.
10. Be very strict about the exact country match - only "{selected_country}" is the correct answer."""
# Module-level game state: the system prompt for the current round
# (calling get_system_message_with_country() also picks the secret country).
current_system = get_system_message_with_country()
def format_game_result(response):
    """Format a game-ending model response with styled markdown.

    When the model signals a finished game ("Congratulations" / "Game over"),
    returns a decorated end-of-game message and starts a fresh round by
    regenerating ``current_system`` (which picks a new country). Any other
    response is returned unchanged.

    Bug fixed: the round reset used to sit on the fall-through path, so it ran
    on every NON-ending response (silently changing the hidden country
    mid-game) and never ran when the game actually ended. The reset now
    happens only in the two end-of-game branches, after capturing the
    finished country for the outgoing message.
    """
    global current_system
    if "The country was" in response:
        print(f"π DEBUG - Game end detected! Country extracted: {selected_country}")
    else:
        print("π DEBUG - Regular response (no game end)")
    if "Congratulations" in response:
        finished = selected_country
        current_system = get_system_message_with_country()  # new round, new country
        return f"π **Congratulations!** You correctly guessed **{finished}**! Well done! π\n\nWould you like to play another round?"
    elif "Game over" in response:
        finished = selected_country
        current_system = get_system_message_with_country()  # new round, new country
        return f"π **Game Over!** You've used all 20 questions. The country I was thinking of was **{finished}**. π\n\nBetter luck next time! Would you like to try again?"
    return response
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken | None = None,
):
    """Stream a chat completion for the current game round and return the reply.

    Args:
        message: the latest user message.
        history: prior turns as ``[{"role": ..., "content": ...}, ...]``.
        system_message: UNUSED — the real system prompt comes from the
            module-level ``current_system`` so the hidden country stays
            server-side; the parameter is kept for signature compatibility.
        max_tokens / temperature / top_p: generation settings for the model.
        hf_token: OAuth token injected by Gradio in cloud mode; ignored locally.

    Returns:
        The assembled assistant reply, a formatted game-end message, or an
        error string if the client cannot be reached.

    For more information on `huggingface_hub` Inference API support, see:
    https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
    messages = [{"role": "system", "content": current_system}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    # Debug: Calculate approximate input token count
    total_input_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
    estimated_input_tokens = total_input_chars // 4  # Rough approximation: 4 chars per token
    print(f"π DEBUG - Estimated input tokens: {estimated_input_tokens}")
    print(f"π DEBUG - Messages count: {len(messages)}")
    print(f"π DEBUG - Max tokens setting: {max_tokens}")

    # Debug: Show each message type and length
    for i, msg in enumerate(messages):
        role = msg.get("role", "unknown")
        content = str(msg.get("content", ""))
        print(f"π DEBUG - Message {i+1} ({role}): {len(content)} chars")
        if role == "system":
            print(f"π DEBUG - System message preview: {content[:100]}...")
        elif role == "user":
            print(f"π DEBUG - User message: {content}")
        elif role == "assistant":
            print(f"π DEBUG - Assistant message: {content[:50]}...")

    # Choose client based on whether we're running locally or in the cloud
    if LOCAL_MODE:
        try:
            # Point the client at the local inference server endpoint.
            client = InferenceClient(model=BASE_URL, token=LOCAL_TOKEN)
        except Exception as e:
            return f"Error connecting to local model: {str(e)}"
    else:
        # Running in cloud mode with HuggingFace
        if not hf_token or not hf_token.token:
            return "Please log in with your HuggingFace account to play the geography game!"
        client = InferenceClient(token=hf_token.token, model=MODEL_NAME)

    response = ""
    output_token_count = 0
    try:
        # NOTE: no call-level `model=` here. Each client already carries its
        # target (BASE_URL locally, MODEL_NAME in the cloud); passing
        # model=MODEL_NAME at call time overrides the client-level setting and
        # would silently route local-mode requests to the hosted model.
        for message_chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            choices = message_chunk.choices
            token = ""
            if len(choices) and choices[0].delta.content:
                token = choices[0].delta.content
                output_token_count += 1
            response += token

        # Debug: Show output token statistics
        estimated_output_tokens = len(response) // 4  # Rough approximation
        print(f"π DEBUG - Output token chunks received: {output_token_count}")
        print(f"π DEBUG - Estimated output tokens (by chars): {estimated_output_tokens}")
        print(f"π DEBUG - Response length: {len(response)} characters")
        print(f"π DEBUG - Raw response: {response}")

        # Check if this is a game end response and format it nicely
        if "The country was" in response:
            print(f"π DEBUG - Game end detected! Country extracted: {selected_country}")
            return format_game_result(response)
        else:
            print("π DEBUG - Regular response (no game end)")
            return response
    except Exception as e:
        return f"Error during inference: {str(e)}"
| """ | |
| For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
| """ | |
| # Create description based on mode | |
| if LOCAL_MODE: | |
| description = f"π Running locally with {MODEL_NAME}. I am thinking of a country, you have 20 yes or no questions to ask me to help you figure out what the country is" | |
| else: | |
| description = "I am thinking of a country, you have 20 yes or no questions to ask me to help you figure out what the country is" | |
| # Common examples and settings | |
| examples = [ | |
| ["Is the country located in Europe?"], | |
| ["Is it in the Northern Hemisphere?"], | |
| ["Is the official language Spanish?"], | |
| ["Is the capital city Rome?"], | |
| ["Is this country bordered by an ocean?"], | |
| ["Does this country have more than 100 million people?"], | |
| ["Is this country known for producing coffee?"], | |
| ["Was this country ever a colony of the United Kingdom?"], | |
| ["Is this country located on an island?"], | |
| ["Is the currency the Euro?"], | |
| ] | |
def custom_respond(message, history, hf_token: gr.OAuthToken | None = None):
    """Adapter between gr.ChatInterface and respond() with hardcoded settings.

    Bug fixed: the original body referenced ``hf_token`` without defining it,
    raising NameError on every cloud-mode message. Declaring it as a parameter
    annotated with ``gr.OAuthToken`` lets Gradio inject the logged-in user's
    token automatically; the ``None`` default keeps the wrapper callable with
    just (message, history) in local mode.
    """
    # Hardcoded values - no additional inputs needed
    system_message = ""  # ignored by respond(); kept for signature parity
    max_tokens = 2048
    temperature = 0.3  # Lower temperature for more consistent responses
    top_p = 0.7  # Lower top_p for more deterministic behavior
    return respond(message, history, system_message, max_tokens, temperature, top_p, None if LOCAL_MODE else hf_token)
# Chat UI: ChatInterface drives the 20-questions game through custom_respond.
chatbot = gr.ChatInterface(
    custom_respond,
    type="messages",
    description=description,
    examples=examples,
    cache_examples=False,
    # No additional_inputs - values are hardcoded in wrapper functions
)

# Top-level layout: the sidebar login button is only needed for the
# HuggingFace cloud path (local mode authenticates with the .env token).
with gr.Blocks() as demo:
    if not LOCAL_MODE:
        # Only show login button when running in cloud mode
        with gr.Sidebar():
            gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()