Update app.py

app.py CHANGED
@@ -3,7 +3,7 @@ import requests
 import json
 import os
 
-# Function to generate responses using
+# Function to generate responses using DeepInfra
 def generate_response(user_message):
     try:
         if not user_message.strip():
@@ -13,7 +13,7 @@ def generate_response(user_message):
         with open("prompt.txt", "r", encoding="utf-8") as f:
             system_prompt = f.read().strip()
 
-        # Configure the request to DeepInfra
+        # Configure the request to DeepInfra
         api_key = os.environ.get("DEEPINFRA_API_KEY", "")
         if not api_key:
             return "Error: No se ha configurado la clave API. Por favor, configura la variable de entorno DEEPINFRA_API_KEY."
@@ -23,14 +23,15 @@ def generate_response(user_message):
             "Content-Type": "application/json"
         }
 
-        # Prompt format for
+        # Prompt format for chat models
         messages = [
             {"role": "system", "content": system_prompt},
             {"role": "user", "content": user_message}
         ]
 
+        # Use a model that is actually available on DeepInfra
         data = {
-            "model": "
+            "model": "meta-llama/Llama-2-7b-chat-hf",  # Model available on DeepInfra
            "messages": messages,
             "max_tokens": 500,
             "temperature": 0.7,
@@ -49,7 +50,19 @@ def generate_response(user_message):
             result = response.json()
             return result["choices"][0]["message"]["content"]
         else:
-
+            # Try another model if the first one fails
+            data["model"] = "mistralai/Mistral-7B-Instruct-v0.2"
+            response = requests.post(
+                "https://api.deepinfra.com/v1/openai/chat/completions",
+                headers=headers,
+                json=data
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                return result["choices"][0]["message"]["content"]
+            else:
+                return f"Error en la API: {response.status_code} - {response.text}"
 
     except Exception as e:
         return f"Lo siento, ha ocurrido un error: {str(e)}"
@@ -75,4 +88,4 @@ demo = gr.Interface(
 
 # Launch the application
 if __name__ == "__main__":
-    demo.queue(max_size=1).launch(share=False, debug=False)
+    demo.queue(max_size=1).launch(share=False, debug=False)
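Reviewer note: the request the updated code sends can be reproduced standalone to verify the key and model outside the Space. The following is a minimal sketch, assuming the standard Bearer authorization scheme for DeepInfra's OpenAI-compatible endpoint (the Authorization header is built on lines that fall outside the hunks shown) and a DEEPINFRA_API_KEY variable in the environment:

import os
import requests

# Minimal sketch of the request app.py builds: one chat-completions call
# against DeepInfra's OpenAI-compatible endpoint. Assumes DEEPINFRA_API_KEY
# is set and that the app uses the usual Bearer scheme (not shown in the diff).
api_key = os.environ["DEEPINFRA_API_KEY"]
response = requests.post(
    "https://api.deepinfra.com/v1/openai/chat/completions",
    headers={
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    },
    json={
        "model": "meta-llama/Llama-2-7b-chat-hf",
        "messages": [{"role": "user", "content": "Hola"}],
        "max_tokens": 500,
        "temperature": 0.7,
    },
    timeout=30,
)
print(response.status_code)
if response.status_code == 200:
    print(response.json()["choices"][0]["message"]["content"])
else:
    print(response.text)

A 200 here combined with a failure inside the Space usually points at the Space's environment (a missing secret) rather than at the request format.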
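Reviewer note: the new else branch copies the whole request for one hardcoded fallback, so a third candidate model would mean a third copy. One way to fold the attempts into a loop is sketched below; this is not part of the commit, and the helper name and the candidate list are illustrative (the two entries are taken from the diff):

import requests

API_URL = "https://api.deepinfra.com/v1/openai/chat/completions"

# Candidate models tried in order. The two entries come from the diff;
# collecting them in a list is the illustrative change.
FALLBACK_MODELS = [
    "meta-llama/Llama-2-7b-chat-hf",
    "mistralai/Mistral-7B-Instruct-v0.2",
]

def generate_with_fallback(messages, headers):
    """Try each candidate model in turn; return the first successful reply."""
    last_error = ""
    for model in FALLBACK_MODELS:
        data = {
            "model": model,
            "messages": messages,
            "max_tokens": 500,
            "temperature": 0.7,
        }
        response = requests.post(API_URL, headers=headers, json=data, timeout=30)
        if response.status_code == 200:
            return response.json()["choices"][0]["message"]["content"]
        last_error = f"Error en la API: {response.status_code} - {response.text}"
    return last_error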