Commit baee2b2
idan shenfeld committed
1 Parent(s): ef1952f

prompt a question when change language

app/app.py (+15 -4)
@@ -158,16 +158,27 @@ def add_user_message(history, message):
 
 def format_system_message(language: str, history: list):
     system_message = [
+        {
+            "role": "system",
+            "content": LANGUAGES.get(language, LANGUAGES["English"]),
+        },
+        {
+            "role": "user",
+            "content": f"Start by asking me a question in {language}."
+        }
+    ]
+    response = call_pipeline(system_message)
+    new_system_message = [
         {
             "role": "system",
             "content": LANGUAGES.get(language, LANGUAGES["English"]),
         },
         {
             "role": "assistant",
-            "content":
+            "content": response
         }
     ]
-    return
+    return new_system_message
 
 
 def format_history_as_messages(history: list):
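What this hunk does: instead of returning a preamble whose assistant turn was hard-coded (the two removed lines are cut off in the rendered diff), format_system_message now sends the model a user prompt asking it to open the conversation, then embeds the generated question as the assistant's first message. A minimal sketch of the resulting behavior, with call_pipeline stubbed and the LANGUAGES entries invented for illustration:

    # Hypothetical stand-ins: LANGUAGES and the model call are stubbed here.
    LANGUAGES = {
        "English": "You are a helpful assistant. Always reply in English.",
        "French": "You are a helpful assistant. Always reply in French.",
    }

    def call_pipeline(messages: list) -> str:
        # Stub: the real function runs the configured model over `messages`.
        return "Quelle ville aimerais-tu visiter en France ?"

    def format_system_message(language: str, history: list):
        system_message = [
            {"role": "system", "content": LANGUAGES.get(language, LANGUAGES["English"])},
            {"role": "user", "content": f"Start by asking me a question in {language}."},
        ]
        response = call_pipeline(system_message)
        return [
            {"role": "system", "content": LANGUAGES.get(language, LANGUAGES["English"])},
            {"role": "assistant", "content": response},
        ]

    # The chat now opens with a model-generated question in the chosen language:
    print(format_system_message("French", history=[]))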
@@ -270,7 +281,7 @@ def add_fake_like_data(
 
 
 @spaces.GPU
-def call_pipeline(messages: list
+def call_pipeline(messages: list):
     """Call the appropriate model pipeline based on configuration"""
     if ZERO_GPU:
         tokenizer = CLIENT["tokenizer"]
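Only the head of call_pipeline is visible in this hunk: the diff confirms the @spaces.GPU decorator, the new one-argument signature (the removed signature is truncated in the rendered view), and that the ZeroGPU branch reads a tokenizer out of the CLIENT dict. A hedged sketch of what such a branch commonly looks like with transformers; the CLIENT layout beyond "tokenizer", the checkpoint name, and the generation parameters are assumptions, not code from this repo:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Assumed CLIENT layout for the ZeroGPU path; only the "tokenizer"
    # key is confirmed by the diff context above.
    checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # placeholder model
    CLIENT = {
        "tokenizer": AutoTokenizer.from_pretrained(checkpoint),
        "model": AutoModelForCausalLM.from_pretrained(checkpoint),
    }

    def call_pipeline(messages: list) -> str:
        """Sketch: apply the chat template, generate, decode only the reply."""
        tokenizer = CLIENT["tokenizer"]
        inputs = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        )
        outputs = CLIENT["model"].generate(inputs, max_new_tokens=256)
        return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)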
@@ -339,7 +350,7 @@ def respond(
     messages = format_history_as_messages(history)
 
     if ZERO_GPU:
-        content = call_pipeline(messages
+        content = call_pipeline(messages)
     else:
         response = CLIENT.chat.completions.create(
             messages=messages,
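In respond, the call site is updated to match the one-argument call_pipeline, while the non-ZeroGPU branch keeps using an OpenAI-compatible chat-completions client. A condensed sketch of that dispatch; the respond signature, the environment flag, the client construction, and the model name are illustrative assumptions:

    import os
    from openai import OpenAI

    ZERO_GPU = bool(os.environ.get("SPACES_ZERO_GPU"))  # assumed flag name
    if not ZERO_GPU:
        CLIENT = OpenAI()  # assumed: any OpenAI-compatible endpoint

    def respond(history: list) -> list:
        """Sketch: generate the next assistant turn and append it to history."""
        messages = format_history_as_messages(history)
        if ZERO_GPU:
            content = call_pipeline(messages)
        else:
            response = CLIENT.chat.completions.create(
                messages=messages,
                model="gpt-4o-mini",  # placeholder model name
            )
            content = response.choices[0].message.content
        history.append({"role": "assistant", "content": content})
        return history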
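The commit title points at the intended UX: when the user switches languages in the UI, the conversation is reseeded through format_system_message, so the assistant immediately opens with a question in the new language. A hypothetical Gradio hookup showing that wiring; the component names and the handler are illustrative, since the UI code sits outside this diff:

    import gradio as gr

    def on_language_change(language: str):
        # Reseed the preamble for the new language; display only the
        # assistant's model-generated opening question.
        new_history = format_system_message(language, history=[])
        return [m for m in new_history if m["role"] == "assistant"]

    with gr.Blocks() as demo:
        language = gr.Dropdown(
            ["English", "French", "Hebrew"], value="English", label="Language"
        )
        chatbot = gr.Chatbot(type="messages")
        language.change(on_language_change, inputs=language, outputs=chatbot)

    demo.launch()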