Christoph Holthaus committed
Commit: 10006aa
Parent(s): f8a999d

dev: better debug
app.py
CHANGED
@@ -30,6 +30,7 @@ print(f"debug: init model: {model_uri_hf}")
 
 # Check if the model file already exists
 if not os.path.isfile('model.bin'):
+    print(f"debug: can't find model locally, downloading ...")
     # Download the model
     response = requests.get(model_uri_hf)
 
@@ -38,10 +39,10 @@ if not os.path.isfile('model.bin'):
         file.write(response.content)
 
 llm = Llama(model_path="./model.bin") # LLaMa model
-print("
+print("debug: model loaded and ready")
 
 # Preparing things to work
-title = "# Demo for 7B Models - Quantized"
+title = f"# Demo for 7B Models - Quantized {model_uri_hf}"
 descr = '''
 Quantized to run in the free tier hosting.
 Have a quick way to test models or share them with others without hassle.
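
For context, a minimal sketch of how the touched region of app.py reads after this commit, assembled from the diff context above. The imports, the placeholder value of model_uri_hf, and the with open(...) wrapper around file.write(...) are assumptions and do not appear in the diff; only the lines shown there are confirmed.

# Sketch (not part of the commit) of the app.py region after this change.
import os
import requests
from llama_cpp import Llama  # llama-cpp-python binding

# Assumed placeholder; model_uri_hf is set earlier in app.py.
model_uri_hf = "https://huggingface.co/<repo>/resolve/main/model.bin"

print(f"debug: init model: {model_uri_hf}")

# Check if the model file already exists
if not os.path.isfile('model.bin'):
    print(f"debug: can't find model locally, downloading ...")
    # Download the model
    response = requests.get(model_uri_hf)
    with open('model.bin', 'wb') as file:  # assumed wrapper; the diff only shows file.write(...)
        file.write(response.content)

llm = Llama(model_path="./model.bin") # LLaMa model
print("debug: model loaded and ready")

# Preparing things to work
title = f"# Demo for 7B Models - Quantized {model_uri_hf}"
descr = '''
Quantized to run in the free tier hosting.
Have a quick way to test models or share them with others without hassle.
'''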