File size: 1,331 Bytes
bfe739b
30073ff
02a92ef
bfe739b
30073ff
 
02a92ef
 
 
bfe739b
02a92ef
bfe739b
 
02a92ef
 
 
 
 
 
 
 
 
 
bfe739b
 
 
 
 
02a92ef
 
 
bfe739b
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# --- Model and pipeline setup -------------------------------------------
# NOTE(review): "TheBloke/Llama-2-7B-Chat-GGML" is a GGML-quantized
# checkpoint; transformers' AutoModelForCausalLM cannot load GGML weights
# (they require llama.cpp / ctransformers). A transformers-format repo
# (e.g. "meta-llama/Llama-2-7b-chat-hf") is presumably intended — confirm.
model_name = "TheBloke/Llama-2-7B-Chat-GGML"

# Fast (Rust-backed) tokenizer for the chosen checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

model = AutoModelForCausalLM.from_pretrained(model_name)
# Use the GPU only when one is present instead of crashing on CPU-only hosts.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Text-generation pipeline with moderate sampling settings.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,       # cap on tokens generated per reply
    temperature=0.6,          # mildly creative sampling
    top_p=0.95,               # nucleus sampling cutoff
    repetition_penalty=1.15,  # discourage repetitive loops
)

# System message conceptually prepended to every conversation.
system_message = """
You are an AI Query bot that will answer all of the user questions to the best of your ability. You are helpful, respectful, and honest. Your job is to answer the user's query to the best of your ability. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
You will be asked questions randomly on any topic. Just answer them to the best of your ability without a single lie and harmful information.
"""


def run(input, history=None):
    """Generate one chat reply for *input* via the text-generation pipeline.

    Fixes two defects in the original: the default ``history=memory``
    referenced an undefined name (a NameError at definition time), and the
    body called ``chat.predict``, which is defined nowhere in this file.

    Args:
        input: The user's message (gradio passes the current message here).
        history: Prior (user, bot) turns supplied by gr.ChatInterface;
            accepted for interface compatibility but not yet used.

    Returns:
        The model's reply as a string, with the echoed prompt stripped.
    """
    prompt = f"{system_message}\nUser: {input}\nAssistant:"
    result = pipe(prompt)
    # pipeline("text-generation") returns [{"generated_text": "..."}] and
    # includes the prompt in the output; strip it so only the reply remains.
    text = result[0]["generated_text"]
    return text[len(prompt):].strip() if text.startswith(prompt) else text

# Build and launch the chat UI. gr.ChatInterface calls run(message, history)
# itself; the original passed gr.Interface-only kwargs (inputs=, outputs=,
# show_system_message=, system_message=) that ChatInterface rejects with a
# TypeError, so the system message is surfaced via `description` instead.
output = gr.ChatInterface(
    run,
    title="AI Query Bot",
    description=system_message,  # shown above the chat in place of the removed kwargs
)
output.launch(share=True)  # share=True creates a temporary public gradio link