Update app.py
app.py CHANGED
@@ -1,34 +1,39 @@
 import gradio as gr
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
-MODEL_ID = "microsoft/DialoGPT-medium" # small
+MODEL_ID = "microsoft/DialoGPT-medium" # small chatbot model
 
 # Load model & tokenizer
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
-
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
 
 def chat(history, message):
+    # Convert list of tuples -> plain text context
     prompt = ""
-    for
-        try:
-            prompt += f"User: {history[i]}\nBot: {history[i+1]}\n"
-        except:
-            pass
+    for user, bot in history:
+        prompt += f"User: {user}\nBot: {bot}\n"
     prompt += f"User: {message}\nBot:"
+
     out = generator(prompt, max_length=200, do_sample=True, top_k=50, top_p=0.95, num_return_sequences=1)
     reply = out[0]['generated_text']
+
     if "Bot:" in reply:
         reply = reply.split("Bot:")[-1].strip()
-
+
+    # Append new message pair as tuple (user, bot)
+    history.append((message, reply))
     return history, history
 
 with gr.Blocks() as demo:
-    gr.Markdown("## Chatapt")
+    gr.Markdown("## 🤖 Chatapt")
+
     chatbot = gr.Chatbot()
     state = gr.State([])
-
+
+    with gr.Row():
+        msg = gr.Textbox(show_label=False, placeholder="Type a message and press Enter...")
+
     msg.submit(chat, [state, msg], [state, chatbot])
 
 demo.launch()
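For readers skimming the diff, here is a minimal standalone sketch (not part of the commit) of what the rewritten loop in `chat()` produces: the tuple-based history flattens into a DialoGPT-style prompt. The sample history and message below are invented for illustration.

```python
# Sketch only: mirrors the prompt-building logic from the updated chat().
# The history/message values are made up for illustration.
history = [("Hi there", "Hello! How can I help?")]
message = "Tell me a joke"

prompt = ""
for user, bot in history:
    prompt += f"User: {user}\nBot: {bot}\n"
prompt += f"User: {message}\nBot:"

print(prompt)
# User: Hi there
# Bot: Hello! How can I help?
# User: Tell me a joke
# Bot:
```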
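One caveat the diff keeps from the old version: `max_length=200` counts prompt tokens plus generated tokens, so as the accumulated history grows the pipeline has less and less room to generate. A possible follow-up (a sketch, not part of this commit) is to cap only the continuation with `max_new_tokens`:

```python
# Sketch only: cap the generated continuation instead of the total length.
out = generator(
    prompt,
    max_new_tokens=100,   # counts newly generated tokens, unlike max_length
    do_sample=True,
    top_k=50,
    top_p=0.95,
    num_return_sequences=1,
)
```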
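A second optional follow-up, also not part of this commit: after `msg.submit(...)` fires, the textbox keeps its text. Assuming a Gradio version whose event listeners support `.then()` chaining, the usual pattern is to clear `msg` with a second callback:

```python
# Sketch only: clear the textbox once chat() has updated the chatbot.
msg.submit(chat, [state, msg], [state, chatbot]).then(
    lambda: "",  # empty string replaces the textbox contents
    None,        # no inputs
    msg,         # output target: the textbox itself
)
```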