|
|
import gradio as gr
import torch
from transformers import pipeline
|
|
|
|
|
# Load the fine-tuned text-generation model once at startup.
# On failure, generator is set to None so the code below can degrade
# gracefully instead of crashing at import time.
try:
    generator = pipeline(
        'text-generation',
        model='Kolumbus713/OpaGPT',
        # Use the GPU when one is available; device=-1 selects the CPU.
        # The original hard-coded device=0, which raises on CPU-only
        # machines and left generator as None with no usable fallback.
        device=0 if torch.cuda.is_available() else -1,
    )
except Exception as e:
    print(f"Fehler beim Laden des Modells: {e}")
    generator = None
|
|
|
|
|
def chat_with_opa(message, history):
    """Generate a chat reply from the fine-tuned OpaGPT model.

    Args:
        message: The user's latest message, used directly as the prompt.
        history: Prior chat turns supplied by gr.ChatInterface; unused
            here because only the latest message is fed to the model.

    Returns:
        The generated continuation with the echoed prompt stripped, or a
        German apology string if the model failed to load at startup.
    """
    if generator is None:
        return "Entschuldigung, das Modell konnte nicht geladen werden."

    response = generator(
        message,
        max_new_tokens=100,
        do_sample=True,
    )

    # The pipeline echoes the prompt at the start of generated_text.
    # removeprefix() strips only that leading copy; the original
    # str.replace(message, '') also deleted any later occurrence of the
    # user's words inside the generated reply, corrupting the answer.
    return response[0]['generated_text'].removeprefix(message).strip()
|
|
|
|
|
|
|
|
# Start the Gradio chat UI only when the model loaded successfully.
if generator:
    gr.ChatInterface(
        fn=chat_with_opa,
        title="OpaGPT - Dein feingetunter Chatbot",
        description="Frag OpaGPT nach weisen Ratschlägen oder Geschichten.",
    ).launch()
else:
    # Previously the script exited silently in this case; report why the
    # UI is not starting so the failure is visible in the logs.
    print("Gradio-UI wird nicht gestartet, da das Modell nicht geladen werden konnte.")