import gradio as gr
from transformers import AutoTokenizer

from pipeline import DiscoursePipeline  # your custom code

# Load the tokenizer and wrap the segmentation model in the custom pipeline.
model_id = "poyum/test_discut"
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = DiscoursePipeline(model_id=model_id, tokenizer=tokenizer)


def predict(text):
    """Run the discourse pipeline on the submitted text."""
    return pipe(text)


# Minimal text-in / text-out Gradio demo.
demo = gr.Interface(fn=predict, inputs="text", outputs="text")
demo.launch()
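
`DiscoursePipeline` comes from `pipeline.py`, which is not shown on this page. The class below is only a hedged sketch of the interface the app assumes (a callable that loads a token-classification model and returns segmented text); the constructor signature, the `label != 0` boundary convention, and the `"|"` separator are assumptions, not the repository's actual implementation.

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer


class DiscoursePipeline:
    """Hypothetical wrapper around a token-classification model for discourse segmentation."""

    def __init__(self, model_id, tokenizer=None):
        self.tokenizer = tokenizer or AutoTokenizer.from_pretrained(model_id)
        self.model = AutoModelForTokenClassification.from_pretrained(model_id)
        self.model.eval()

    def __call__(self, text):
        # Tokenize the input and predict one label per token.
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True)
        with torch.no_grad():
            logits = self.model(**inputs).logits
        labels = logits.argmax(dim=-1)[0].tolist()
        tokens = self.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())

        # Mark predicted segment boundaries with "|" (assumes label 0 means "inside segment").
        pieces = []
        for token, label in zip(tokens, labels):
            if token in self.tokenizer.all_special_tokens:
                continue
            if label != 0 and pieces:
                pieces.append("|")
            pieces.append(token)
        return self.tokenizer.convert_tokens_to_string(pieces)
```

With an interface like this, `pipe(text)` returns a plain string, which is what the Gradio `outputs="text"` component in the app expects.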