bipulsardar421 commited on
Commit
261ddb8
·
1 Parent(s): e4fdbf4
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -8,17 +8,15 @@ MODEL = "google/gemma-3-270m"
8
  # Load the tokenizer for MODEL, authenticating the download with HF_TOKEN
  # (presumably required because the Gemma repo is gated — confirm).
  tokenizer = AutoTokenizer.from_pretrained(MODEL, token=HF_TOKEN)
9
  # Load the causal-LM weights for the same repo with the same auth token.
  model = AutoModelForCausalLM.from_pretrained(MODEL, token=HF_TOKEN)
10
 
11
def chat(message, history):
    """Generate a model reply to *message* and record the exchange.

    Parameters:
        message: the user's input text.
        history: list of (message, response) pairs from earlier turns,
            or None on the very first call (Gradio's initial "state").

    Returns:
        (response, updated_history) — matching the Interface's
        ["text", "state"] outputs.
    """
    inputs = tokenizer(message, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    # NOTE(review): outputs[0] includes the prompt tokens, so the decoded
    # response echoes the user's message before the continuation — confirm
    # whether that echo is intended for this UI.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Gradio initializes the "state" input to None; treat that as an empty
    # history instead of raising TypeError on `None + list`.
    history = (history or []) + [(message, response)]
    return response, history
18
 
19
# Wire the chat function into a Gradio UI: a text box plus a state slot on
# both the input and the output side, then start the web server.
demo = gr.Interface(
    fn=chat,
    inputs=["text", "state"],
    outputs=["text", "state"],  # state must appear in outputs as well
    title="Gemma 3 (270M)",
)
demo.launch()
 
8
  # Load the tokenizer for MODEL, authenticating the download with HF_TOKEN
  # (presumably required because the Gemma repo is gated — confirm).
  tokenizer = AutoTokenizer.from_pretrained(MODEL, token=HF_TOKEN)
9
  # Load the causal-LM weights for the same repo with the same auth token.
  model = AutoModelForCausalLM.from_pretrained(MODEL, token=HF_TOKEN)
10
 
11
def chat(message):
    """Generate a single-turn reply to *message* with the Gemma model.

    Parameters:
        message: the user's input text.

    Returns:
        The model's continuation as a plain string.
    """
    inputs = tokenizer(message, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    # model.generate returns prompt + continuation as one token sequence;
    # slice off the prompt so the reply does not echo the user's message.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
 
 
16
 
17
# Build the single-textbox demo UI around the chat function and launch it.
app = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Gemma 3 (270M)",
)
app.launch()