fix warning error
- app.py +4 -5
- requirements.txt +1 -0
app.py
CHANGED
@@ -3,10 +3,9 @@ import streamlit as st
 from PyPDF2 import PdfReader
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.chains.question_answering import load_qa_chain
-from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_huggingface import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
 from langchain_community.llms import HuggingFacePipeline
-
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 
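For context: recent LangChain releases moved HuggingFaceEmbeddings out of langchain_community into the dedicated langchain-huggingface partner package, and the old import path emits a LangChainDeprecationWarning at startup. The removed import line is truncated on this page; from langchain_community.embeddings is the standard pre-migration path, so it is restored above as an assumption. A minimal sketch of the migrated usage, assuming langchain-huggingface and sentence-transformers are installed (the model name is an illustrative embedding checkpoint, not taken from the commit):

from langchain_huggingface import HuggingFaceEmbeddings

# Illustrative sentence-transformers checkpoint, not the model this Space uses.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector = embeddings.embed_query("What does the paper conclude?")
print(len(vector))  # dimensionality of the returned embedding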
@@ -32,13 +31,14 @@ def split_text(text):
 
 # Create the FAISS vector store
 def create_knowledge_base(chunks):
-    embeddings = HuggingFaceEmbeddings(...)
+    model_name = "halyn/gemma2-2b-it-finetuned-paperqa"
+    embeddings = HuggingFaceEmbeddings(model_name=model_name)
     return FAISS.from_texts(chunks, embeddings)
 
 # Load the Hugging Face model
 def load_model():
     model_name = "halyn/gemma2-2b-it-finetuned-paperqa"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name, clean_up_tokenization_spaces=False)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.1)
 
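Two warnings are addressed in this hunk. First, the embeddings for the FAISS store are now constructed explicitly from a named model (the removed one-liner is truncated on this page, so its exact arguments above are an assumption). Second, passing clean_up_tokenization_spaces=False silences the transformers FutureWarning about that argument's default changing in a future release. One caveat: HuggingFaceEmbeddings loads its model through sentence-transformers, so pointing it at a 2B causal LM is an unusually heavy choice for embeddings compared with a dedicated embedding checkpoint. A minimal sketch of the updated create_knowledge_base in use (chunk texts and query are illustrative):

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

def create_knowledge_base(chunks):
    model_name = "halyn/gemma2-2b-it-finetuned-paperqa"
    embeddings = HuggingFaceEmbeddings(model_name=model_name)
    return FAISS.from_texts(chunks, embeddings)

# Illustrative chunks; in the app these come from the PDF text splitter.
kb = create_knowledge_base([
    "Transformers rely on self-attention.",
    "FAISS performs nearest-neighbor search over dense vectors.",
])
print(kb.similarity_search("What powers transformers?", k=1)[0].page_content)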
@@ -49,7 +49,6 @@ def setup_qa_chain():
     llm = HuggingFacePipeline(pipeline=pipe)
     qa_chain = load_qa_chain(llm, chain_type="stuff")
 
-
 # Main page UI
 def main_page():
     st.title("Welcome to GemmaPaperQA")
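For orientation, a hedged end-to-end sketch of how these pieces fit together, using the legacy load_qa_chain run interface that app.py already relies on; the chunks and question are illustrative stand-ins for the PDF text and user input:

# Assumes load_model() and create_knowledge_base() as defined in app.py above.
pipe = load_model()                      # transformers text-generation pipeline
llm = HuggingFacePipeline(pipeline=pipe)
qa_chain = load_qa_chain(llm, chain_type="stuff")

chunks = ["The paper fine-tunes Gemma 2 2B for paper QA."]  # illustrative
kb = create_knowledge_base(chunks)
question = "What model does the paper use?"                 # illustrative
docs = kb.similarity_search(question, k=3)
print(qa_chain.run(input_documents=docs, question=question))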
requirements.txt
CHANGED
@@ -11,3 +11,4 @@ requests==2.32.3
 huggingface-hub==0.25.1
 sentence-transformers==3.1.1
 peft==0.2.0
+langchain-huggingface