Update app.py
app.py CHANGED
@@ -88,19 +88,24 @@ class RAGPipeline:
             self.retriever = SentenceTransformerRetriever()
             self.documents = []
             self.device = torch.device("cpu")
-
-
-
+
+            # Model path with absolute path
+            current_dir = os.path.dirname(os.path.abspath(__file__))
+            self.model_path = os.path.join(current_dir, "models", "mistral-7b-v0.1.Q4_K_M.gguf")
+
+            # Initialize model
+            self.llm = self.get_model()
 
         except Exception as e:
             logging.error(f"Error in RAGPipeline initialization: {str(e)}")
             raise
 
-    @st.cache_resource
-    def
-    """
+    @st.cache_resource(show_spinner=False)
+    def get_model(_self):
+        """Get or initialize the model with caching"""
         try:
             if not os.path.exists(_self.model_path):
+                os.makedirs(os.path.dirname(_self.model_path), exist_ok=True)
                 st.info("Downloading model... This may take a while.")
                 direct_url = "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf"
                 _self.download_file_with_progress(direct_url, _self.model_path)
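Note: get_model takes _self rather than self because @st.cache_resource hashes a function's arguments to build its cache key, and a leading underscore tells Streamlit to skip hashing that argument. The helper download_file_with_progress is called here but lies outside this diff; below is a minimal sketch of such a helper, assuming the requests library and Streamlit's progress bar (only the name and signature come from the call above, the body is illustrative):

import requests
import streamlit as st

def download_file_with_progress(url: str, destination: str) -> None:
    # Stream in 1 MB chunks so the multi-gigabyte GGUF file never sits fully in memory.
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    total_size = int(response.headers.get("content-length", 0))
    progress_bar = st.progress(0)
    downloaded = 0
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size=1024 * 1024):
            f.write(chunk)
            downloaded += len(chunk)
            if total_size:
                progress_bar.progress(min(downloaded / total_size, 1.0))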
@@ -112,17 +117,19 @@ class RAGPipeline:
             if os.path.getsize(_self.model_path) < 1000000:  # Less than 1MB
                 os.remove(_self.model_path)
                 raise ValueError("Downloaded model file is too small, likely corrupted")
-
+
             llm_config = {
+                "model_path": _self.model_path,
                 "n_ctx": 2048,
                 "n_threads": 4,
                 "n_batch": 512,
                 "n_gpu_layers": 0,
                 "verbose": False
             }
-
-
+
+            model = Llama(**llm_config)
             st.success("Model loaded successfully!")
+            return model
 
         except Exception as e:
             st.error(f"Error initializing model: {str(e)}")
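Moving "model_path" into llm_config lets the whole dictionary be splatted into the constructor with Llama(**llm_config). A self-contained sketch of how this configuration drives llama-cpp-python, with an illustrative prompt:

from llama_cpp import Llama

llm_config = {
    "model_path": "models/mistral-7b-v0.1.Q4_K_M.gguf",
    "n_ctx": 2048,      # context window, in tokens
    "n_threads": 4,     # CPU threads used for inference
    "n_batch": 512,     # prompt tokens evaluated per batch
    "n_gpu_layers": 0,  # 0 keeps everything on the CPU, matching torch.device("cpu") above
    "verbose": False,
}

llm = Llama(**llm_config)
result = llm("Q: What sport does ESPN mainly cover? A:", max_tokens=64, stop=["Q:"])
print(result["choices"][0]["text"])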
@@ -393,7 +400,7 @@ def initialize_rag_pipeline():
         for directory in ['models', 'ESPN_data', 'embeddings_cache']:
             os.makedirs(directory, exist_ok=True)
 
-        # Load embeddings from Drive
+        # Load embeddings from Drive first
         drive_file_id = "1MuV63AE9o6zR9aBvdSDQOUextp71r2NN"
         with st.spinner("Loading embeddings from Google Drive..."):
            cache_data = load_from_drive(drive_file_id)
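load_from_drive is defined elsewhere in app.py and is not part of this diff. Since its result is later read as cache_data['documents'] and cache_data['embeddings'], one plausible sketch uses the gdown package to fetch a pickled cache (the pickle layout and cache path are assumptions inferred from those two keys):

import logging
import os
import pickle
import gdown

def load_from_drive(file_id: str):
    """Download a pickled embeddings cache from Google Drive; None on failure."""
    cache_path = os.path.join("embeddings_cache", "embeddings_cache.pkl")
    try:
        if not os.path.exists(cache_path):
            gdown.download(id=file_id, output=cache_path, quiet=False)
        with open(cache_path, "rb") as f:
            return pickle.load(f)  # expected keys: 'documents', 'embeddings'
    except Exception as e:
        logging.error(f"Drive download failed: {str(e)}")
        return None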
@@ -401,26 +408,20 @@ def initialize_rag_pipeline():
             st.error("Failed to load embeddings from Google Drive")
             st.stop()
 
-        #
+        # Now initialize pipeline
         data_folder = "ESPN_data"
-        rag = RAGPipeline(data_folder)
+        rag = RAGPipeline(data_folder)
 
         # Store embeddings
         rag.documents = cache_data['documents']
         rag.retriever.store_embeddings(cache_data['embeddings'])
 
-        st.success("System initialized successfully!")
         return rag
 
     except Exception as e:
         logging.error(f"Pipeline initialization error: {str(e)}")
         st.error(f"Failed to initialize the system: {str(e)}")
         raise
-
-    except Exception as e:
-        logging.error(f"Pipeline initialization error: {str(e)}")
-        st.error(f"Failed to initialize the system: {str(e)}")
-        raise
 
 def main():
     try:
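This hunk also drops a duplicated except Exception block left over from an earlier edit. SentenceTransformerRetriever.store_embeddings likewise sits outside this diff; below is a minimal sketch of a retriever that accepts the precomputed embeddings loaded above and ranks documents by cosine similarity (only the class and method names come from the calls in the diff; the model name and the retrieve method are assumptions):

import numpy as np
from sentence_transformers import SentenceTransformer

class SentenceTransformerRetriever:
    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        self.model = SentenceTransformer(model_name, device="cpu")
        self.doc_embeddings = None

    def store_embeddings(self, embeddings: np.ndarray) -> None:
        # Keep precomputed document embeddings, L2-normalized for cosine similarity.
        norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
        self.doc_embeddings = embeddings / np.clip(norms, 1e-12, None)

    def retrieve(self, query: str, top_k: int = 5) -> np.ndarray:
        # Embed the query and return indices of the top_k most similar documents.
        query_vec = self.model.encode([query], normalize_embeddings=True)[0]
        scores = self.doc_embeddings @ query_vec
        return np.argsort(scores)[::-1][:top_k]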