Update app.py
app.py CHANGED
@@ -38,25 +38,39 @@ TEXTO DE LA FACTURA:
 
 Responde en un solo párrafo claro y conciso en español:"""
 
+    # List of models that DO work (tested)
+    modelos = [
+        "Qwen/Qwen2.5-72B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "mistralai/Mistral-Nemo-Instruct-2407",
+        "HuggingFaceH4/zephyr-7b-beta"
+    ]
+
+    for modelo in modelos:
+        try:
+            print(f"🤖 Probando: {modelo}")
+            client = InferenceClient(token=token)
+
+            # Call the model
+            response = client.chat.completions.create(
+                model=modelo,
+                messages=[
+                    {"role": "user", "content": prompt}
+                ],
+                max_tokens=500,
+                temperature=0.3
+            )
+
+            # Extract the answer
+            resultado = response.choices[0].message.content
+            print(f"✅ Funcionó con {modelo}")
+            return resultado
 
+        except Exception as e:
+            print(f"❌ {modelo} falló: {str(e)[:100]}")
+            continue
 
-            return f"❌ Error: {str(e)}"
+    return "❌ Ningún modelo LLM funcionó. Verifica tu HF_TOKEN o intenta más tarde."
 
 # ============= MAIN FUNCTION =============
 def procesar_factura(pdf_file):
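In effect, the commit swaps a single hard-coded model call for a try-in-order fallback over several hosted models, returning the first answer that succeeds. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the `ask_with_fallback` name, the `HF_TOKEN` environment variable, and the sample prompt are illustrative and not part of app.py (where the token and prompt are built elsewhere in the file); the model list simply mirrors the diff.

import os

from huggingface_hub import InferenceClient

# Models to try, in order of preference (mirrors the list in the diff).
MODELS = [
    "Qwen/Qwen2.5-72B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "HuggingFaceH4/zephyr-7b-beta",
]

def ask_with_fallback(prompt: str) -> str:
    """Return the first successful chat completion, else an error message.

    `ask_with_fallback` and the HF_TOKEN env var are illustrative,
    not names taken from app.py.
    """
    client = InferenceClient(token=os.environ.get("HF_TOKEN"))
    for model in MODELS:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=500,
                temperature=0.3,  # low temperature: extraction, not creativity
            )
            return response.choices[0].message.content
        except Exception as exc:
            # Model gated, overloaded, or unavailable on the serverless API:
            # log a short reason and move on to the next candidate.
            print(f"{model} failed: {str(exc)[:100]}")
    return "No model responded. Check your HF_TOKEN or try again later."

if __name__ == "__main__":
    print(ask_with_fallback("Responde en una frase: ¿qué es una factura?"))

One deliberate difference from the diff: the client is built once before the loop rather than once per attempt; InferenceClient holds no per-model state here, so either placement works.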