Upload InternVL2 implementation
Browse files- app_internvl2.py +36 -23
app_internvl2.py
CHANGED
|
@@ -197,31 +197,44 @@ def analyze_image(image, prompt):
|
|
| 197 |
# If somehow it's already a PIL Image
|
| 198 |
image_pil = image.convert('RGB')
|
| 199 |
|
| 200 |
-
#
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
response = response.result()
|
| 215 |
-
else:
|
| 216 |
-
# Standard synchronous execution
|
| 217 |
-
print("Using standard execution for model inference")
|
| 218 |
-
response = internvl2_pipeline((prompt, image_pil))
|
| 219 |
|
| 220 |
-
#
|
| 221 |
-
|
|
|
|
|
|
|
|
|
|
| 222 |
|
| 223 |
-
|
| 224 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 225 |
|
| 226 |
except Exception as e:
|
| 227 |
print(f"Error in image analysis: {str(e)}")
|
|
|
|
| 197 |
# If somehow it's already a PIL Image
|
| 198 |
image_pil = image.convert('RGB')
|
| 199 |
|
| 200 |
+
# Completely bypass asyncio by using a dedicated thread for model inference
|
| 201 |
+
import threading
|
| 202 |
+
import queue
|
| 203 |
+
|
| 204 |
+
result_queue = queue.Queue()
|
| 205 |
+
|
| 206 |
+
def run_inference_in_thread():
    """Worker-thread body: run the InternVL2 pipeline and report the outcome.

    Reads the closure variables ``internvl2_pipeline``, ``prompt`` and
    ``image_pil`` from the enclosing ``analyze_image`` scope, and puts a
    ``("success", text)`` or ``("error", message)`` tuple on the shared
    ``result_queue`` so the main thread can collect the result after
    ``join()``. Never raises: all failures are funneled into the queue.
    """
    try:
        # Run the model in a dedicated thread
        response = internvl2_pipeline((prompt, image_pil))
        # NOTE(review): assumes the pipeline response exposes `.text`
        # (lmdeploy-style); falls back to str() otherwise — confirm
        # against the pipeline's documented response type.
        result_text = response.text if hasattr(response, "text") else str(response)
        result_queue.put(("success", result_text))
    except Exception as e:
        # Report the failure to the main thread rather than letting the
        # daemon thread die silently with no queued result.
        result_queue.put(("error", str(e)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
|
| 215 |
+
# Start a dedicated thread for inference
|
| 216 |
+
print("Running model inference in a dedicated thread")
|
| 217 |
+
inference_thread = threading.Thread(target=run_inference_in_thread)
|
| 218 |
+
inference_thread.daemon = True # Allow the thread to be terminated when the main program exits
|
| 219 |
+
inference_thread.start()
|
| 220 |
|
| 221 |
+
# Wait for the thread to complete (with timeout)
|
| 222 |
+
inference_thread.join(timeout=120) # 2 minute timeout
|
| 223 |
+
|
| 224 |
+
if inference_thread.is_alive():
|
| 225 |
+
# If the thread is still running after timeout
|
| 226 |
+
return "Model inference timed out after 120 seconds. The model might be too slow on this hardware."
|
| 227 |
+
|
| 228 |
+
# Get the result from the queue
|
| 229 |
+
if not result_queue.empty():
|
| 230 |
+
status, result = result_queue.get()
|
| 231 |
+
if status == "error":
|
| 232 |
+
return f"Error in model inference: {result}"
|
| 233 |
+
else:
|
| 234 |
+
elapsed_time = time.time() - start_time
|
| 235 |
+
return result
|
| 236 |
+
else:
|
| 237 |
+
return "Unknown error: Model inference did not produce a result"
|
| 238 |
|
| 239 |
except Exception as e:
|
| 240 |
print(f"Error in image analysis: {str(e)}")
|