lucid-hf committed on
Commit
ebf66a5
·
verified ·
1 Parent(s): 13a86c1

CI: deploy Docker/PDM Space

Browse files
deploy-development.yaml CHANGED
@@ -70,7 +70,6 @@ steps:
70
  "training_model/*"
71
  ]
72
  )
73
- PY
74
  displayName: "Deploy to Hugging Face Space (Docker/PDM)"
75
  env:
76
  HF_TOKEN: $(HF_TOKEN_DEV) # Add this as a secret variable in Pipeline settings
 
70
  "training_model/*"
71
  ]
72
  )
 
73
  displayName: "Deploy to Hugging Face Space (Docker/PDM)"
74
  env:
75
  HF_TOKEN: $(HF_TOKEN_DEV) # Add this as a secret variable in Pipeline settings
deploy-main.yaml CHANGED
@@ -37,8 +37,8 @@ steps:
37
  import os
38
  from huggingface_hub import HfApi, upload_folder
39
 
40
- token = os.environ["HF_TOKEN_PROD"] # provided via Pipeline variable (Secret)
41
- space_id = os.environ["HF_SPACE_ID_PROD"] # from variables above
42
 
43
  api = HfApi(token=token)
44
 
@@ -61,7 +61,6 @@ steps:
61
  ignore_patterns=[
62
  ".git/*",
63
  "__pycache__/*",
64
- "*.mp4", "*.avi", "*.mov",
65
  "*.zip", "*.tar", "*.tar.gz",
66
  "*.ipynb", "*.ipynb_checkpoints/*",
67
  "venv/*", ".venv/*",
@@ -71,7 +70,6 @@ steps:
71
  "training_model/*"
72
  ]
73
  )
74
- PY
75
  displayName: "Deploy to Hugging Face Space (Docker/PDM)"
76
  env:
77
  HF_TOKEN: $(HF_TOKEN_PROD) # Add this as a secret variable in Pipeline settings
 
37
  import os
38
  from huggingface_hub import HfApi, upload_folder
39
 
40
+ token = os.environ["HF_TOKEN"] # provided via Pipeline variable (Secret)
41
+ space_id = os.environ["HF_SPACE_ID"] # from variables above
42
 
43
  api = HfApi(token=token)
44
 
 
61
  ignore_patterns=[
62
  ".git/*",
63
  "__pycache__/*",
 
64
  "*.zip", "*.tar", "*.tar.gz",
65
  "*.ipynb", "*.ipynb_checkpoints/*",
66
  "venv/*", ".venv/*",
 
70
  "training_model/*"
71
  ]
72
  )
 
73
  displayName: "Deploy to Hugging Face Space (Docker/PDM)"
74
  env:
75
  HF_TOKEN: $(HF_TOKEN_PROD) # Add this as a secret variable in Pipeline settings
services/app_service/deim_model.py CHANGED
@@ -64,7 +64,28 @@ class DeimHgnetV2MDrone(BaseModel):
64
  + (", ".join(sorted(set(available))) or "<none>")
65
  )
66
 
67
- self.device = "cuda" if device == "gpu" and torch.cuda.is_available() else "cpu"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  cfg_path = weights_path.with_suffix(".json")
69
  if not cfg_path.exists():
70
  raise FileNotFoundError(
@@ -88,7 +109,12 @@ class DeimHgnetV2MDrone(BaseModel):
88
  )
89
 
90
  self.cfg = json.load(open(cfg_path, "r"))
 
 
91
  self.model = torch.jit.load(weights_path, map_location=self.device).eval()
 
 
 
92
 
93
  def _preprocess_image(self, image: Image):
94
  transforms = T.Compose(
@@ -173,8 +199,21 @@ class DeimHgnetV2MDrone(BaseModel):
173
 
174
  def predict_image(self, image: Image, min_confidence: float) -> Image:
175
  tensor = self._preprocess_image(image.copy())
176
- with torch.no_grad():
177
- labels, bboxes = self.model(tensor)
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  dets = self._postprocess_detections(labels, bboxes, min_confidence, image.size)
179
  dets = self._nms(dets)
180
  image_np: np.ndarray = np.array(image)
 
64
  + (", ".join(sorted(set(available))) or "<none>")
65
  )
66
 
67
+ # Check if CUDA is available and if we have CUDA model weights
68
+ cuda_available = torch.cuda.is_available()
69
+ cuda_model_exists = any(
70
+ "cuda" in str(p) for p in candidate_paths if p and p.exists()
71
+ )
72
+
73
+ if device == "gpu" and cuda_available and cuda_model_exists:
74
+ self.device = "cuda"
75
+ print(f"Using GPU device: {self.device}")
76
+ else:
77
+ self.device = "cpu"
78
+ if device == "gpu":
79
+ if not cuda_available:
80
+ print("Warning: GPU requested but CUDA not available, using CPU")
81
+ elif not cuda_model_exists:
82
+ print(
83
+ "Warning: GPU requested but CUDA model not found, using CPU model"
84
+ )
85
+ else:
86
+ print("Warning: GPU requested but falling back to CPU")
87
+ else:
88
+ print(f"Using CPU device: {self.device}")
89
  cfg_path = weights_path.with_suffix(".json")
90
  if not cfg_path.exists():
91
  raise FileNotFoundError(
 
109
  )
110
 
111
  self.cfg = json.load(open(cfg_path, "r"))
112
+ print(f"Loading model from: {weights_path}")
113
+ print(f"Model device: {self.device}")
114
  self.model = torch.jit.load(weights_path, map_location=self.device).eval()
115
+ # Ensure model is on the correct device
116
+ self.model = self.model.to(self.device)
117
+ print(f"Model loaded successfully on device: {self.device}")
118
 
119
  def _preprocess_image(self, image: Image):
120
  transforms = T.Compose(
 
199
 
200
  def predict_image(self, image: Image, min_confidence: float) -> Image:
201
  tensor = self._preprocess_image(image.copy())
202
+ try:
203
+ with torch.no_grad():
204
+ labels, bboxes = self.model(tensor)
205
+ except RuntimeError as e:
206
+ if "device" in str(e).lower():
207
+ print(f"Device error: {e}")
208
+ print(
209
+ f"Tensor device: {tensor.device}, Model device: {next(self.model.parameters()).device}"
210
+ )
211
+ # Try to move tensor to model's device
212
+ tensor = tensor.to(next(self.model.parameters()).device)
213
+ with torch.no_grad():
214
+ labels, bboxes = self.model(tensor)
215
+ else:
216
+ raise
217
  dets = self._postprocess_detections(labels, bboxes, min_confidence, image.size)
218
  dets = self._nms(dets)
219
  image_np: np.ndarray = np.array(image)