VeuReu committed
Commit 82a79d6 · verified · 1 Parent(s): 2c33bb9

Upload 8 files

Files changed (3)
  1. app.py +31 -45
  2. examples/demo.jpg +0 -0
  3. requirements.txt +5 -0
app.py CHANGED
@@ -1,85 +1,71 @@
 # app.py
+import base64
+import io
 import os
-from typing import Tuple, List
+from typing import Dict
 
 import gradio as gr
-import spaces  # <- enables the ZeroGPU decorators
+import spaces
 import torch
 from PIL import Image
 from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration
 
 MODEL_ID = os.environ.get("MODEL_ID", "BSC-LT/salamandra-7b-vision")
-DTYPE = torch.float16  # half precision for H200/A100
-DEVICE = "cuda"  # ZeroGPU assigns a GPU per call via @spaces.GPU
+DTYPE = torch.float16
+DEVICE = "cuda"
 
-# Lazy loading: only the first time the function runs on GPU
 _model = None
 _processor = None
 
 def _lazy_load():
     global _model, _processor
     if _model is None or _processor is None:
-        _processor = AutoProcessor.from_pretrained(MODEL_ID)
+        _processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
         _model = LlavaOnevisionForConditionalGeneration.from_pretrained(
             MODEL_ID,
             torch_dtype=DTYPE,
            low_cpu_mem_usage=True,
             trust_remote_code=True,
-            device_map=None,  # we move to cuda explicitly under @spaces.GPU
             use_safetensors=True,
+            device_map=None,
         )
     return _model, _processor
 
-@spaces.GPU  # <- ensures the function runs with a GPU assigned
-def describe(image: Image.Image, prompt_text: str, max_new_tokens: int, temperature: float) -> str:
-    """
-    Return a description from an image plus a text prompt.
-    """
-    model, processor = _lazy_load()
-
-    # Chat-template formatting, as recommended by the model card
-    conversation = [
-        {
-            "role": "user",
-            "content": [
-                {"type": "image"},
-                {"type": "text", "text": prompt_text or "Descriu la imatge amb el màxim detall possible."},
-            ],
-        }
-    ]
-    prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
-
-    # Move to the GPU right before inference (ZeroGPU)
+def _compose_prompt(user_text: str):
+    convo = [{"role": "user", "content": [{"type": "image"},
+             {"type": "text", "text": user_text or "Describe la imagen con detalle."}]}]
+    return convo
+
+@spaces.GPU  # ZeroGPU assigns a GPU for the duration of this call
+def infer_core(image: Image.Image, text: str, max_new_tokens: int = 256, temperature: float = 0.7) -> str:
+    model, processor = _lazy_load()
+    prompt = processor.apply_chat_template(_compose_prompt(text), add_generation_prompt=True)
     model = model.to(DEVICE)
     inputs = processor(images=image, text=prompt, return_tensors="pt").to(DEVICE, DTYPE)
     with torch.inference_mode():
-        output = model.generate(
-            **inputs,
-            max_new_tokens=int(max_new_tokens),
-            temperature=float(temperature),
-        )
-
-    text = processor.decode(output[0], skip_special_tokens=True)
-    return text.strip()
-
-with gr.Blocks(title="Salamandra Vision 7B (ZeroGPU)") as demo:
-    gr.Markdown("# Salamandra-Vision 7B · ZeroGPU\nEnvía una imagen y un texto/prompt, recibe una descripción.")
+        out = model.generate(
+            **inputs,
+            max_new_tokens=int(max_new_tokens),
+            temperature=float(temperature),
+            do_sample=float(temperature) > 0,  # temperature is ignored without sampling
+        )
+    return processor.decode(out[0], skip_special_tokens=True).strip()
 
+# ---------- UI ----------
+with gr.Blocks(title="Salamandra Vision 7B · ZeroGPU") as demo:
+    gr.Markdown("## Salamandra-Vision 7B · ZeroGPU\nImagen + texto → descripción.")
     with gr.Row():
         with gr.Column():
             in_img = gr.Image(label="Imagen", type="pil")
-            in_txt = gr.Textbox(
-                label="Texto/prompt",
-                value="Describe la imagen con el mayor detalle posible (en catalán o español)."
-            )
+            in_txt = gr.Textbox(label="Texto/prompt", value="Describe la imagen con detalle (ES/CA).")
             max_new = gr.Slider(16, 1024, value=256, step=16, label="max_new_tokens")
             temp = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="temperature")
             btn = gr.Button("Generar", variant="primary")
         with gr.Column():
             out = gr.Textbox(label="Descripción", lines=18)
-
-    btn.click(describe, inputs=[in_img, in_txt, max_new, temp], outputs=out, api_name="describe")
+    btn.click(infer_core, [in_img, in_txt, max_new, temp], out, api_name="describe")
 
+# ---------- Pure API (no UI) ----------
+# A clean endpoint (JSON with a base64-encoded image) that does not depend on
+# UI components. gr.api requires Gradio 5+ and must be registered inside a
+# Blocks context; no extra @spaces.GPU is needed since infer_core carries one.
+def describe_raw(image_b64: str, text: str = "Describe la imagen con detalle.",
+                 max_new_tokens: int = 256, temperature: float = 0.7) -> Dict[str, str]:
+    img = Image.open(io.BytesIO(base64.b64decode(image_b64)))
+    result = infer_core(img, text, max_new_tokens, temperature)
+    return {"text": result}
+
+with demo:
+    gr.api(describe_raw, api_name="describe_raw")
 
-# Gradio queue: useful for ZeroGPU and demand spikes
-demo.queue(concurrency_count=1, max_size=16).launch()
+# Queueing helps with ZeroGPU and demand spikes; note Gradio 4 renamed
+# concurrency_count to default_concurrency_limit.
+demo.queue(default_concurrency_limit=1, max_size=16).launch()
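A minimal client sketch for the UI endpoint registered above (api_name="describe"). The Space id "VeuReu/salamandra-vision" is a placeholder, not taken from this commit; substitute the actual Space. Requires the gradio_client package.

    # client_describe.py -- hypothetical client for the /describe endpoint
    from gradio_client import Client, handle_file

    client = Client("VeuReu/salamandra-vision")  # placeholder Space id
    result = client.predict(
        handle_file("examples/demo.jpg"),   # image
        "Describe la imagen con detalle.",  # text prompt
        256,                                # max_new_tokens
        0.7,                                # temperature
        api_name="/describe",
    )
    print(result)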
examples/demo.jpg ADDED
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+# app (ZeroGPU Gradio)
 gradio>=4.44.0
 spaces>=0.25.0
 transformers>=4.44.0
@@ -5,3 +6,7 @@ torch>=2.2
 accelerate>=0.30.0
 safetensors>=0.4.2
 pillow>=10.3
+
+# clients
+#requests>=2.31.0
+#streamlit>=1.36.0
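The commented-out requests dependency hints at a plain-HTTP client for the describe_raw endpoint. A hedged sketch, assuming Gradio 5's documented two-step /gradio_api/call REST flow (submit, then fetch by event id); the base URL is a placeholder.

    # client_raw.py -- hypothetical REST client for describe_raw (JSON base64)
    import base64
    import requests

    BASE = "https://veureu-salamandra-vision.hf.space"  # placeholder URL

    with open("examples/demo.jpg", "rb") as f:
        b64 = base64.b64encode(f.read()).decode()

    # Step 1: submit the job; Gradio replies with an event id.
    r = requests.post(
        f"{BASE}/gradio_api/call/describe_raw",
        json={"data": [b64, "Describe la imagen con detalle.", 256, 0.7]},
    )
    event_id = r.json()["event_id"]

    # Step 2: read the result stream for that event id (server-sent events).
    r = requests.get(f"{BASE}/gradio_api/call/describe_raw/{event_id}")
    print(r.text)  # the final "data:" line carries {"text": ...}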