Spaces:
Sleeping
Sleeping
Update streamlit_app.py
Browse files- streamlit_app.py +181 -247
streamlit_app.py
CHANGED
|
@@ -1,315 +1,249 @@
|
|
| 1 |
-
# ---
|
| 2 |
-
import base64
|
| 3 |
-
import requests
|
| 4 |
|
| 5 |
import os
|
| 6 |
-
os.environ.setdefault("HOME", "/tmp") # avoid '/.streamlit' permission issue
|
| 7 |
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
|
| 8 |
|
| 9 |
import io
|
| 10 |
import time
|
| 11 |
-
import
|
| 12 |
-
|
| 13 |
-
|
| 14 |
import pandas as pd
|
| 15 |
import streamlit as st
|
| 16 |
-
from PIL import Image
|
| 17 |
from supabase import create_client, Client
|
| 18 |
-
from huggingface_hub import InferenceClient
|
| 19 |
|
| 20 |
# ------------------------ Page ------------------------
|
| 21 |
st.set_page_config(page_title="Care Count Inventory", layout="centered")
|
| 22 |
st.title("π¦ Care Count Inventory")
|
| 23 |
-
st.caption("
|
| 24 |
|
| 25 |
-
# ------------------------ Secrets
|
| 26 |
def get_secret(name: str, default: str | None = None) -> str | None:
|
|
|
|
| 27 |
return os.getenv(name) or st.secrets.get(name, default)
|
| 28 |
|
| 29 |
SUPABASE_URL = get_secret("SUPABASE_URL")
|
| 30 |
SUPABASE_KEY = get_secret("SUPABASE_KEY")
|
| 31 |
-
HF_TOKEN = get_secret("HF_TOKEN", "")
|
| 32 |
-
|
| 33 |
if not SUPABASE_URL or not SUPABASE_KEY:
|
| 34 |
-
st.error("
|
| 35 |
-
st.stop()
|
| 36 |
-
|
| 37 |
-
if not HF_TOKEN:
|
| 38 |
-
st.error("β Missing HF_TOKEN. Create a **Read** token at https://huggingface.co/settings/tokens "
|
| 39 |
-
"and add it in Space β Settings β Secrets β HF_TOKEN. Then restart the Space.")
|
| 40 |
st.stop()
|
| 41 |
|
| 42 |
sb: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
|
| 43 |
-
hf = InferenceClient(token=HF_TOKEN)
|
| 44 |
|
| 45 |
-
#
|
| 46 |
-
|
| 47 |
-
VQA_MODEL
|
| 48 |
-
CAP_MODEL = "Salesforce/blip-image-captioning-base" # Caption fallback
|
| 49 |
|
| 50 |
-
# ------------------------
|
| 51 |
def _to_png_bytes(img: Image.Image) -> bytes:
|
| 52 |
-
|
| 53 |
-
img.save(
|
| 54 |
-
return
|
| 55 |
-
|
| 56 |
-
def resize_short(img: Image.Image, short=640) -> Image.Image:
    """Scale *img* so its shorter side is at most ``short`` pixels.

    Images that are already small enough are returned unchanged.
    """
    width, height = img.size
    shorter = height if height < width else width
    if shorter <= short:
        return img
    factor = short / shorter
    return img.resize((int(width * factor), int(height * factor)))
|
| 63 |
-
|
| 64 |
-
def autoprocess(img: Image.Image) -> Image.Image:
    """Lightly preprocess a phone photo for better OCR.

    Downscales to a 640px short side, then stretches contrast.
    """
    # Fix: ImageOps was referenced but never imported anywhere visible in
    # this file (only `from PIL import Image` exists) -> NameError at call
    # time. Import it locally so the fix is self-contained.
    from PIL import ImageOps

    img = resize_short(img, 640)
    img = ImageOps.autocontrast(img)
    return img
|
| 69 |
-
|
| 70 |
-
def center_crops(img: Image.Image, n=2, frac=0.80) -> list[Image.Image]:
    """Return *n* progressively larger center crops of *img*.

    Few crops keeps remote-API usage low. Crop fractions start at ``frac``
    and grow by 0.1 per crop.

    Fixes: the original annotated ``List[...]`` without importing ``typing``
    (NameError at import time); builtin ``list[...]`` is used instead, which
    matches the file's existing 3.10+ ``str | None`` syntax. The fraction is
    also clamped to 1.0 so crops for n > 2 never extend past the image
    bounds (PIL would silently pad such crops with black).
    """
    crops = []
    w, h = img.size
    for i in range(n):
        f = min(frac + i * 0.1, 1.0)  # clamp: keep the crop inside the image
        cw, ch = int(w * f), int(h * f)
        x0 = (w - cw) // 2
        y0 = (h - ch) // 2
        crops.append(img.crop((x0, y0, x0 + cw, y0 + ch)))
    return crops
|
| 81 |
-
|
| 82 |
-
# ------------------------ Remote calls (HF Inference API) ------------------------
|
| 83 |
-
def remote_trocr(img: Image.Image) -> tuple[str, str | None]:
    """Run OCR with the hosted TrOCR model.

    Returns ``(text, error)`` where ``error`` is ``None`` on success.
    The hub client may hand back a ``str``, a ``list`` of dicts, or a
    ``dict`` depending on its version/deployment, so every shape is
    normalized to a plain stripped string.
    """
    try:
        raw = hf.image_to_text(image=_to_png_bytes(img), model=TROCR_MODEL)
        if isinstance(raw, str):
            text = raw.strip()
        elif isinstance(raw, list) and raw:
            # some deployments return [{"generated_text": "..."}]
            first = raw[0]
            text = (first.get("generated_text") or first.get("text") or "").strip()
        elif isinstance(raw, dict):
            text = (raw.get("generated_text") or raw.get("text") or "").strip()
        else:
            text = ""
        return text, None
    except Exception as e:
        return "", f"TROCR error: {e}"
|
| 100 |
|
| 101 |
-
|
| 102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
try:
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
elif isinstance(out, dict):
|
| 115 |
-
ans =
|
| 116 |
else:
|
| 117 |
ans = ""
|
| 118 |
-
return ans, None
|
| 119 |
-
except Exception as e:
|
| 120 |
-
return "", f"VQA error: {e}"
|
| 121 |
-
|
| 122 |
-
def remote_caption(img: Image.Image) -> tuple[str, str | None]:
|
| 123 |
-
"""BLIP caption fallback; normalize return shapes."""
|
| 124 |
-
try:
|
| 125 |
-
out = hf.image_to_text(image=_to_png_bytes(img), model=CAP_MODEL)
|
| 126 |
-
if isinstance(out, str):
|
| 127 |
-
cap = out.strip()
|
| 128 |
-
elif isinstance(out, list) and out:
|
| 129 |
-
cap = (out[0].get("generated_text") or out[0].get("text") or "").strip()
|
| 130 |
-
elif isinstance(out, dict):
|
| 131 |
-
cap = (out.get("generated_text") or out.get("text") or "").strip()
|
| 132 |
-
else:
|
| 133 |
-
cap = ""
|
| 134 |
-
return cap, None
|
| 135 |
except Exception as e:
|
| 136 |
-
return "", f"
|
| 137 |
-
|
| 138 |
-
# ------------------------
|
| 139 |
-
|
| 140 |
-
"
|
| 141 |
-
"
|
| 142 |
-
"
|
| 143 |
-
"
|
| 144 |
-
"
|
| 145 |
-
"
|
| 146 |
-
"
|
| 147 |
-
# Add food-bank brands as you encounter themβ¦
|
| 148 |
}
|
| 149 |
|
| 150 |
-
|
| 151 |
-
"
|
| 152 |
-
"
|
| 153 |
-
"
|
| 154 |
-
"
|
| 155 |
-
"
|
| 156 |
-
"
|
| 157 |
-
"
|
| 158 |
-
"
|
| 159 |
-
"
|
| 160 |
-
|
|
|
|
| 161 |
}
|
| 162 |
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
def
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
def suggest_item_name(img: Image.Image, economy: bool = True) -> Dict[str, Any]:
|
| 179 |
"""
|
| 180 |
-
|
| 181 |
-
name, ocr_text, vqa_brand, vqa_type, caption, errors[], latency_s, calls{ocr,vqa,cap}
|
| 182 |
"""
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
# OCR over a couple of crops
|
| 189 |
-
crops = center_crops(img, n=2 if economy else 4, frac=0.8)
|
| 190 |
-
ocr_texts = []
|
| 191 |
-
for c in crops:
|
| 192 |
-
txt, e = remote_trocr(autoprocess(c))
|
| 193 |
-
if e: err_list.append(e)
|
| 194 |
-
if txt: ocr_texts.append(txt)
|
| 195 |
-
ocr = clean(" ".join(ocr_texts))
|
| 196 |
-
|
| 197 |
-
# VQA
|
| 198 |
-
vqa_brand, e1 = remote_vqa(img, "What brand name is printed on the product label? One or two words only.")
|
| 199 |
-
if e1: err_list.append(e1)
|
| 200 |
-
vqa_type, e2 = remote_vqa(img, "What kind of product is this? Use a short noun phrase (e.g., Cereal, Pasta, Soup, Antiperspirant Spray).")
|
| 201 |
-
if e2: err_list.append(e2)
|
| 202 |
-
|
| 203 |
-
# Caption fallback
|
| 204 |
-
cap, e3 = remote_caption(img)
|
| 205 |
-
if e3: err_list.append(e3)
|
| 206 |
-
|
| 207 |
-
# Normalize
|
| 208 |
-
fused = " ".join(filter(None, [ocr, vqa_brand, vqa_type, cap]))
|
| 209 |
-
brand = _match(BRANDS, fused)
|
| 210 |
-
ptype = _match(TYPES, fused)
|
| 211 |
-
var_m = re.search(VARIANT, fused, flags=re.I)
|
| 212 |
-
parts = [brand, ptype, var_m.group(0).upper() if var_m else None]
|
| 213 |
-
name = " ".join([p for p in parts if p]).strip() or (vqa_brand or ocr or cap or "Unknown").title()
|
| 214 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
return {
|
| 216 |
-
"name": name,
|
| 217 |
-
"
|
| 218 |
-
"
|
| 219 |
-
"
|
| 220 |
-
"
|
| 221 |
-
"errors": err_list,
|
| 222 |
-
"latency_s": round(time.time() - t0, 2),
|
| 223 |
-
"calls": {"ocr": len(crops), "vqa": 2, "cap": 1},
|
| 224 |
}
|
| 225 |
|
| 226 |
-
# ------------------------ Volunteer
|
| 227 |
st.subheader("π€ Volunteer")
|
|
|
|
| 228 |
with st.form("vol_form", clear_on_submit=True):
|
| 229 |
username = st.text_input("Username")
|
| 230 |
full_name = st.text_input("Full name")
|
| 231 |
-
|
|
|
|
| 232 |
if not (username and full_name):
|
| 233 |
st.error("Please fill both fields.")
|
| 234 |
else:
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
|
| 242 |
if "volunteer" not in st.session_state:
|
| 243 |
st.info("Add yourself above to start logging.")
|
| 244 |
st.stop()
|
| 245 |
|
| 246 |
-
# ------------------------
|
| 247 |
st.subheader("πΈ Scan label to auto-fill item")
|
|
|
|
| 248 |
c1, c2 = st.columns(2)
|
| 249 |
with c1:
|
| 250 |
-
cam = st.camera_input("Use your
|
| 251 |
with c2:
|
| 252 |
-
up = st.file_uploader("β¦or upload
|
| 253 |
-
|
| 254 |
-
economy = st.toggle("Economy mode (fewer API calls)", value=True)
|
| 255 |
|
| 256 |
-
suggested_name = st.session_state.get("suggested_name", "")
|
| 257 |
img_file = cam or up
|
| 258 |
-
|
| 259 |
if img_file:
|
| 260 |
img = Image.open(img_file).convert("RGB")
|
| 261 |
-
st.image(img,
|
| 262 |
-
|
| 263 |
-
if st.button("
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
st.success(f"π§ Suggested: **{
|
| 267 |
-
with st.expander("Debug (
|
| 268 |
-
st.json(
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
item_name = st.text_input("Item name", value=
|
| 275 |
-
quantity
|
| 276 |
-
category
|
| 277 |
-
|
| 278 |
|
| 279 |
if st.button("β
Log item"):
|
| 280 |
-
if item_name.strip():
|
|
|
|
|
|
|
|
|
|
| 281 |
try:
|
| 282 |
-
sb.table("
|
| 283 |
-
"volunteer": st.session_state.get("volunteer", "unknown"),
|
| 284 |
-
"barcode": None,
|
| 285 |
"item_name": item_name.strip(),
|
|
|
|
| 286 |
"category": category.strip() or None,
|
| 287 |
-
"
|
| 288 |
-
"
|
| 289 |
-
# "timestamp": database default (now())
|
| 290 |
}).execute()
|
| 291 |
-
st.success("Logged!")
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
st.
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
)
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --- Care Count Inventory (VQA-only suggestion, free HF API) ---
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import os
|
| 4 |
+
os.environ.setdefault("HOME", "/tmp") # avoid '/.streamlit' permission issue on Spaces
|
| 5 |
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
|
| 6 |
|
| 7 |
import io
|
| 8 |
import time
|
| 9 |
+
import base64
|
| 10 |
+
import requests
|
|
|
|
| 11 |
import pandas as pd
|
| 12 |
import streamlit as st
|
| 13 |
+
from PIL import Image
|
| 14 |
from supabase import create_client, Client
|
|
|
|
| 15 |
|
| 16 |
# ------------------------ Page ------------------------
|
| 17 |
st.set_page_config(page_title="Care Count Inventory", layout="centered")
|
| 18 |
st.title("π¦ Care Count Inventory")
|
| 19 |
+
st.caption("BLIP-VQAβassisted inventory logging with Supabase (free HF Inference API)")
|
| 20 |
|
| 21 |
+
# ------------------------ Secrets & clients ------------------------
|
| 22 |
def get_secret(name: str, default: str | None = None) -> str | None:
|
| 23 |
+
# Reads from env first (HF Variables), then from st.secrets (HF Secrets)
|
| 24 |
return os.getenv(name) or st.secrets.get(name, default)
|
| 25 |
|
| 26 |
SUPABASE_URL = get_secret("SUPABASE_URL")
|
| 27 |
SUPABASE_KEY = get_secret("SUPABASE_KEY")
|
|
|
|
|
|
|
| 28 |
if not SUPABASE_URL or not SUPABASE_KEY:
|
| 29 |
+
st.error("Missing Supabase creds. Add SUPABASE_URL & SUPABASE_KEY in Settings β Secrets.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
st.stop()
|
| 31 |
|
| 32 |
sb: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
|
|
|
|
| 33 |
|
| 34 |
+
# ---- VQA model config (free serverless endpoint) ----
|
| 35 |
+
HF_TOKEN = get_secret("HF_TOKEN") # βReadβ token is fine
|
| 36 |
+
VQA_MODEL = os.getenv("VQA_MODEL", "Salesforce/blip-vqa-capfilt-large") # better than base; still free
|
|
|
|
| 37 |
|
| 38 |
+
# ------------------------ Tiny image util ------------------------
|
| 39 |
def _to_png_bytes(img: Image.Image) -> bytes:
    """Encode a PIL image as PNG and return the raw bytes."""
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    return buffer.getvalue()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
+
# ------------------------ HTTP VQA helper ------------------------
|
| 45 |
+
def vqa_http(img: Image.Image, question: str) -> tuple[str, str | None]:
    """
    Ask the HF Inference API a visual question (BLIP-VQA) over plain HTTP.

    Deliberately uses `requests` rather than the huggingface_hub client so
    that client-version/kwarg differences cannot break the call.
    Returns (answer, error); error is None on success.
    """
    try:
        encoded = base64.b64encode(_to_png_bytes(img)).decode("utf-8")
        endpoint = f"https://api-inference.huggingface.co/models/{VQA_MODEL}"
        headers = {"Accept": "application/json"}
        if HF_TOKEN:
            # Token is optional on the free tier but raises rate limits.
            headers["Authorization"] = f"Bearer {HF_TOKEN}"

        resp = requests.post(
            endpoint,
            headers=headers,
            json={"inputs": {"question": question, "image": encoded}},
            timeout=60,
        )
        if resp.status_code != 200:
            return "", f"VQA HTTP {resp.status_code}: {resp.text[:200]}"

        body = resp.json()
        # The API returns either a list of dicts or a single dict.
        if isinstance(body, list) and body:
            candidate = body[0]
        elif isinstance(body, dict):
            candidate = body
        else:
            candidate = {}
        answer = candidate.get("answer") or candidate.get("generated_text") or ""
        return answer.strip(), None
    except Exception as e:
        return "", f"VQA HTTP error: {e}"
|
| 74 |
+
|
| 75 |
+
# ------------------------ Normalizer ------------------------
|
| 76 |
+
# Canonical spellings for brands the VQA model tends to answer in lowercase.
BRAND_ALIASES = {
    "degree": "Degree",
    "campbell's": "Campbell's",
    "heinz": "Heinz",
    "kellogg's": "Kellogg's",
    "quaker": "Quaker",
    "pepsi": "Pepsi",
    "coke": "Coca-Cola",
}

# Canonical spellings for common product types.
TYPE_ALIASES = {
    "antiperspirant": "Antiperspirant",
    "deodorant": "Deodorant",
    "toothpaste": "Toothpaste",
    "tooth brush": "Toothbrush",
    "cereal": "Cereal",
    "soup": "Soup",
    "beans": "Beans",
    "rice": "Rice",
    "pasta": "Pasta",
    "sauce": "Sauce",
    "soda": "Soda",
}

def _clean_name(s: str) -> str:
    """Lowercased, whitespace-trimmed key for alias lookups (None-safe)."""
    return (s or "").strip().lower()

def normalize_item(brand: str, ptype: str, fallback_text: str = "") -> str:
    """Build a display name from VQA answers.

    Brand and type are canonicalized through the alias tables (unknown
    values pass through stripped). If both are empty, fall back to the
    first five words of *fallback_text*, and finally to "Unknown".
    """
    canonical_brand = BRAND_ALIASES.get(_clean_name(brand), brand.strip())
    canonical_type = TYPE_ALIASES.get(_clean_name(ptype), ptype.strip())
    pieces = [piece for piece in (canonical_brand, canonical_type) if piece]
    if pieces:
        return " ".join(pieces)
    if fallback_text:
        return " ".join(fallback_text.strip().split()[:5])
    return "Unknown"
|
| 112 |
+
|
| 113 |
+
# ------------------------ VQA-only suggestion pipeline ------------------------
|
| 114 |
+
def suggest_name_vqa_only(img: Image.Image) -> dict:
    """
    Suggest an item name from three concise VQA questions (brand, product
    type, printed product name) and normalize the answers into one string.
    Returns a dict with the suggestion, the raw answers, and any errors.
    """
    questions = (
        "What is the brand name on the product label? Answer with one or two words.",
        "What type of product is this? Answer briefly, like 'Soup', 'Pasta', 'Antiperspirant'.",
        "What is the product name or flavor written on the label? Answer with a few words.",
    )
    answers = []
    errors = []
    for q in questions:
        answer, err = vqa_http(img, q)
        answers.append(answer)
        if err:
            errors.append(err)
    brand, ptype, pname = answers

    name = normalize_item(brand, ptype, pname)
    return {
        "name": name if name else "Unknown",
        "vqa_brand": brand,
        "vqa_type": ptype,
        "vqa_pname": pname,
        "errors": errors,
    }
|
| 135 |
|
| 136 |
+
# ------------------------ Volunteer login ------------------------
|
| 137 |
st.subheader("π€ Volunteer")

with st.form("vol_form", clear_on_submit=True):
    username = st.text_input("Username")
    full_name = st.text_input("Full name")
    submitted = st.form_submit_button("Add / Continue")
    if submitted and not (username and full_name):
        st.error("Please fill both fields.")
    elif submitted:
        try:
            rows = sb.table("volunteers").select("full_name").execute().data or []
            known = {row["full_name"].strip().lower() for row in rows}
            # Insert only first-time volunteers; returning ones just sign in.
            if full_name.strip().lower() not in known:
                sb.table("volunteers").insert({"username": username, "full_name": full_name}).execute()
            st.session_state["volunteer"] = username
            st.session_state["volunteer_name"] = full_name
            st.success(f"Welcome, {full_name}!")
        except Exception as e:
            st.error(f"Volunteer add/check failed: {e}")

# Everything below this gate requires a signed-in volunteer.
if "volunteer" not in st.session_state:
    st.info("Add yourself above to start logging.")
    st.stop()
|
| 161 |
|
| 162 |
+
# ------------------------ Capture / Upload ------------------------
|
| 163 |
st.subheader("πΈ Scan label to auto-fill item")

col_cam, col_upload = st.columns(2)
with col_cam:
    cam = st.camera_input("Use your webcam")
with col_upload:
    up = st.file_uploader("β¦or upload an image", type=["png", "jpg", "jpeg"])

# Either input source works; camera wins when both are present.
img_file = cam or up
if img_file:
    photo = Image.open(img_file).convert("RGB")
    st.image(photo, use_container_width=True)

    if st.button("π Suggest name"):
        started = time.time()
        result = suggest_name_vqa_only(photo)
        st.success(f"π§ Suggested: **{result['name']}** Β· β±οΈ {time.time()-started:.2f}s")
        with st.expander("π Debug (VQA)"):
            st.json(result)
        # Remember the suggestion so the item form below can pre-fill it.
        st.session_state["scanned_item_name"] = result["name"]
|
| 183 |
+
|
| 184 |
+
# ------------------------ Add inventory item (form unchanged) ------------------------
|
| 185 |
+
st.subheader("π₯ Add inventory item")

item_name = st.text_input("Item name", value=st.session_state.get("scanned_item_name", ""))
quantity = st.number_input("Quantity", min_value=1, step=1, value=1)
category = st.text_input("Category (optional)")
expiry = st.date_input("Expiry date (optional)")

if st.button("β Log item"):
    if not item_name.strip():
        st.warning("Enter an item name.")
    else:
        # Try 'inventory' table first; if missing, fall back to 'visit_items'
        inventory_row = {
            "item_name": item_name.strip(),
            "quantity": int(quantity),
            "category": category.strip() or None,
            "expiry_date": str(expiry) if expiry else None,
            "added_by": st.session_state.get("volunteer", "Unknown"),
        }
        try:
            sb.table("inventory").insert(inventory_row).execute()
            st.success("Logged to 'inventory'!")
        except Exception as e1:
            # Fallback schema: visit_items (id, visit_id, timestamp, volunteer,
            # weather_type, temp_c, barcode, item_name, category, unit, qty)
            try:
                fallback_row = {
                    "item_name": item_name.strip(),
                    "qty": int(quantity),
                    "category": category.strip() or None,
                    "volunteer": st.session_state.get("volunteer_name") or st.session_state.get("volunteer") or "Unknown",
                }
                sb.table("visit_items").insert(fallback_row).execute()
                st.success("Logged to 'visit_items'!")
            except Exception as e2:
                st.error(f"Insert failed: {e1}\nFallback failed: {e2}")
|
| 219 |
+
|
| 220 |
+
# ------------------------ Live inventory (tries multiple tables) ------------------------
|
| 221 |
+
st.subheader("π Live inventory")

def _try_fetch(table: str):
    """Best-effort select from *table*; returns rows or None on failure."""
    try:
        return sb.table(table).select("*").order("created_at", desc=True).execute().data
    except Exception:
        try:
            # some tables don't have created_at
            return sb.table(table).select("*").limit(1000).execute().data
        except Exception:
            return None

# Probe candidate tables in priority order, stopping at the first with rows.
data = None
for candidate_table in ("inventory", "visit_items", "inventory_master"):
    data = _try_fetch(candidate_table)
    if data:
        break

if data:
    df = pd.DataFrame(data)
    st.dataframe(df, use_container_width=True)
    st.download_button(
        "β¬οΈ Export CSV",
        df.to_csv(index=False).encode("utf-8"),
        "care_count_inventory.csv",
        "text/csv",
    )
else:
    st.caption("No items yet or tables not found. (Tried: inventory, visit_items, inventory_master)")
|