Spaces:
Running
Running
File size: 1,643 Bytes
833aed9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
"""
Pydantic models for request/response validation
"""
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
class ModerateRequest(BaseModel):
    """Request payload for a moderation call: the text to check plus the model variant."""

    # Required: the raw text to run through the guardrail.
    text: str = Field(description="Text to moderate")
    # Which LionGuard variant to run; defaults to the 2.1 release.
    model: str = Field(
        default="lionguard-2.1",
        description="Model to use: lionguard-2, lionguard-2.1, or lionguard-2-lite",
    )
class CategoryScore(BaseModel):
    """One moderation category's result: display name, emoji, and its maximum score."""

    name: str        # category display name
    emoji: str       # emoji shown alongside the category
    max_score: float # highest score recorded for this category
class ModerateResponse(BaseModel):
    """Response body returned by the moderation endpoint."""

    # Overall binary classification score and its derived verdict/percentage.
    binary_score: float
    binary_verdict: str  # one of "pass", "warn", "fail"
    binary_percentage: int
    # Per-category breakdown of the moderation result.
    categories: List[CategoryScore]
    # Identifier for this text, used later by feedback votes.
    text_id: str
    # Which model variant actually produced the result.
    model_used: str
class FeedbackRequest(BaseModel):
    """A thumbs-up/thumbs-down vote on a previously moderated text."""

    text_id: str = Field(description="ID of the text being voted on")
    agree: bool = Field(description="True for thumbs up, False for thumbs down")
class FeedbackResponse(BaseModel):
    """Acknowledgement returned after a feedback vote is recorded."""

    success: bool  # whether the vote was stored
    message: str   # human-readable status message
class ChatMessage(BaseModel):
    """A single chat turn: who spoke (role) and what was said (content)."""

    role: str     # e.g. speaker role such as "user"/"assistant" — confirm against caller
    content: str  # message text
class ChatHistories(BaseModel):
    """Three parallel chat transcripts, one per guardrail configuration."""

    # Each history defaults to an empty transcript; default_factory avoids a
    # shared mutable default across instances.
    no_moderation: List[ChatMessage] = Field(default_factory=list)
    openai_moderation: List[ChatMessage] = Field(default_factory=list)
    lionguard: List[ChatMessage] = Field(default_factory=list)
class ChatRequest(BaseModel):
    """Request to fan a message out to every guardrail, with prior transcripts."""

    # The new user message to be sent through each guardrail pipeline.
    message: str = Field(description="Message to send to all guardrails")
    # LionGuard variant selection; same default as ModerateRequest.
    model: str = Field(
        default="lionguard-2.1",
        description="LionGuard model variant to use",
    )
    # Existing per-guardrail transcripts; starts empty on a fresh conversation.
    histories: ChatHistories = Field(default_factory=ChatHistories)
class ChatResponse(BaseModel):
    """Response carrying the updated transcripts and LionGuard's score, if any."""

    # Transcripts with the new turn(s) appended for each guardrail.
    histories: ChatHistories
    # Score from the LionGuard check; None when no score was produced.
    lionguard_score: Optional[float] = None
|