File size: 3,170 Bytes
ee16c60
f2fce5e
139a0fa
ee16c60
139a0fa
 
ee16c60
139a0fa
 
ee16c60
 
 
 
 
 
 
 
 
139a0fa
 
 
ee16c60
139a0fa
 
ee16c60
139a0fa
 
f2fce5e
139a0fa
 
 
f2fce5e
 
139a0fa
 
 
 
 
 
ee16c60
139a0fa
 
 
 
 
ee16c60
 
139a0fa
f2fce5e
 
 
139a0fa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee16c60
139a0fa
 
 
 
 
f2fce5e
139a0fa
ee16c60
139a0fa
f2fce5e
139a0fa
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from huggingface_hub import snapshot_download

# =========================================================
# 1️⃣ Download model to local cache once
# =========================================================
# Hugging Face repo id of the prompt-enhancer model (Gemma 3, 270M, instruction-tuned).
MODEL_ID = "gokaygokay/prompt-enhancer-gemma-3-270m-it"
print(f"📦 Downloading model {MODEL_ID} ...")
# snapshot_download fetches (or reuses) the full repo in the local HF cache
# and returns the local directory path; used below so from_pretrained loads
# from disk instead of re-resolving the hub.
model_path: str = snapshot_download(repo_id=MODEL_ID)
print(f"✅ Model downloaded to: {model_path}")

# =========================================================
# 2️⃣ Load tokenizer and model from local path
# =========================================================
# Load tokenizer and model from the locally cached snapshot, then wrap them
# in a text-generation pipeline used by enhance_prompt() below.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# =========================================================
# 3️⃣ Define function to enhance prompt
# =========================================================
def enhance_prompt(prompt: str) -> str:
    """Enhance and expand a user prompt with more details and context.

    Args:
        prompt: The raw user prompt to enhance.

    Returns:
        The model-generated enhanced prompt, or a warning message when the
        input is empty/whitespace-only.
    """
    if not prompt.strip():
        return "⚠️ Please enter a prompt to enhance."

    messages = [
        {"role": "system", "content": "Enhance and expand the following prompt with more details and context:"},
        {"role": "user", "content": prompt.strip()},
    ]

    # Serialize the chat into the model's expected prompt format, leaving the
    # assistant turn open so generation continues from there.
    chat_input = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # return_full_text=False is the bug fix: by default the text-generation
    # pipeline returns prompt + completion, so the UI would show the entire
    # serialized chat template ahead of the enhanced prompt. With it False,
    # only the newly generated tokens come back.
    output = pipe(
        chat_input,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.8,
        return_full_text=False,
    )
    result = output[0]["generated_text"]
    return result.strip()

# =========================================================
# 4️⃣ Gradio UI
# =========================================================
# ---------------------------------------------------------
# Gradio UI: two-column layout — prompt input + button on
# the left, enhanced output on the right.
# ---------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(), title="Prompt Enhancer ✨") as demo:
    header_md = """
        # ✨ Prompt Enhancer — Gemma 3 270M IT  
        Give your idea a creative boost!  
        Enter a simple prompt, and this app will enhance it with vivid detail and context.
        """
    gr.Markdown(header_md)

    with gr.Row():
        # Left column: user input and the trigger button.
        with gr.Column(scale=1):
            prompt_box = gr.Textbox(
                label="Enter your prompt",
                placeholder="e.g. a cat sitting on a chair",
                lines=4,
            )
            run_btn = gr.Button("🚀 Enhance Prompt", variant="primary")
        # Right column: read-only display of the model output.
        with gr.Column(scale=1):
            result_box = gr.Textbox(
                label="Enhanced Prompt",
                placeholder="Your enhanced prompt will appear here...",
                lines=8,
            )

    # Wire the button to the enhancement function.
    run_btn.click(enhance_prompt, inputs=prompt_box, outputs=result_box)

    footer_md = """
        ---
        🧠 **Try these examples:**  
        - “a futuristic city at sunset”  
        - “a woman reading under a tree”  
        - “a magical forest with glowing mushrooms”
        """
    gr.Markdown(footer_md)

# =========================================================
# 5️⃣ Launch app
# =========================================================
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()