File size: 1,338 Bytes
03ac85a
 
 
addff1b
03ac85a
addff1b
 
03ac85a
 
 
 
 
addff1b
03ac85a
 
addff1b
722af33
03ac85a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
722af33
03ac85a
 
 
addff1b
03ac85a
 
 
 
 
 
addff1b
 
03ac85a
 
 
 
 
addff1b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
import streamlit as st

@st.cache_resource
def build_chat_chain(_llm):
    """Build the plain conversational chain: prompt -> LLM -> string.

    The leading underscore on ``_llm`` opts the argument out of Streamlit's
    cache hashing (LLM clients are unhashable); the built chain itself is
    cached for the session via ``st.cache_resource``.
    """
    messages = [
        ("system", "You are AbideVerse, an AI companion helping with devotion and Bible study."),
        ("human", "{message}"),
    ]
    chat_prompt = ChatPromptTemplate.from_messages(messages)
    chain = chat_prompt | _llm | StrOutputParser()
    return chain


@st.cache_resource
def build_rag_chain(_llm, _retriever):
    """Build the retrieval-augmented chain.

    Pipeline: fan out the incoming question to the retriever (as ``context``)
    and pass it through unchanged (as ``question``), fill the prompt, run the
    LLM, and parse to a plain string. Underscore-prefixed params skip
    Streamlit's cache hashing.
    """
    instructions = """
You are AbideVerse RAG Assistant. Use ONLY the provided context to answer questions biblically.
If the answer is not present, say so.

CONTEXT:
{context}
"""

    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", instructions),
        ("human", "{question}"),
    ])

    # Run retrieval and question pass-through in parallel to feed the prompt.
    inputs = RunnableParallel({
        "context": _retriever,
        "question": RunnablePassthrough(),
    })

    return inputs | chat_prompt | _llm | StrOutputParser()


@st.cache_resource
def build_quiz_chain(_llm):
    """Build the verse-memorization quiz chain (cloze-style) as prompt -> LLM -> string.

    ``_llm`` is underscore-prefixed so Streamlit's resource cache does not
    attempt to hash the client object.
    """
    quiz_prompt = ChatPromptTemplate.from_messages([
        ("system", "Generate a Bible verse memorization quiz (cloze)."),
        ("human", "Verse: {verse}"),
    ])
    return quiz_prompt | _llm | StrOutputParser()