"""
Write full Python programs for each question.

Build an end-to-end Generative AI application.
Reference implementations for instructors.
"""
# Streamlit Prompt Playground
# Collects a prompt plus a prompt category and echoes both back on submit.
import streamlit as st

st.set_page_config(page_title="GenAI Prompt Playground")
st.title("GenAI Prompt Playground")

PROMPT_TYPES = ["Text Generation", "Summarization", "Explanation"]
prompt_type = st.selectbox("Select Prompt Type", PROMPT_TYPES)
prompt = st.text_area("Enter your Generative AI prompt")

if st.button("Submit"):
    # Reject whitespace-only input before echoing it back.
    if not prompt.strip():
        st.error("Prompt cannot be empty.")
    else:
        st.success("Prompt Submitted Successfully!")
        st.write("### Prompt Type:")
        st.write(prompt_type)
        st.write("### Prompt:")
        st.write(prompt)
# Streamlit Prompt History Tracker
# Accumulates submitted prompts across reruns using session state.
import streamlit as st

st.title("Prompt History Tracker")

# Session state survives Streamlit's script reruns, so the list persists.
if "history" not in st.session_state:
    st.session_state["history"] = []

prompt = st.text_input("Enter Prompt")
if st.button("Add Prompt") and prompt:
    st.session_state.history.append(prompt)

st.write("## Prompt History")
for position, past_prompt in enumerate(st.session_state.history, start=1):
    st.write(f"{position}. {past_prompt}")
# OpenAI / Groq / Gemini API Text Generator (OpenAI example)
# Sends a single user prompt to gpt-4o-mini and prints the reply.
import os

from openai import OpenAI

# Prefer the standard OPENAI_API_KEY environment variable over a
# hard-coded secret; the placeholder remains as a fallback for demos.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY"))

prompt = input("Enter prompt: ")
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": prompt}],
)
print("AI Response:")
print(response.choices[0].message.content)
# API-Based Prompt Length Controller
# Requests a completion capped at 100 tokens and reports its word count.
from openai import OpenAI

client = OpenAI(api_key="YOUR_OPENAI_API_KEY")

user_prompt = input("Enter prompt: ")
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    max_tokens=100,  # hard cap on completion length
    messages=[{"role": "user", "content": user_prompt}],
)
answer = completion.choices[0].message.content
# "Length" here is whitespace-separated words, not tokens.
print("Response Length:", len(answer.split()))
print(answer)
# Multi-Model API Switcher (OpenAI / Groq / Gemini)
# Routes one prompt to the provider the user picks at the console.
choice = input("Choose model (openai/groq/gemini): ").lower()
prompt = input("Enter prompt: ")


def _ask_openai(text):
    # Import lazily so only the selected provider's SDK must be installed.
    from openai import OpenAI
    client = OpenAI(api_key="OPENAI_KEY")
    reply = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": text}],
    )
    return reply.choices[0].message.content


def _ask_gemini(text):
    import google.generativeai as genai
    genai.configure(api_key="GEMINI_KEY")
    return genai.GenerativeModel("gemini-pro").generate_content(text).text


def _ask_groq(text):
    from groq import Groq
    client = Groq(api_key="GROQ_KEY")
    reply = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=[{"role": "user", "content": text}],
    )
    return reply.choices[0].message.content


# Dispatch table replaces the if/elif chain; unknown keys fall through.
_PROVIDERS = {"openai": _ask_openai, "gemini": _ask_gemini, "groq": _ask_groq}

handler = _PROVIDERS.get(choice)
result = handler(prompt) if handler else "Invalid choice"
print(result)
# LangChain Prompt Template Generator
# Renders a reusable {topic} template and sends the result to the LLM.
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(api_key="OPENAI_KEY", model="gpt-4o-mini")

beginner_template = PromptTemplate(
    input_variables=["topic"],
    template="Explain {topic} in simple terms for beginners.",
)

# format() substitutes the placeholder before the model call.
rendered_prompt = beginner_template.format(topic="Generative AI")
reply = llm.invoke(rendered_prompt)
print(reply.content)
# LangChain Prompt Chaining
# Two-stage pipeline: summarize a topic, then derive questions from it.
from langchain_openai import ChatOpenAI
from langchain.chains import SequentialChain, LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(api_key="OPENAI_KEY")

# Stage 1: condense the topic into a short summary.
summary_prompt = PromptTemplate(
    input_variables=["topic"],
    template="Summarize {topic} in 3 sentences.",
)
# Stage 2: turn that summary into comprehension questions.
question_prompt = PromptTemplate(
    input_variables=["summary"],
    template="Create 3 questions from this summary:\n{summary}",
)

# output_key of chain1 feeds the {summary} input of chain2.
chain1 = LLMChain(llm=llm, prompt=summary_prompt, output_key="summary")
chain2 = LLMChain(llm=llm, prompt=question_prompt, output_key="questions")

overall_chain = SequentialChain(
    chains=[chain1, chain2],
    input_variables=["topic"],
    output_variables=["summary", "questions"],
)

# Chain.__call__ is deprecated since LangChain 0.1; invoke() returns the
# same dict of {topic, summary, questions}.
result = overall_chain.invoke({"topic": "Generative AI"})
print(result)
# LangChain Conversation Memory App
# Console chat loop whose memory buffer keeps the full transcript.
from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

llm = ChatOpenAI(api_key="OPENAI_KEY")
# Buffer memory replays every prior turn to the model on each call.
chat = ConversationChain(llm=llm, memory=ConversationBufferMemory())

while True:
    user_input = input("You: ")
    if user_input.lower() == "exit":
        break
    reply = chat.predict(input=user_input)
    print("AI:", reply)
# LlamaIndex Document Loader
# Reads every file under ./docs and builds an in-memory vector index.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

loaded_docs = SimpleDirectoryReader("docs").load_data()
doc_index = VectorStoreIndex.from_documents(loaded_docs)
print("Documents indexed successfully.")
# LlamaIndex Question Answering System
# Indexes ./docs, then answers one console question against the index.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

source_docs = SimpleDirectoryReader("docs").load_data()
qa_engine = VectorStoreIndex.from_documents(source_docs).as_query_engine()

user_question = input("Ask a question: ")
print(qa_engine.query(user_question))
# RAG Pipeline using LlamaIndex
# Retrieval-augmented generation: retrieve relevant chunks, then answer.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

corpus = SimpleDirectoryReader("docs").load_data()
rag_engine = VectorStoreIndex.from_documents(corpus).as_query_engine()

question = input("Enter question: ")
answer = rag_engine.query(question)
print("RAG Answer:")
print(answer)
# Ollama Local LLM Runner
# Shells out to the local Ollama CLI for a one-shot completion.
import subprocess

prompt = input("Enter prompt: ")
try:
    result = subprocess.run(
        ["ollama", "run", "llama3", prompt],
        capture_output=True,
        text=True,
    )
except FileNotFoundError:
    # The binary is not on PATH; the original would crash with a traceback.
    print("Error: 'ollama' executable not found. Is Ollama installed?")
else:
    if result.returncode != 0:
        # Surface the daemon/model error instead of printing empty stdout.
        print(result.stderr)
    else:
        print(result.stdout)
# Ollama Chat Application
# Console REPL; each turn is an independent call (no conversation memory).
import subprocess

print("Ollama Chat (type exit to quit)")
while True:
    user_text = input("You: ")
    if user_text.lower() == "exit":
        break
    proc = subprocess.run(
        ["ollama", "run", "llama3", user_text],
        capture_output=True,
        text=True,
    )
    print("AI:", proc.stdout)
# Streamlit + Ollama AI Assistant
# Browser UI over the local llama3 model; shows raw CLI stdout.
import streamlit as st
import subprocess

st.title("Offline GenAI Assistant (Ollama)")
question = st.text_area("Ask something")

if st.button("Generate"):
    completed = subprocess.run(
        ["ollama", "run", "llama3", question],
        capture_output=True,
        text=True,
    )
    st.write(completed.stdout)
# End-to-End GenAI Application
"""
Architecture:
- Streamlit UI
- LangChain prompt handling
- Ollama local LLM
"""
import streamlit as st
import subprocess

st.title("End-to-End GenAI App")
topic = st.text_input("Enter topic")

if st.button("Generate Explanation"):
    if not topic.strip():
        # Without this guard an empty box produced the degenerate prompt
        # "Explain  in simple terms."
        st.warning("Please enter a topic first.")
    else:
        prompt = f"Explain {topic} in simple terms."
        result = subprocess.run(
            ["ollama", "run", "llama3", prompt],
            capture_output=True,
            text=True,
        )
        st.write(result.stdout)