import streamlit as st
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
# ---- Page configuration -------------------------------------------------
st.set_page_config(
    page_title="AI Interview Assistant",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="collapsed"
)

# Global markdown/CSS hook (content intentionally empty here).
st.markdown("""
""", unsafe_allow_html=True)

# ---- Session-state bootstrap --------------------------------------------
# Seed each key exactly once per browser session; the script re-runs on
# every interaction, so existing values must not be overwritten.
_SESSION_DEFAULTS = {
    'page': 'setup',                # which view to render: 'setup' or 'chat'
    'chat_history': [],             # list of {"role", "content"} dicts for display
    'llm': None,                    # ChatGoogleGenerativeAI instance once initialized
    'system_messages': [],          # LangChain message list sent to the model
    'last_processed_input': "",     # dedupe guard against double form submits
    'processing': False,            # True while an LLM call is in flight
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
def initialize_llm():
    """Create and return the Gemini chat model, or None on failure.

    Reads the API key from the GEMINI_API_KEY environment variable.  Any
    problem (missing key, construction error) is surfaced to the user via
    a Streamlit error banner and reported as a None return value.
    """
    gemini_api_key = os.getenv("GEMINI_API_KEY")
    # Guard clause: no key means no model — tell the user and bail out.
    if not gemini_api_key:
        st.error("⚠️ GEMINI_API_KEY not found in environment variables!")
        return None
    try:
        return ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            api_key=gemini_api_key,
        )
    except Exception as e:
        st.error(f"❌ Error initializing AI model: {str(e)}")
        return None
def setup_page():
    """Render the tech-stack selection and interview-configuration page.

    Collects the candidate's tech stack, interview level, question count and
    expected duration.  On "Start Interview" it initializes the LLM, builds
    the system prompt, seeds the chat history with the opening greeting, and
    switches the app to the chat page.
    """
    # Page header / styling hooks (HTML content intentionally empty here).
    st.markdown("""
""", unsafe_allow_html=True)
    st.markdown('', unsafe_allow_html=True)

    col1, col2 = st.columns([1, 1])

    with col1:
        st.markdown("### 🛠️ Select Candidate's Tech Stack")
        st.markdown("Choose the technologies relevant to the position:")
        tech_options = [
            "Python", "JavaScript", "Java", "C++", "C#", "Go", "Rust", "TypeScript",
            "React", "Angular", "Vue.js", "Node.js", "Django", "Flask", "Spring Boot",
            "Machine Learning", "Deep Learning", "Data Science", "AI/ML", "NLP",
            "AWS", "Azure", "Google Cloud", "Docker", "Kubernetes", "DevOps",
            "SQL", "MongoDB", "PostgreSQL", "Redis", "GraphQL", "REST APIs",
            "Git", "Agile", "Scrum", "CI/CD", "Testing", "Microservices"
        ]
        selected_tech = st.multiselect(
            "Technologies:",
            options=tech_options,
            default=["Python", "Machine Learning", "Deep Learning"],
            help="Select all relevant technologies for the position"
        )

    with col2:
        st.markdown("### 🎯 Interview Configuration")
        interview_level = st.selectbox(
            "Interview Level:",
            ["Junior", "Mid-level", "Senior", "Lead"],
            index=1
        )
        question_count = st.slider(
            "Number of Technical Questions:",
            min_value=3,
            max_value=8,
            value=4,
            help="Number of technical questions to ask"
        )
        interview_duration = st.selectbox(
            "Expected Duration:",
            ["15 minutes", "30 minutes", "45 minutes", "60 minutes"],
            index=1
        )

    # Show whether the Gemini API key is configured before allowing start.
    api_key_status = os.getenv("GEMINI_API_KEY")
    if api_key_status:
        st.markdown("""
✅ AI Assistant Ready
""", unsafe_allow_html=True)
    else:
        st.markdown("""
⚠️ Please set GEMINI_API_KEY environment variable
""", unsafe_allow_html=True)

    st.markdown('', unsafe_allow_html=True)

    col1, col2, col3 = st.columns([1, 1, 1])
    with col2:
        if st.button("🚀 Start Interview"):
            if selected_tech and api_key_status:
                # Persist the configuration so chat_page can read it.
                st.session_state.selected_tech = selected_tech
                st.session_state.interview_level = interview_level
                st.session_state.question_count = question_count
                st.session_state.interview_duration = interview_duration
                st.session_state.llm = initialize_llm()
                if st.session_state.llm:
                    system_prompt = f"""You are an intelligent interview assistant designed to facilitate the candidate information gathering process for recruitment, simulating an interview-like flow. Your main objective is to take the interview based as per the SOP given below.
SOP:
Step 1: Start the conversation by greeting the candidate warmly.
Step 2: Once you greeted, ALWAYS collect the following candidate details such as full name, email address, phone number, years of experience, desired position(s), and current location.
Step 3: After collecting the candidate details, start ask the questions that related to candidate's tech stack, you're allowed to ask whatever question you want that are related to candidate's tech stack. Ask {question_count} questions related to candidates tech stack for {interview_level} level.
Step 4: After asking {question_count} questions, politely conclude the interview with a polite closing message.
Additional Guidelines:
- Maintain the flow of the conversation by handling follow-up questions while keeping the context intact.
- When collecting candidate's details, collect the details one by one, NEVER ask all the info at a single time.
- If the candidate provides unexpected input, offer a fallback response to guide them back to the process.
- Upon detecting a conversation-ending keyword, gracefully conclude the conversation by thanking the candidate, informing them of the next steps, and ensuring a positive close.
- Always maintain a professional tone, avoid deviating from the task, and ensure the conversation is smooth and seamless throughout.
- The interview is for {interview_level} level position with expected duration of {interview_duration}.
Candidate's tech stack: {selected_tech}
"""
                    # Single source of truth for the opening greeting: it must
                    # appear identically in the LLM context and the visible
                    # transcript, so define it once.
                    greeting = ("Hello! Welcome to the interview. I'm your AI interview "
                                "assistant. Let's begin with a warm greeting. How are you "
                                "doing today?")
                    st.session_state.system_messages = [
                        SystemMessage(content=system_prompt),
                        AIMessage(content=greeting),
                    ]
                    st.session_state.chat_history = [
                        {"role": "assistant", "content": greeting}
                    ]
                    st.session_state.page = 'chat'
                    st.rerun()
                else:
                    st.error("❌ Failed to initialize AI assistant. Please check your API key.")
            else:
                # Report every unmet precondition, not just the first one.
                if not selected_tech:
                    st.error("⚠️ Please select at least one technology!")
                if not api_key_status:
                    st.error("⚠️ Please set your GEMINI_API_KEY environment variable!")
def chat_page():
    """Render the live interview chat view and drive the LLM conversation.

    Shows the transcript from st.session_state.chat_history, accepts input
    through a self-clearing form, and on submit either ends the interview
    (conversation-ending keyword) or forwards the message to the LLM along
    with the full message history in st.session_state.system_messages.
    """
    col1, col2, col3 = st.columns([1, 6, 1])

    with col1:
        if st.button("← Back", key="back_btn"):
            # Leaving the interview: wipe all conversation state so the next
            # run starts from a clean slate.
            st.session_state.page = 'setup'
            st.session_state.chat_history = []
            st.session_state.system_messages = []
            st.session_state.last_processed_input = ""
            st.session_state.processing = False
            st.rerun()

    with col2:
        # Header showing the configured tech stack (HTML markup stripped).
        st.markdown("""
""".format(", ".join(st.session_state.selected_tech)), unsafe_allow_html=True)

    st.markdown('', unsafe_allow_html=True)

    # Replay the transcript.
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            st.markdown(f'{message["content"]}', unsafe_allow_html=True)
        else:
            st.markdown(f'{message["content"]}', unsafe_allow_html=True)

    # Typing indicator while an LLM call is in flight.
    if st.session_state.get('processing', False):
        st.markdown("""
""", unsafe_allow_html=True)

    st.markdown('', unsafe_allow_html=True)

    # clear_on_submit resets the text box after each message.
    with st.form(key="chat_form", clear_on_submit=True):
        user_input = st.text_input(
            "Type your message..."
        )
        send_button = st.form_submit_button("Send", type="primary")

    if send_button and user_input.strip():
        # FIX: normalize once — the original gated on the stripped text but
        # stored and compared the raw text, so trailing whitespace defeated
        # the duplicate-submit guard and leaked into the transcript/prompt.
        user_input = user_input.strip()
        # Ignore an identical resubmission (e.g. double-click / rerun echo).
        if user_input != st.session_state.get('last_processed_input', ''):
            st.session_state.last_processed_input = user_input
            st.session_state.chat_history.append({"role": "user", "content": user_input})

            if 'bye' in user_input.lower():
                # Conversation-ending keyword: close out locally, no LLM call.
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": "Thank you for your time! The interview has been completed. Our team will review your responses and get back to you soon. Have a great day!"
                })
                st.rerun()
            else:
                try:
                    st.session_state.processing = True
                    # Re-attach the tech stack so the model keeps questions on-topic.
                    user_message_with_context = f"{user_input} Candidate's tech stack: {st.session_state.selected_tech}"
                    st.session_state.system_messages.append(HumanMessage(content=user_message_with_context))
                    response = st.session_state.llm.invoke(st.session_state.system_messages)
                    st.session_state.chat_history.append({"role": "assistant", "content": response.content})
                    st.session_state.system_messages.append(AIMessage(content=response.content))
                    st.session_state.processing = False
                    st.rerun()
                except Exception as e:
                    # Best-effort recovery: show the error and keep the chat usable.
                    st.session_state.processing = False
                    st.error(f"❌ An error occurred: {str(e)}")
                    st.session_state.chat_history.append({
                        "role": "assistant",
                        "content": "I apologize, but I encountered an error. Please try again or contact support if the issue persists."
                    })
                    st.rerun()
def main():
    """Route to the active page based on st.session_state.page."""
    routes = {
        'setup': setup_page,
        'chat': chat_page,
    }
    render = routes.get(st.session_state.page)
    if render is not None:
        render()


if __name__ == "__main__":
    main()