# AI Interview Assistant — Streamlit app using LangChain + Google Gemini.
import os
import re

import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
# Page-level configuration; must be the first Streamlit call in the script.
st.set_page_config(
    page_title="AI Interview Assistant",
    layout="wide",
    page_icon="🤖",
    initial_sidebar_state="collapsed",
)
# Global CSS injected once at startup: gradient header, chat bubbles,
# styled inputs/buttons, status badges, hidden Streamlit chrome, and the
# animated typing indicator used while the model is "thinking".
st.markdown("""
<style>
/* Main container styling */
.main {
    padding: 0rem 1rem;
}
/* Header styling */
.header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 2rem;
    border-radius: 15px;
    margin-bottom: 2rem;
    text-align: center;
    box-shadow: 0 10px 30px rgba(0,0,0,0.2);
}
.header h1 {
    color: white;
    font-size: 2.5rem;
    margin-bottom: 0.5rem;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
}
.header p {
    color: #e0e6ff;
    font-size: 1.2rem;
    margin-bottom: 0;
}
/* Chat container styling */
.chat-container {
    background: white;
    border-radius: 15px;
    padding: 1.5rem;
    box-shadow: 0 5px 20px rgba(0,0,0,0.1);
    border: 1px solid #e1e8ed;
    max-height: 500px;
    overflow-y: auto;
    margin-bottom: 1rem;
}
/* Message styling */
.user-message {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 1rem 1.5rem;
    border-radius: 20px 20px 5px 20px;
    margin: 0.5rem 0;
    margin-left: 40%;
    max-width: 60%;
    box-shadow: 0 3px 10px rgba(102, 126, 234, 0.3);
    word-wrap: break-word;
}
.bot-message {
    background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
    color: white;
    padding: 1rem 1.5rem;
    border-radius: 20px 20px 20px 5px;
    margin: 0.5rem 0;
    margin-right: 40%;
    max-width: 60%;
    box-shadow: 0 3px 10px rgba(245, 87, 108, 0.3);
    word-wrap: break-word;
}
/* Form submit button styling - Hidden */
.stForm > div > div > button {
    display: none !important;
}
/* Input styling with full width */
.stTextInput > div > div > input {
    border-radius: 25px;
    border: 2px solid #e1e8ed;
    padding: 0.75rem 1rem;
    font-size: 1rem;
    width: 100%;
}
.stTextInput > div > div > input:focus {
    border-color: #667eea;
    box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
}
/* Button styling */
.stButton > button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    border: none;
    border-radius: 25px;
    padding: 0.75rem 2rem;
    font-size: 1rem;
    font-weight: 600;
    transition: all 0.3s ease;
    box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4);
}
.stButton > button:hover {
    transform: translateY(-2px);
    box-shadow: 0 6px 20px rgba(102, 126, 234, 0.6);
}
/* Multiselect styling */
.stMultiSelect > div > div {
    border-radius: 10px;
    border: 2px solid #e1e8ed;
}
/* Tech stack card styling */
.tech-card {
    background: white;
    border-radius: 15px;
    padding: 2rem;
    box-shadow: 0 5px 20px rgba(0,0,0,0.1);
    border: 1px solid #e1e8ed;
    margin-bottom: 2rem;
}
/* Status indicator */
.status-indicator {
    display: flex;
    align-items: center;
    justify-content: center;
    padding: 0.5rem;
    border-radius: 10px;
    margin-bottom: 1rem;
    font-weight: 600;
}
.status-connected {
    background: #d4edda;
    color: #155724;
    border: 1px solid #c3e6cb;
}
.status-waiting {
    background: #fff3cd;
    color: #856404;
    border: 1px solid #ffeaa7;
}
/* Hide streamlit branding */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
/* Typing indicator */
.typing-indicator {
    display: flex;
    align-items: center;
    margin: 1rem 0;
    color: #666;
}
.typing-dots {
    display: flex;
    gap: 3px;
    margin-left: 10px;
}
.typing-dot {
    width: 6px;
    height: 6px;
    border-radius: 50%;
    background: #667eea;
    animation: typing 1.4s infinite;
}
.typing-dot:nth-child(2) {
    animation-delay: 0.2s;
}
.typing-dot:nth-child(3) {
    animation-delay: 0.4s;
}
@keyframes typing {
    0%, 60%, 100% {
        transform: translateY(0);
    }
    30% {
        transform: translateY(-10px);
    }
}
</style>
""", unsafe_allow_html=True)
# One-time initialization of every session-state key the app reads.
_STATE_DEFAULTS = {
    'page': 'setup',                # current view: 'setup' or 'chat'
    'chat_history': [],             # display transcript: {"role", "content"} dicts
    'llm': None,                    # ChatGoogleGenerativeAI instance once started
    'system_messages': [],          # LangChain message list sent to the model
    'last_processed_input': "",     # guard against re-processing the same submission
    'processing': False,            # True while a model call is in flight
}
for _key, _default in _STATE_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
def initialize_llm():
    """Create and return a Gemini chat model, or None when unavailable.

    Reads the key from the GEMINI_API_KEY environment variable and reports
    any problem through the Streamlit UI instead of raising, so the caller
    only has to check for None.
    """
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        st.error("⚠️ GEMINI_API_KEY not found in environment variables!")
        return None
    try:
        return ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            api_key=api_key,
        )
    except Exception as e:
        # Surface construction failures (bad key, network, etc.) in the UI.
        st.error(f"❌ Error initializing AI model: {str(e)}")
        return None
def setup_page():
    """Render the setup page: tech-stack selection, interview configuration,
    API-key status, and the Start button that seeds the chat session state."""
    st.markdown("""
    <div class="header">
        <h1>🤖 AI Interview Assistant</h1>
        <p>Intelligent recruitment tool for seamless candidate evaluation</p>
    </div>
    """, unsafe_allow_html=True)
    st.markdown('<div class="tech-card">', unsafe_allow_html=True)
    col1, col2 = st.columns([1, 1])
    with col1:
        st.markdown("### 🛠️ Select Candidate's Tech Stack")
        st.markdown("Choose the technologies relevant to the position:")
        # Fixed catalog of technologies offered in the multiselect.
        tech_options = [
            "Python", "JavaScript", "Java", "C++", "C#", "Go", "Rust", "TypeScript",
            "React", "Angular", "Vue.js", "Node.js", "Django", "Flask", "Spring Boot",
            "Machine Learning", "Deep Learning", "Data Science", "AI/ML", "NLP",
            "AWS", "Azure", "Google Cloud", "Docker", "Kubernetes", "DevOps",
            "SQL", "MongoDB", "PostgreSQL", "Redis", "GraphQL", "REST APIs",
            "Git", "Agile", "Scrum", "CI/CD", "Testing", "Microservices"
        ]
        selected_tech = st.multiselect(
            "Technologies:",
            options=tech_options,
            default=["Python", "Machine Learning", "Deep Learning"],
            help="Select all relevant technologies for the position"
        )
    with col2:
        st.markdown("### 🎯 Interview Configuration")
        interview_level = st.selectbox(
            "Interview Level:",
            ["Junior", "Mid-level", "Senior", "Lead"],
            index=1  # default: "Mid-level"
        )
        question_count = st.slider(
            "Number of Technical Questions:",
            min_value=3,
            max_value=8,
            value=4,
            help="Number of technical questions to ask"
        )
        interview_duration = st.selectbox(
            "Expected Duration:",
            ["15 minutes", "30 minutes", "45 minutes", "60 minutes"],
            index=1  # default: "30 minutes"
        )
    # Show whether the Gemini key is present before allowing an interview start.
    api_key_status = os.getenv("GEMINI_API_KEY")
    if api_key_status:
        st.markdown("""
        <div class="status-indicator status-connected">
            ✅ AI Assistant Ready
        </div>
        """, unsafe_allow_html=True)
    else:
        st.markdown("""
        <div class="status-indicator status-waiting">
            ⚠️ Please set GEMINI_API_KEY environment variable
        </div>
        """, unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
    # Middle of three equal columns keeps the Start button centered.
    col1, col2, col3 = st.columns([1, 1, 1])
    with col2:
        if st.button("🚀 Start Interview"):
            if selected_tech and api_key_status:
                # Persist the chosen configuration for the chat page.
                st.session_state.selected_tech = selected_tech
                st.session_state.interview_level = interview_level
                st.session_state.question_count = question_count
                st.session_state.interview_duration = interview_duration
                st.session_state.llm = initialize_llm()
                if st.session_state.llm:
                    # System prompt encoding the interview SOP, parameterized
                    # by the selections above (kept verbatim).
                    system_prompt = f"""You are an intelligent interview assistant designed to facilitate the candidate information gathering process for recruitment, simulating an interview-like flow. Your main objective is to take the interview based as per the SOP given below.
SOP:
Step 1: Start the conversation by greeting the candidate warmly.
Step 2: Once you greeted, ALWAYS collect the following candidate details such as full name, email address, phone number, years of experience, desired position(s), and current location.
Step 3: After collecting the candidate details, start ask the questions that related to candidate's tech stack, you're allowed to ask whatever question you want that are related to candidate's tech stack. Ask {question_count} questions related to candidates tech stack for {interview_level} level.
Step 4: After asking {question_count} questions, politely conclude the interview with a polite closing message.
Additional Guidelines:
- Maintain the flow of the conversation by handling follow-up questions while keeping the context intact.
- When collecting candidate's details, collect the details one by one, NEVER ask all the info at a single time.
- If the candidate provides unexpected input, offer a fallback response to guide them back to the process.
- Upon detecting a conversation-ending keyword, gracefully conclude the conversation by thanking the candidate, informing them of the next steps, and ensuring a positive close.
- Always maintain a professional tone, avoid deviating from the task, and ensure the conversation is smooth and seamless throughout.
- The interview is for {interview_level} level position with expected duration of {interview_duration}.
Candidate's tech stack: {selected_tech}
"""
                    # Seed both the model-facing history and the display history
                    # with the same opening assistant message.
                    st.session_state.system_messages = [
                        SystemMessage(content=system_prompt),
                        AIMessage("Hello! Welcome to the interview. I'm your AI interview assistant. Let's begin with a warm greeting. How are you doing today?")
                    ]
                    st.session_state.chat_history = [
                        {"role": "assistant", "content": "Hello! Welcome to the interview. I'm your AI interview assistant. Let's begin with a warm greeting. How are you doing today?"}
                    ]
                    st.session_state.page = 'chat'
                    st.rerun()
                else:
                    st.error("❌ Failed to initialize AI assistant. Please check your API key.")
            else:
                if not selected_tech:
                    st.error("⚠️ Please select at least one technology!")
                if not api_key_status:
                    st.error("⚠️ Please set your GEMINI_API_KEY environment variable!")
def chat_page():
    """Render the live interview: transcript, input form, and LLM round-trips."""
    col1, col2, col3 = st.columns([1, 6, 1])
    with col1:
        if st.button("← Back", key="back_btn"):
            # Reset all interview state so setup starts fresh.
            st.session_state.page = 'setup'
            st.session_state.chat_history = []
            st.session_state.system_messages = []
            st.session_state.last_processed_input = ""
            st.session_state.processing = False
            st.rerun()
    with col2:
        st.markdown("""
        <div class="header">
            <h1>💬 Interview in Progress</h1>
            <p>Tech Stack: {}</p>
        </div>
        """.format(", ".join(st.session_state.selected_tech)), unsafe_allow_html=True)
    # Transcript: render each stored message as a styled chat bubble.
    st.markdown('<div class="chat-container">', unsafe_allow_html=True)
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            st.markdown(f'<div class="user-message">{message["content"]}</div>', unsafe_allow_html=True)
        else:
            st.markdown(f'<div class="bot-message">{message["content"]}</div>', unsafe_allow_html=True)
    # NOTE(review): the model call below is synchronous and clears `processing`
    # before the next rerun, so this indicator is rarely visible in practice.
    if 'processing' in st.session_state and st.session_state.processing:
        st.markdown("""
        <div class="typing-indicator">
            AI is thinking...
            <div class="typing-dots">
                <div class="typing-dot"></div>
                <div class="typing-dot"></div>
                <div class="typing-dot"></div>
            </div>
        </div>
        """, unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
    # clear_on_submit empties the text box after each send.
    with st.form(key="chat_form", clear_on_submit=True):
        user_input = st.text_input(
            "Type your message..."
        )
        send_button = st.form_submit_button("Send", type="primary")
    if send_button and user_input.strip():
        # Skip duplicate submissions replayed by Streamlit reruns.
        if user_input != st.session_state.get('last_processed_input', ''):
            st.session_state.last_processed_input = user_input
            st.session_state.chat_history.append({"role": "user", "content": user_input})
            # BUGFIX: the original used `'bye' in user_input.lower()`, a substring
            # test that also matched words like "maybe" and ended the interview
            # prematurely. Match "bye" only as a whole word.
            if re.search(r'\bbye\b', user_input.lower()):
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": "Thank you for your time! The interview has been completed. Our team will review your responses and get back to you soon. Have a great day!"
                })
                st.rerun()
            else:
                try:
                    st.session_state.processing = True
                    # Re-attach the tech stack so the model keeps it in context
                    # on every turn.
                    user_message_with_context = f"{user_input} Candidate's tech stack: {st.session_state.selected_tech}"
                    st.session_state.system_messages.append(HumanMessage(content=user_message_with_context))
                    response = st.session_state.llm.invoke(st.session_state.system_messages)
                    # Record the reply in both the display and model histories.
                    st.session_state.chat_history.append({"role": "assistant", "content": response.content})
                    st.session_state.system_messages.append(AIMessage(content=response.content))
                    st.session_state.processing = False
                    st.rerun()
                except Exception as e:
                    # Best-effort recovery: show the error and keep the chat usable.
                    st.session_state.processing = False
                    st.error(f"❌ An error occurred: {str(e)}")
                    st.session_state.chat_history.append({
                        "role": "assistant",
                        "content": "I apologize, but I encountered an error. Please try again or contact support if the issue persists."
                    })
                    st.rerun()
def main():
    """Dispatch to the view named by st.session_state.page."""
    pages = {'setup': setup_page, 'chat': chat_page}
    render = pages.get(st.session_state.page)
    if render is not None:
        render()


if __name__ == "__main__":
    main()