ChAbhishek28 committed on
Commit
ce2d17d
·
1 Parent(s): 2b5cde1

Add 8999999999999999999999999999

Browse files
Files changed (1) hide show
  1. enhanced_websocket_handler.py +17 -10
enhanced_websocket_handler.py CHANGED
@@ -94,19 +94,18 @@ def analyze_query_context(query: str) -> dict:
94
  doc_matches = sum(1 for kw in doc_keywords if kw in query_lower)
95
  general_matches = sum(1 for kw in general_keywords if kw in query_lower)
96
 
97
- # Determine query type
98
  if doc_matches > 0 or detected_role:
99
  query_type = "document_related"
100
  confidence = max(min(doc_matches * 0.3, 1.0), role_confidence)
101
- elif general_matches > 0:
 
102
  query_type = "general_conversation"
103
  confidence = min(general_matches * 0.4, 1.0)
104
- elif len(query.strip().split()) < 3:
105
- query_type = "general_conversation" # Short queries likely general
106
- confidence = 0.6
107
  else:
108
- query_type = "document_related" # Default to document search for longer queries
109
- confidence = 0.3
 
110
 
111
  return {
112
  "type": query_type,
@@ -911,9 +910,17 @@ async def get_hybrid_response(user_message: str, context: str, config: dict, kno
911
  # Check if we have relevant documents
912
  has_relevant_docs = docs and any(doc.get("score", 0) > 0.5 for doc in docs)
913
 
914
- # For general conversation queries, use LLM even if we have some documents
915
- if query_context.get("type") == "general_conversation" and query_context.get("confidence", 0) > 0.6:
916
- logger.info("📱 Detected general conversation, using LLM directly")
 
 
 
 
 
 
 
 
917
  llm_response = await generate_llm_fallback_response(user_message, query_context)
918
  yield {
919
  "clause_text": llm_response,
 
94
  doc_matches = sum(1 for kw in doc_keywords if kw in query_lower)
95
  general_matches = sum(1 for kw in general_keywords if kw in query_lower)
96
 
97
+ # Determine query type - FIXED: Be more aggressive about document searches
98
  if doc_matches > 0 or detected_role:
99
  query_type = "document_related"
100
  confidence = max(min(doc_matches * 0.3, 1.0), role_confidence)
101
+ elif general_matches > 0 and doc_matches == 0:
102
+ # Only treat as general if there are ZERO document keywords
103
  query_type = "general_conversation"
104
  confidence = min(general_matches * 0.4, 1.0)
 
 
 
105
  else:
106
+ # DEFAULT to document search - this is a government document system
107
+ query_type = "document_related"
108
+ confidence = 0.5 # Higher confidence for document search by default
109
 
110
  return {
111
  "type": query_type,
 
910
  # Check if we have relevant documents
911
  has_relevant_docs = docs and any(doc.get("score", 0) > 0.5 for doc in docs)
912
 
913
+ # FIXED: Always try document search first, even for apparent "general" queries
914
+ # This is a government document system - most queries should check documents
915
+ # Only use pure LLM for very clear greetings/thanks with NO document matches
916
+ very_general_keywords = ['hello', 'hi', 'thank you', 'thanks', 'goodbye', 'bye']
917
+ is_very_general = (query_context.get("type") == "general_conversation" and
918
+ query_context.get("confidence", 0) > 0.8 and
919
+ any(keyword in user_message.lower() for keyword in very_general_keywords) and
920
+ not docs)
921
+
922
+ if is_very_general:
923
+ logger.info("📱 Detected pure greeting/thanks with no documents, using LLM directly")
924
  llm_response = await generate_llm_fallback_response(user_message, query_context)
925
  yield {
926
  "clause_text": llm_response,