Commit a0b0f78
1 Parent(s): 4c17c6a
Add 89999999999999

Files changed: enhanced_websocket_handler.py (+14 -18)

enhanced_websocket_handler.py CHANGED
@@ -240,20 +240,18 @@ async def handle_text_message(websocket: WebSocket, data: dict, session_data: dict
     try:
         if use_hybrid:
             # Stream hybrid LLM service response
-
-
-            async for chunk, provider in get_hybrid_response(
+            response_chunks = []
+            async for chunk in get_hybrid_response(
                 user_message, session_data["context"], config, knowledge_base
             ):
-
-                provider_used = provider
-                full_response += chunk
+                response_chunks.append(chunk)
                 await websocket.send_json({
                     "type": "streaming_response",
-                    "message": chunk,
-                    "provider_used": provider_used
+                    "message": chunk
                 })
-            response_text = full_response
+            response_text = "".join(response_chunks)
+            provider_used = hybrid_llm_service.choose_llm_provider(user_message)
+            provider_used = provider_used.value if provider_used else "unknown"
         else:
             # Use traditional graph approach
             session_data["messages"].append(HumanMessage(content=user_message))
@@ -361,20 +359,18 @@ async def handle_voice_message(websocket: WebSocket, data: dict, session_data: dict
 
         # Process as text message with language context
         if use_hybrid:
-
-
-            async for chunk, provider in get_hybrid_response(
+            response_chunks = []
+            async for chunk in get_hybrid_response(
                 enhanced_message, session_data["context"], config, knowledge_base
             ):
-
-                provider_used = provider
-                full_response += chunk
+                response_chunks.append(chunk)
                 await websocket.send_json({
                     "type": "streaming_response",
-                    "message": chunk,
-                    "provider_used": provider_used
+                    "message": chunk
                 })
-            response_text = full_response
+            response_text = "".join(response_chunks)
+            provider_used = hybrid_llm_service.choose_llm_provider(enhanced_message)
+            provider_used = provider_used.value if provider_used else "unknown"
         else:
             session_data["messages"].append(HumanMessage(content=enhanced_message))
             result = await graph.ainvoke({"messages": session_data["messages"]}, config)
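
A note on the new pattern in both handlers: each streamed chunk is appended to a list and forwarded to the client immediately, and the full response is joined once the generator is exhausted. A minimal runnable sketch of that consumer loop follows; the stubbed get_hybrid_response and FakeWebSocket are hypothetical stand-ins for the repo's hybrid LLM service and FastAPI's WebSocket, and only the loop body mirrors the committed code.

import asyncio
from typing import AsyncIterator

async def get_hybrid_response(message: str) -> AsyncIterator[str]:
    # Stand-in for the real service: after this commit the generator
    # yields plain text chunks instead of (chunk, provider) tuples.
    for chunk in ["Hello", ", ", "world", "!"]:
        await asyncio.sleep(0)  # simulate streaming latency
        yield chunk

class FakeWebSocket:
    # Stand-in for fastapi.WebSocket; just prints outgoing frames.
    async def send_json(self, payload: dict) -> None:
        print("send_json:", payload)

async def handle_text(websocket: FakeWebSocket, user_message: str) -> str:
    response_chunks = []
    async for chunk in get_hybrid_response(user_message):
        response_chunks.append(chunk)      # keep for the final transcript
        await websocket.send_json({        # forward each chunk as it arrives
            "type": "streaming_response",
            "message": chunk,
        })
    return "".join(response_chunks)        # the diff's response_text

print(asyncio.run(handle_text(FakeWebSocket(), "hi")))

Collecting into a list and joining once also avoids the repeated string reallocation that full_response += chunk can incur on long streams, which is presumably part of why the commit swaps the accumulator.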
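
The provider label is likewise no longer threaded through the stream; it is recomputed afterwards via hybrid_llm_service.choose_llm_provider(...), and the .value access plus the "unknown" fallback imply that call returns an enum member or None. A sketch under that assumption; the LLMProvider enum and HybridLLMService class here are invented for illustration:

from enum import Enum
from typing import Optional

class LLMProvider(Enum):
    # Hypothetical members; the real names live in hybrid_llm_service.
    OPENAI = "openai"
    LOCAL = "local"

class HybridLLMService:
    def choose_llm_provider(self, message: str) -> Optional[LLMProvider]:
        # Toy routing rule: long prompts go to the hosted model.
        return LLMProvider.OPENAI if len(message) > 40 else LLMProvider.LOCAL

hybrid_llm_service = HybridLLMService()

provider = hybrid_llm_service.choose_llm_provider("hi")
# Same guard the commit adds: fall back to "unknown" when routing returns None.
provider_used = provider.value if provider else "unknown"
print(provider_used)  # -> "local"

One trade-off worth noting: because choose_llm_provider is now called separately from get_hybrid_response, the reported provider matches the one actually used for the stream only if the routing decision is deterministic for a given message.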