unsafe_sensitive_bio handling

parent 92dffa87
......@@ -7,10 +7,8 @@ import logging
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
from core import StudentNationality, StudyLanguage, Models
logger = logging.getLogger(__name__)
class ResponseGenerator:
"""Handles AI response generation and conversation management"""
......@@ -70,8 +68,6 @@ class ResponseGenerator:
return formatted_base_prompt
def generate_response(
self,
user_message: str,
......@@ -99,64 +95,71 @@ class ResponseGenerator:
conversation_history = self.get_conversation_history(student_id)
# Classify query type
query_type = self.query_handler.classify_query_type(user_message, student_info)
logger.info(f"Query type: {query_type} for student {student_name} ({study_language.value})")
query_type = self.query_handler.classify_query_type(user_message, student_info, student_id)
logger.info(f"Query type: {query_type} for student {student_name} ({study_language.value}) with conversation context")
# *** HANDLE UNSAFE QUERIES IMMEDIATELY - NO SYSTEM PROMPT ***
if query_type.startswith("unsafe_"):
if query_type == "unsafe_religion":
unsafe_response = self.query_handler.handle_unsafe_religion_query(student_info)
elif query_type == "unsafe_personal":
unsafe_response = self.query_handler.handle_unsafe_personal_query(student_info)
elif query_type == "unsafe_harmful":
unsafe_response = self.query_handler.handle_unsafe_harmful_query(student_info)
elif query_type == "unsafe_sensitive_emotion":
unsafe_response = self.query_handler.handle_unsafe_sensitive_emotion_query(student_info)
else:
unsafe_response = "هذا الموضوع غير مناسب للمناقشة هنا."
# Save response directly and return - NO AI MODEL CALL
self.add_message_to_history(student_id, unsafe_response, "assistant")
logger.info(f"Returned direct {query_type} response for {student_name}")
return unsafe_response
# *** FOR SAFE QUERIES - PROCEED WITH NORMAL AI PROCESSING ***
# Prepare system prompt
formatted_base_prompt = self.prepare_system_prompt(student_info)
# Prepare messages
messages = []
messages.append({"role": "system", "content": formatted_base_prompt})
# Build base messages
messages = [{"role": "system", "content": formatted_base_prompt}]
messages.extend(conversation_history)
messages.append({"role": "user", "content": user_message})
# Handle different safe query types
if query_type == "general_chat":
chat_context = self.query_handler.handle_general_chat_query(user_message, student_info)
messages.append({"role": "system", "content": f"سياق المحادثة العامة:\n{chat_context}"})
elif query_type == "overview":
overview_response = self.query_handler.handle_overview_query(student_info, subject)
messages.append({"role": "system", "content": f"المنهج الكامل من ملف JSON:\n{overview_response}"})
elif query_type == "navigation":
navigation_response = self.query_handler.handle_navigation_query(user_message, student_info, subject)
messages.append({"role": "system", "content": f"تفاصيل الوحدة/المفهوم من JSON:\n{navigation_response}"})
elif query_type == "specific_content":
# Enhanced content search
relevant_results = self.context_generator.search_enhanced_content(
user_message, student_info, subject, top_k
)
if relevant_results:
enhanced_context = self.context_generator.generate_enhanced_context(
relevant_results, student_info, query_type
# ==========================
# HANDLE UNSAFE QUERIES
# ==========================
if query_type.startswith("unsafe_"):
if query_type == "unsafe_religion":
unsafe_context = self.query_handler.handle_unsafe_religion_query(student_info)
elif query_type == "unsafe_sensitive_emotion":
unsafe_context = self.query_handler.handle_unsafe_sensitive_emotion_query(student_info)
elif query_type == "unsafe_sensitive_bio":
unsafe_context = self.query_handler.handle_unsafe_sensitive_bio_query(student_info)
elif query_type == "unsafe_personal":
unsafe_context = self.query_handler.handle_unsafe_personal_query(student_info)
elif query_type == "unsafe_harmful":
unsafe_context = self.query_handler.handle_unsafe_harmful_query(student_info)
else:
unsafe_context = "هذا الموضوع غير مناسب للمناقشة هنا."
# نضيف التعليمات كـ system context بدل الرد المباشر
messages.append({
"role": "system",
"content": f"التعليمات للتعامل مع الموضوع الحساس:\n{unsafe_context}"
})
# ==========================
# HANDLE SAFE QUERIES
# ==========================
else:
if query_type == "general_chat":
chat_context = self.query_handler.handle_general_chat_query(user_message, student_info)
messages.append({"role": "system", "content": f"سياق المحادثة العامة:\n{chat_context}"})
elif query_type == "overview":
overview_response = self.query_handler.handle_overview_query(student_info, subject)
messages.append({"role": "system", "content": f"المنهج الكامل من ملف JSON:\n{overview_response}"})
elif query_type == "navigation":
navigation_response = self.query_handler.handle_navigation_query(user_message, student_info, subject)
messages.append({"role": "system", "content": f"تفاصيل الوحدة/المفهوم من JSON:\n{navigation_response}"})
elif query_type == "specific_content":
# Enhanced content search
relevant_results = self.context_generator.search_enhanced_content(
user_message, student_info, subject, top_k
)
messages.append({"role": "system", "content": enhanced_context})
logger.info(f"Added enhanced context with {len(relevant_results)} chunks")
# Generate response using AI model
if relevant_results:
enhanced_context = self.context_generator.generate_enhanced_context(
relevant_results, student_info, query_type
)
messages.append({"role": "system", "content": enhanced_context})
logger.info(f"Added enhanced context with {len(relevant_results)} chunks for student {student_name}")
# ==========================
# CALL AI MODEL
# ==========================
response = self.openai_service.client.chat.completions.create(
model=model,
messages=messages,
......@@ -169,12 +172,12 @@ class ResponseGenerator:
# Save AI response
self.add_message_to_history(student_id, ai_response, "assistant")
logger.info(f"Generated {query_type} response for {student_name} ({study_language.value}): {len(ai_response)} characters")
logger.info(f"Generated {query_type} response for {student_name} ({study_language.value}) with conversation context: {len(ai_response)} characters")
return ai_response
except HTTPException:
raise
except Exception as e:
logger.error(f"Error generating AI response: {e}")
raise HTTPException(status_code=500, detail=f"AI response generation failed: {str(e)}")
\ No newline at end of file
logger.error(f"Error generating response for student {student_id}: {e}")
raise HTTPException(status_code=500, detail="Error generating response")
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment