unsafe_sensitive_bio handling

parent 92dffa87
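Summary: this commit adds a branch for the new unsafe_sensitive_bio query type and reworks how all unsafe_* queries are answered. Previously the handler's text was returned verbatim with no call to the AI model; now the handler output is appended to messages as system context and the model generates the reply, so unsafe topics get the same conversation history and persona as safe ones. classify_query_type also gains a student_id argument so classification can use conversation context, and the error path no longer leaks exception details to the client.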
@@ -7,10 +7,8 @@ import logging
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
 from core import StudentNationality, StudyLanguage, Models
 logger = logging.getLogger(__name__)
 class ResponseGenerator:
     """Handles AI response generation and conversation management"""
@@ -70,8 +68,6 @@ class ResponseGenerator:
         return formatted_base_prompt
     def generate_response(
         self,
         user_message: str,
@@ -99,64 +95,71 @@ class ResponseGenerator:
             conversation_history = self.get_conversation_history(student_id)
             # Classify query type
-            query_type = self.query_handler.classify_query_type(user_message, student_info)
-            logger.info(f"Query type: {query_type} for student {student_name} ({study_language.value})")
+            query_type = self.query_handler.classify_query_type(user_message, student_info, student_id)
+            logger.info(f"Query type: {query_type} for student {student_name} ({study_language.value}) with conversation context")
-            # *** HANDLE UNSAFE QUERIES IMMEDIATELY - NO SYSTEM PROMPT ***
-            if query_type.startswith("unsafe_"):
-                if query_type == "unsafe_religion":
-                    unsafe_response = self.query_handler.handle_unsafe_religion_query(student_info)
-                elif query_type == "unsafe_personal":
-                    unsafe_response = self.query_handler.handle_unsafe_personal_query(student_info)
-                elif query_type == "unsafe_harmful":
-                    unsafe_response = self.query_handler.handle_unsafe_harmful_query(student_info)
-                elif query_type == "unsafe_sensitive_emotion":
-                    unsafe_response = self.query_handler.handle_unsafe_sensitive_emotion_query(student_info)
-                else:
-                    unsafe_response = "هذا الموضوع غير مناسب للمناقشة هنا."
-                # Save response directly and return - NO AI MODEL CALL
-                self.add_message_to_history(student_id, unsafe_response, "assistant")
-                logger.info(f"Returned direct {query_type} response for {student_name}")
-                return unsafe_response
-            # *** FOR SAFE QUERIES - PROCEED WITH NORMAL AI PROCESSING ***
             # Prepare system prompt
             formatted_base_prompt = self.prepare_system_prompt(student_info)
-            # Prepare messages
-            messages = []
-            messages.append({"role": "system", "content": formatted_base_prompt})
+            # Build base messages
+            messages = [{"role": "system", "content": formatted_base_prompt}]
             messages.extend(conversation_history)
             messages.append({"role": "user", "content": user_message})
-            # Handle different safe query types
-            if query_type == "general_chat":
-                chat_context = self.query_handler.handle_general_chat_query(user_message, student_info)
-                messages.append({"role": "system", "content": f"سياق المحادثة العامة:\n{chat_context}"})
-            elif query_type == "overview":
-                overview_response = self.query_handler.handle_overview_query(student_info, subject)
-                messages.append({"role": "system", "content": f"المنهج الكامل من ملف JSON:\n{overview_response}"})
-            elif query_type == "navigation":
-                navigation_response = self.query_handler.handle_navigation_query(user_message, student_info, subject)
-                messages.append({"role": "system", "content": f"تفاصيل الوحدة/المفهوم من JSON:\n{navigation_response}"})
-            elif query_type == "specific_content":
-                # Enhanced content search
-                relevant_results = self.context_generator.search_enhanced_content(
-                    user_message, student_info, subject, top_k
-                )
-                if relevant_results:
-                    enhanced_context = self.context_generator.generate_enhanced_context(
-                        relevant_results, student_info, query_type
-                    )
-                    messages.append({"role": "system", "content": enhanced_context})
-                    logger.info(f"Added enhanced context with {len(relevant_results)} chunks")
-            # Generate response using AI model
+            # ==========================
+            # HANDLE UNSAFE QUERIES
+            # ==========================
+            if query_type.startswith("unsafe_"):
+                if query_type == "unsafe_religion":
+                    unsafe_context = self.query_handler.handle_unsafe_religion_query(student_info)
+                elif query_type == "unsafe_sensitive_emotion":
+                    unsafe_context = self.query_handler.handle_unsafe_sensitive_emotion_query(student_info)
+                elif query_type == "unsafe_sensitive_bio":
+                    unsafe_context = self.query_handler.handle_unsafe_sensitive_bio_query(student_info)
+                elif query_type == "unsafe_personal":
+                    unsafe_context = self.query_handler.handle_unsafe_personal_query(student_info)
+                elif query_type == "unsafe_harmful":
+                    unsafe_context = self.query_handler.handle_unsafe_harmful_query(student_info)
+                else:
+                    unsafe_context = "هذا الموضوع غير مناسب للمناقشة هنا."
+                # Add the guidance as system context instead of returning it directly
+                messages.append({
+                    "role": "system",
+                    "content": f"التعليمات للتعامل مع الموضوع الحساس:\n{unsafe_context}"
+                })
+            # ==========================
+            # HANDLE SAFE QUERIES
+            # ==========================
+            else:
+                if query_type == "general_chat":
+                    chat_context = self.query_handler.handle_general_chat_query(user_message, student_info)
+                    messages.append({"role": "system", "content": f"سياق المحادثة العامة:\n{chat_context}"})
+                elif query_type == "overview":
+                    overview_response = self.query_handler.handle_overview_query(student_info, subject)
+                    messages.append({"role": "system", "content": f"المنهج الكامل من ملف JSON:\n{overview_response}"})
+                elif query_type == "navigation":
+                    navigation_response = self.query_handler.handle_navigation_query(user_message, student_info, subject)
+                    messages.append({"role": "system", "content": f"تفاصيل الوحدة/المفهوم من JSON:\n{navigation_response}"})
+                elif query_type == "specific_content":
+                    # Enhanced content search
+                    relevant_results = self.context_generator.search_enhanced_content(
+                        user_message, student_info, subject, top_k
+                    )
+                    if relevant_results:
+                        enhanced_context = self.context_generator.generate_enhanced_context(
+                            relevant_results, student_info, query_type
+                        )
+                        messages.append({"role": "system", "content": enhanced_context})
+                        logger.info(f"Added enhanced context with {len(relevant_results)} chunks for student {student_name}")
+            # ==========================
+            # CALL AI MODEL
+            # ==========================
             response = self.openai_service.client.chat.completions.create(
                 model=model,
                 messages=messages,
@@ -169,12 +172,12 @@ class ResponseGenerator:
             # Save AI response
             self.add_message_to_history(student_id, ai_response, "assistant")
-            logger.info(f"Generated {query_type} response for {student_name} ({study_language.value}): {len(ai_response)} characters")
+            logger.info(f"Generated {query_type} response for {student_name} ({study_language.value}) with conversation context: {len(ai_response)} characters")
             return ai_response
         except HTTPException:
             raise
         except Exception as e:
-            logger.error(f"Error generating AI response: {e}")
-            raise HTTPException(status_code=500, detail=f"AI response generation failed: {str(e)}")
+            logger.error(f"Error generating response for student {student_id}: {e}")
+            raise HTTPException(status_code=500, detail="Error generating response")
\ No newline at end of file
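
The new branch calls query_handler.handle_unsafe_sensitive_bio_query, which is not part of this diff. Below is a minimal sketch of what such a handler might look like, assuming it mirrors the other unsafe_* handlers by returning guidance text for the system prompt rather than a finished reply. The class name, the "grade" key on student_info, and the guidance wording are all illustrative, not the committed implementation.

# Hypothetical sketch only -- the committed handler lives on the query
# handler and is not shown in this diff.
class QueryHandlerSketch:
    def handle_unsafe_sensitive_bio_query(self, student_info: dict) -> str:
        """Return system-prompt guidance for sensitive biology questions.

        After this commit, unsafe_* handlers produce instructions that are
        appended as system context; the model writes the actual reply.
        """
        grade = student_info.get("grade", "")  # assumed key, illustrative only
        return (
            # "Answer politely and scientifically at a level suited to the
            # student's grade, keep to curriculum content only, and refer
            # further questions to a teacher or guardian."
            "أجب بأسلوب علمي مهذب يناسب مستوى الطالب "
            f"في الصف {grade}، والتزم بمحتوى المنهج فقط، "
            "ووجّه الأسئلة الإضافية إلى المعلم أو ولي الأمر."
        )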
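
As the unsafe_* family grows (this commit brings it to five types), the if/elif chain could be replaced with a dispatch table so each new type needs only one entry. A possible refactor, not part of this commit: the handler names below come from the diff, while the table and helper function are a suggested structure.

# Sketch of table-driven dispatch for unsafe query types.
UNSAFE_HANDLERS = {
    "unsafe_religion": "handle_unsafe_religion_query",
    "unsafe_sensitive_emotion": "handle_unsafe_sensitive_emotion_query",
    "unsafe_sensitive_bio": "handle_unsafe_sensitive_bio_query",
    "unsafe_personal": "handle_unsafe_personal_query",
    "unsafe_harmful": "handle_unsafe_harmful_query",
}

def get_unsafe_context(query_handler, query_type, student_info):
    """Look up and call the handler registered for an unsafe query type."""
    method_name = UNSAFE_HANDLERS.get(query_type)
    if method_name is None:
        # Same fallback string the diff uses for unrecognized unsafe types
        return "هذا الموضوع غير مناسب للمناقشة هنا."
    return getattr(query_handler, method_name)(student_info)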