Salma Mohammed Hamed / AI Tutor · Commits

Commit 92dffa87, authored Sep 22, 2025 by SalmaMohammedHamedMustafa
handle unsafe query
Parent: 71b6ce43
Showing 6 changed files with 830 additions and 523 deletions (+830 −523)
self_hosted_env/voice_agent/services/agent_helpers/agent_prompts.py       +166 −0
self_hosted_env/voice_agent/services/agent_helpers/context_generator.py    +92 −0
self_hosted_env/voice_agent/services/agent_helpers/query_handlers.py      +350 −0
self_hosted_env/voice_agent/services/agent_helpers/response_generator.py  +180 −0
self_hosted_env/voice_agent/services/agent_service.py                      +42 −523
self_hosted_env/voice_agent/voice_agent.tar                                 +0 −0
self_hosted_env/voice_agent/services/agent_helpers/agent_prompts.py (new file, mode 100644)
(diff collapsed in this view)
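agent_prompts.py is not expanded in this capture, but prepare_system_prompt() in response_generator.py (further down) looks prompts up as SYSTEM_PROMPTS.get((nationality, study_language)) and formats them with student_name and grade. A minimal sketch of the shape that implies; the prompt texts here are placeholders, not the real ones from this commit:

# Hypothetical sketch of the SYSTEM_PROMPTS structure implied by
# ResponseGenerator.prepare_system_prompt(); the actual prompt texts in
# agent_prompts.py are not visible in this diff.
from core import StudentNationality, StudyLanguage

SYSTEM_PROMPTS = {
    (StudentNationality.EGYPTIAN, StudyLanguage.ARABIC):
        "You are a friendly science tutor for {student_name}, grade {grade}. "
        "Answer in Arabic using the Arabic curriculum.",   # placeholder text
    (StudentNationality.EGYPTIAN, StudyLanguage.ENGLISH):
        "You are a friendly science tutor for {student_name}, grade {grade}. "
        "Explain in Arabic but keep the English curriculum terms.",  # placeholder text
    # ... presumably one entry per (nationality, study_language) combination,
    # e.g. the SAUDI variants referenced by the nationality mapping below.
}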
self_hosted_env/voice_agent/services/agent_helpers/context_generator.py (new file, mode 100644)
import logging
import os
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))

from typing import Dict, Tuple

from core import StudentNationality, StudyLanguage

logger = logging.getLogger(__name__)


class ContextGenerator:
    """Handles context generation for AI responses"""

    def __init__(self, openai_service, pgvector_service):
        self.openai_service = openai_service
        self.pgvector = pgvector_service

    def generate_enhanced_context(self, search_results: list[Dict], student_info: Dict, query_type: str) -> str:
        """Generate enhanced context with JSON-based curriculum structure awareness"""
        if not search_results:
            return ""

        is_arabic = student_info['is_arabic']
        study_language = student_info['study_language']
        grade = student_info['grade']

        if study_language == StudyLanguage.ENGLISH:
            context_message = f"📚 من المنهج الإنجليزي لمادة العلوم للصف {grade}:\n\n"
        else:
            context_message = f"📚 من المنهج العربي لمادة العلوم للصف {grade}:\n\n"

        for result in search_results:
            # Basic information
            unit_info = f"الوحدة: {result['unit']}" if result.get('unit') else ""
            concept_info = f"المفهوم: {result['concept']}" if result.get('concept') else ""
            lesson_info = f"الدرس: {result['lesson']}" if result.get('lesson') else ""

            # Build header
            context_parts = [info for info in [unit_info, concept_info, lesson_info] if info]
            if context_parts:
                context_message += f"**{' → '.join(context_parts)}**\n"

            # Add content
            context_message += f"{result['chunk_text']}\n"

            # Add curriculum context if available
            if 'curriculum_context' in result:
                ctx = result['curriculum_context']
                if ctx.get('navigation_hint'):
                    context_message += f"\n💡 {ctx['navigation_hint']}\n"
                if ctx.get('related_concepts') and query_type == "specific_content":
                    related = ', '.join(ctx['related_concepts'][:3])
                    if is_arabic:
                        context_message += f"🔗 مفاهيم ذات صلة: {related}\n"
                    else:
                        context_message += f"🔗 Related concepts: {related}\n"

            context_message += "\n---\n\n"

        # Add instruction for using the context
        if study_language == StudyLanguage.ENGLISH:
            context_message += f"استخدم هذه المعلومات لتقديم شرح دقيق للطفل. المنهج إنجليزي فاذكر المصطلحات الإنجليزية مع الشرح بالعربي."
        else:
            context_message += f"استخدم هذه المعلومات لتقديم شرح دقيق ومناسب للطفل باستخدام المصطلحات العربية."

        return context_message

    def search_enhanced_content(self, query: str, student_info: Dict, subject: str, top_k: int = 3) -> list[Dict]:
        """Search for enhanced content with curriculum context"""
        if not self.pgvector:
            return []
        try:
            query_embedding = self.openai_service.generate_embedding(query)
            search_results = self.pgvector.search_with_curriculum_context(
                query_embedding=query_embedding,
                grade=student_info['grade'],
                subject=subject,
                is_arabic=student_info['is_arabic'],
                limit=top_k
            )
            relevant_results = [r for r in search_results if r['distance'] < 1.3] if search_results else []
            return relevant_results
        except Exception as e:
            logger.warning(f"Error in enhanced content search: {e}")
            return []
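A short illustration of how the two methods above chain together. The stub services and the sample search row are invented for the example; only the field names (unit, concept, lesson, chunk_text, distance, curriculum_context) come from the code above, and it assumes ContextGenerator and StudyLanguage are importable from this project:

# Hypothetical wiring example (not part of the commit): stub services stand in
# for the real OpenAI and pgvector services so the call flow is visible.
class StubOpenAI:
    def generate_embedding(self, text):
        return [0.0] * 1536  # fake embedding vector

class StubPgVector:
    def search_with_curriculum_context(self, query_embedding, grade, subject, is_arabic, limit):
        # One fake row mimicking the fields generate_enhanced_context() reads.
        return [{
            "unit": "Unit 1",
            "concept": "States of matter",
            "lesson": "Lesson 2",
            "chunk_text": "Matter exists as solids, liquids and gases...",
            "distance": 0.8,  # below the 1.3 relevance cut-off
            "curriculum_context": {
                "navigation_hint": "Covered in Unit 1, Lesson 2",
                "related_concepts": ["Melting", "Evaporation"],
            },
        }]

generator = ContextGenerator(StubOpenAI(), StubPgVector())
student = {"grade": 4, "is_arabic": True, "study_language": StudyLanguage.ARABIC}
results = generator.search_enhanced_content("ما هي حالات المادة؟", student, "Science")
print(generator.generate_enhanced_context(results, student, "specific_content"))

Rows whose distance is 1.3 or more would be dropped by search_enhanced_content before the context string is ever built.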
self_hosted_env/voice_agent/services/agent_helpers/query_handlers.py (new file, mode 100644)
(diff collapsed in this view)
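The query_handlers.py diff (350 added lines) is collapsed here, but response_generator.py below pins down the interface it must expose: classify_query_type() returning labels such as unsafe_religion, unsafe_personal, unsafe_harmful, unsafe_sensitive_emotion, general_chat, overview, navigation and specific_content, plus a handler method per label. A hypothetical sketch of that interface; the real classification logic and refusal texts are not shown in this capture:

# Hypothetical sketch of the QueryHandler interface used by ResponseGenerator;
# the actual classifier in query_handlers.py is far richer than this.
from typing import Dict

class QueryHandler:
    def classify_query_type(self, user_message: str, student_info: Dict) -> str:
        """Return one of the labels ResponseGenerator switches on."""
        # Placeholder keyword check; the real implementation is not visible here.
        if any(word in user_message for word in ("password", "address", "phone")):
            return "unsafe_personal"
        return "specific_content"

    def handle_unsafe_personal_query(self, student_info: Dict) -> str:
        # Canned refusal returned without ever calling the AI model.
        return "لا يمكنني مناقشة المعلومات الشخصية، لكن يسعدني مساعدتك في العلوم!"

    # ... plus handle_unsafe_religion_query, handle_unsafe_harmful_query,
    # handle_unsafe_sensitive_emotion_query, handle_general_chat_query,
    # handle_overview_query and handle_navigation_query.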
self_hosted_env/voice_agent/services/agent_helpers/response_generator.py (new file, mode 100644)
import os
import sys
from typing import Dict
from fastapi import HTTPException
from services.agent_helpers.agent_prompts import SYSTEM_PROMPTS
import logging

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))

from core import StudentNationality, StudyLanguage, Models

logger = logging.getLogger(__name__)


class ResponseGenerator:
    """Handles AI response generation and conversation management"""

    def __init__(self, openai_service, db_service, pedagogy_service, query_handler, context_generator):
        self.openai_service = openai_service
        self.db_service = db_service
        self.pedagogy_service = pedagogy_service
        self.query_handler = query_handler
        self.context_generator = context_generator

    def get_conversation_history(self, student_id: str) -> list[Dict[str, str]]:
        """Get conversation history from database"""
        try:
            return self.db_service.get_chat_history(student_id)
        except Exception as e:
            logger.error(f"Error getting conversation history for {student_id}: {e}")
            return []

    def add_message_to_history(self, student_id: str, message: str, role: str = "user"):
        """Add message to database"""
        try:
            self.db_service.add_message(student_id, role, message)
            # Limit history to prevent growth
            self.db_service.limit_history(student_id, max_messages=38)
        except Exception as e:
            logger.error(f"Error adding message to history for {student_id}: {e}")

    def prepare_system_prompt(self, student_info: Dict) -> str:
        """Prepare system prompt based on student information"""
        student_name = student_info.get('student_name', 'الطالب').split()[0]
        study_language = student_info['study_language']

        # Map nationality
        nationality_lower = student_info['nationality'].lower().strip()
        nationality_mapping = {
            'egyptian': StudentNationality.EGYPTIAN,
            'saudi': StudentNationality.SAUDI
        }
        nationality = nationality_mapping.get(nationality_lower, StudentNationality.EGYPTIAN)

        # Get appropriate system prompt
        prompt_key = (nationality, study_language)
        base_system_prompt = SYSTEM_PROMPTS.get(
            prompt_key,
            SYSTEM_PROMPTS.get((StudentNationality.EGYPTIAN, StudyLanguage.ARABIC), "")
        )
        formatted_base_prompt = base_system_prompt.format(
            student_name=student_name,
            grade=student_info['grade']
        )

        # Add Socratic instructions if any
        socratic_instructions = self.pedagogy_service.get_socratic_instructions(
            student_info['grade'], student_info['nationality']
        )
        if socratic_instructions:
            formatted_base_prompt += f"\n\n{socratic_instructions}"

        return formatted_base_prompt

    def generate_response(self, user_message: str, student_id: str, subject: str = "Science",
                          model: str = Models.chat, temperature: float = 0.3, top_k: int = 3) -> str:
        """Enhanced AI response generation with JSON-based curriculum structure awareness"""
        if not self.openai_service.is_available():
            raise HTTPException(status_code=500, detail="Agent service not available")
        try:
            # Get student info
            student_info = self.db_service.get_student_info(student_id)
            if not student_info:
                raise HTTPException(status_code=404, detail=f"Student with ID {student_id} not found")

            student_name = student_info.get('student_name', 'الطالب').split()[0]
            study_language = student_info['study_language']

            # Add user message to DB
            self.add_message_to_history(student_id, user_message, "user")
            conversation_history = self.get_conversation_history(student_id)

            # Classify query type
            query_type = self.query_handler.classify_query_type(user_message, student_info)
            logger.info(f"Query type: {query_type} for student {student_name} ({study_language.value})")

            # *** HANDLE UNSAFE QUERIES IMMEDIATELY - NO SYSTEM PROMPT ***
            if query_type.startswith("unsafe_"):
                if query_type == "unsafe_religion":
                    unsafe_response = self.query_handler.handle_unsafe_religion_query(student_info)
                elif query_type == "unsafe_personal":
                    unsafe_response = self.query_handler.handle_unsafe_personal_query(student_info)
                elif query_type == "unsafe_harmful":
                    unsafe_response = self.query_handler.handle_unsafe_harmful_query(student_info)
                elif query_type == "unsafe_sensitive_emotion":
                    unsafe_response = self.query_handler.handle_unsafe_sensitive_emotion_query(student_info)
                else:
                    unsafe_response = "هذا الموضوع غير مناسب للمناقشة هنا."

                # Save response directly and return - NO AI MODEL CALL
                self.add_message_to_history(student_id, unsafe_response, "assistant")
                logger.info(f"Returned direct {query_type} response for {student_name}")
                return unsafe_response

            # *** FOR SAFE QUERIES - PROCEED WITH NORMAL AI PROCESSING ***
            # Prepare system prompt
            formatted_base_prompt = self.prepare_system_prompt(student_info)

            # Prepare messages
            messages = []
            messages.append({"role": "system", "content": formatted_base_prompt})
            messages.extend(conversation_history)
            messages.append({"role": "user", "content": user_message})

            # Handle different safe query types
            if query_type == "general_chat":
                chat_context = self.query_handler.handle_general_chat_query(user_message, student_info)
                messages.append({"role": "system", "content": f"سياق المحادثة العامة:\n{chat_context}"})
            elif query_type == "overview":
                overview_response = self.query_handler.handle_overview_query(student_info, subject)
                messages.append({"role": "system", "content": f"المنهج الكامل من ملف JSON:\n{overview_response}"})
            elif query_type == "navigation":
                navigation_response = self.query_handler.handle_navigation_query(user_message, student_info, subject)
                messages.append({"role": "system", "content": f"تفاصيل الوحدة/المفهوم من JSON:\n{navigation_response}"})
            elif query_type == "specific_content":
                # Enhanced content search
                relevant_results = self.context_generator.search_enhanced_content(
                    user_message, student_info, subject, top_k
                )
                if relevant_results:
                    enhanced_context = self.context_generator.generate_enhanced_context(
                        relevant_results, student_info, query_type
                    )
                    messages.append({"role": "system", "content": enhanced_context})
                    logger.info(f"Added enhanced context with {len(relevant_results)} chunks")

            # Generate response using AI model
            response = self.openai_service.client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature
            )
            ai_response = response.choices[0].message.content.strip()
            if not ai_response:
                raise ValueError("Empty response from AI model")

            # Save AI response
            self.add_message_to_history(student_id, ai_response, "assistant")
            logger.info(f"Generated {query_type} response for {student_name} ({study_language.value}): {len(ai_response)} characters")
            return ai_response
        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error generating AI response: {e}")
            raise HTTPException(status_code=500, detail=f"AI response generation failed: {str(e)}")
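The behavioural change this commit's message refers to is the early return above: when classify_query_type() yields an unsafe_* label, the canned handler response is saved to history and returned before any system prompt is built or any chat-completion request is made. A test-style sketch of that short-circuit using unittest.mock stand-ins; none of this is in the commit, and it assumes ResponseGenerator and the project's core enums are importable:

# Hypothetical test sketch (not in the commit) of the unsafe short-circuit:
# an "unsafe_" label must return the handler's canned text without ever
# touching the OpenAI client.
from unittest.mock import MagicMock

openai_service = MagicMock()
openai_service.is_available.return_value = True

db_service = MagicMock()
db_service.get_student_info.return_value = {
    "student_name": "Omar Ali", "grade": 4, "nationality": "egyptian",
    "study_language": StudyLanguage.ARABIC, "is_arabic": True,
}
db_service.get_chat_history.return_value = []

query_handler = MagicMock()
query_handler.classify_query_type.return_value = "unsafe_personal"
query_handler.handle_unsafe_personal_query.return_value = "رد آمن جاهز"

rg = ResponseGenerator(openai_service, db_service, MagicMock(), query_handler, MagicMock())
answer = rg.generate_response("ما هو عنوانك؟", student_id="s-1")

assert answer == "رد آمن جاهز"
openai_service.client.chat.completions.create.assert_not_called()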
self_hosted_env/voice_agent/services/agent_service.py
(diff collapsed in this view)
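agent_service.py drops 523 lines and gains 42 in this commit, consistent with it now just wiring the new agent_helpers modules together and delegating. The composition below is a guess based on the constructor signatures shown above, not the collapsed file's actual contents; the QueryHandler class name and its constructor are assumptions:

# Hypothetical composition sketch (the real agent_service.py is collapsed above):
# the helpers are built once and the service delegates to ResponseGenerator,
# mirroring the constructor signatures introduced in this commit.
from services.agent_helpers.context_generator import ContextGenerator
from services.agent_helpers.query_handlers import QueryHandler      # assumed class name
from services.agent_helpers.response_generator import ResponseGenerator

class AgentService:
    def __init__(self, openai_service, db_service, pedagogy_service, pgvector_service):
        self.context_generator = ContextGenerator(openai_service, pgvector_service)
        self.query_handler = QueryHandler()                          # assumed constructor
        self.response_generator = ResponseGenerator(
            openai_service, db_service, pedagogy_service,
            self.query_handler, self.context_generator,
        )

    def chat(self, user_message: str, student_id: str, subject: str = "Science") -> str:
        # Delegate the whole request to the response generator.
        return self.response_generator.generate_response(user_message, student_id, subject)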
self_hosted_env/voice_agent/voice_agent.tar (binary file; no preview)