finalize mcq and add test cases

parent 6c10aac1
@@ -405,6 +405,44 @@ def create_app() -> FastAPI:
except Exception as e:
logger.error(f"Error in get_mcqs_handler: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/quiz/dynamic")
async def get_dynamic_quiz_handler(
request: Request,
grade: int = Form(...),
subject: str = Form(...),
unit: str = Form(...),
concept: str = Form(...),
is_arabic: bool = Form(...),
count: int = Form(5)
):
"""
Generates a dynamic quiz for a topic.
This endpoint ensures freshness by generating a few new questions
and then randomly selects the total requested 'count' from the
entire pool of available questions (new and old).
"""
container = request.app.state.container
try:
quiz_questions = container.agent_service.get_dynamic_quiz(
grade=grade,
subject=subject,
unit=unit,
concept=concept,
is_arabic=is_arabic,
count=count
)
return {
"status": "success",
"message": f"Successfully generated a dynamic quiz with {len(quiz_questions)} questions.",
"quiz": quiz_questions
}
except HTTPException as e:
raise e # Re-raise FastAPI specific exceptions
except Exception as e:
logger.error(f"Error in get_dynamic_quiz_handler: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.options("/get-audio-response")
async def audio_response_options():
...
@@ -121,6 +121,8 @@ class QueryHandler:
2. "overview" - أسئلة عن نظرة عامة على المنهج أو المحتوى الكامل
3. "navigation" - أسئلة عن وحدة أو مفهوم معين
4. "specific_content" - أسئلة محددة عن موضوع علمي معين
5. "ask_for_question" - إذا كان الطالب يطلب أن تسأله سؤالاً أو يطلب اختبارًا (مثل "اسألني سؤال", "اختبرني", "quiz me", "ask me a question", "اسالني سؤال تاني", "عايز سؤال").
{conversation_context}
@@ -147,7 +149,7 @@ class QueryHandler:
classification: str = response.choices[0].message.content.strip().lower().strip('"').strip("'")
valid_classes = {
"general_chat", "overview", "navigation", "specific_content", "ask_for_question"
}
if classification in valid_classes:
...
@@ -11,13 +11,14 @@ logger = logging.getLogger(__name__)
class ResponseGenerator:
"""Handles AI response generation and conversation management"""
def __init__(self, openai_service, db_service, pedagogy_service, query_handler, context_generator, agent_service):
self.openai_service = openai_service
self.db_service = db_service
self.pedagogy_service = pedagogy_service
self.query_handler = query_handler
self.context_generator = context_generator
self.agent_service = agent_service
def get_conversation_history(self, student_id: str) -> list[Dict[str, str]]:
"""Get conversation history from database"""
@@ -150,6 +151,13 @@ class ResponseGenerator:
# Now, add only ONE system message with all the context
messages.append({"role": "system", "content": system_context})
if query_type == "ask_for_question":
mcq_data = self.agent_service.handle_ask_for_question(student_id)
return {
"type": "mcq",
"data": mcq_data
}
# Finally add user message
messages.append({"role": "user", "content": user_message})
...
@@ -4,6 +4,8 @@ from typing import List, Dict, Optional
from fastapi import HTTPException
import sys
import json
import random
import math
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from core import StudentNationality, Models
@@ -64,7 +66,7 @@ class AgentService:
self.context_generator = ContextGenerator(self.openai_service, self.pgvector)
self.response_generator = ResponseGenerator(
self.openai_service, self.db_service, self.pedagogy_service,
self.query_handler, self.context_generator, self
)
self.tashkeel_agent = TashkeelAgent(self.openai_service)
@@ -76,13 +78,26 @@ class AgentService:
def generate_response(self, user_message: str, student_id: str, subject: str = "Science",
model: str = Models.chat, temperature: float = 0.3, top_k: int = 3):
"""
Main response generation method, now handles both string and dictionary responses.
"""
# This can return either a string (for text answers) or a dict (for MCQs)
response = self.response_generator.generate_response(
user_message, student_id, subject, model, temperature, top_k
)
# Check if the response is a special dictionary type (like our MCQ response).
# If it is, we must return it directly without any text processing.
if isinstance(response, dict):
logger.info("AgentService received a structured response (MCQ). Bypassing text processing.")
return response
# If we reach here, it means the response is a normal text string.
# Now it is safe to apply text-based fixes.
response = apply_fixes(response, custom_fixes)
# response = self.tashkeel_agent.apply_tashkeel(response)
print(f"response: {response}")
return response
@@ -223,4 +238,176 @@ class AgentService:
self.pgvector.insert_mcqs(mcqs_to_store)
return mcqs_to_store
\ No newline at end of file
def handle_ask_for_question(self, student_id: str) -> Dict:
"""
Handles when a student asks for a question. It generates one new question,
uses an LLM to find a small subset of RELEVANT questions, and then
RANDOMLY selects one from that subset. This version correctly handles cases
with a small number of available questions.
"""
logger.info(f"Handling 'ask_for_question' request for student {student_id}.")
# 1. Get student info and determine the current topic
student_info = self.db_service.get_student_info(student_id)
if not student_info: raise HTTPException(status_code=404, detail="Student not found.")
grade, is_arabic, subject = student_info['grade'], student_info['is_arabic'], "Science"
recent_history = self.db_service.get_chat_history(student_id, limit=6)
if not recent_history: raise HTTPException(status_code=400, detail="Cannot ask a question without conversation context.")
history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in recent_history])
topic_prompt = f"""
Based on the recent conversation below, identify the specific Unit and Concept the student is currently discussing.
Your response MUST be a valid JSON object with the keys "unit" and "concept".
Conversation:\n{history_text}
"""
try:
response = self.openai_service.client.chat.completions.create(
model=Models.classification, messages=[{"role": "user", "content": topic_prompt}],
temperature=0, response_format={"type": "json_object"}
)
topic_data = json.loads(response.choices[0].message.content)
unit, concept = topic_data['unit'], topic_data['concept']
logger.info(f"Determined current topic for question: Unit='{unit}', Concept='{concept}'")
except (json.JSONDecodeError, KeyError) as e:
logger.error(f"Could not determine topic for student {student_id}: {e}")
raise HTTPException(status_code=500, detail="Could not determine the current topic.")
# 2. Generate one new question to enrich the pool
try:
self.generate_and_store_mcqs(grade, subject, unit, concept, is_arabic, num_questions=1)
except Exception as e:
logger.warning(f"Non-critical error: Failed to generate a new background MCQ: {e}")
# 3. Retrieve and filter the pool of available questions
all_mcqs = self.pgvector.get_mcqs(grade, subject, unit, concept, is_arabic, limit=None)
if not all_mcqs: raise HTTPException(status_code=404, detail="No questions found for the current topic.")
asked_question_texts = {msg['content'] for msg in recent_history if msg['role'] == 'assistant'}
unasked_mcqs = [mcq for mcq in all_mcqs if mcq['question_text'] not in asked_question_texts]
if not unasked_mcqs:
logger.warning(f"All questions for '{concept}' have been asked recently. Re-using full list.")
unasked_mcqs = all_mcqs
# Two-step selection: first use the LLM to filter for relevance, then pick randomly.
# 4. Step 1 (filter with AI): get a SUBSET of relevant questions.
relevant_question_texts = []
last_user_message = recent_history[-1]['content']
# Dynamically determine how many questions to ask for:
# up to 3, but no more than the number of available questions.
num_to_select = min(3, len(unasked_mcqs))
# If there's only one question, we don't need to ask the LLM to choose.
if num_to_select == 1:
relevant_question_texts = [unasked_mcqs[0]['question_text']]
logger.info("Only one un-asked question available, selecting it directly.")
elif num_to_select > 1:
selection_prompt = f"""
A student just said: "{last_user_message}"
Here is a list of available questions about the topic '{concept}':
{json.dumps([q['question_text'] for q in unasked_mcqs], ensure_ascii=False, indent=2)}
From the list above, select the {num_to_select} questions that are MOST RELEVANT to what the student just said.
Your response MUST be a valid JSON object with a single key "relevant_questions" which is an array of the chosen question strings.
Example: {{"relevant_questions": ["Question text 1", "Question text 2"]}}
"""
try:
response = self.openai_service.client.chat.completions.create(
model=Models.classification,
messages=[{"role": "user", "content": selection_prompt}],
temperature=0.1,
response_format={"type": "json_object"}
)
response_data = json.loads(response.choices[0].message.content)
relevant_question_texts = response_data.get("relevant_questions", [])
logger.info(f"LLM identified {len(relevant_question_texts)} relevant questions.")
except Exception as e:
logger.warning(f"LLM failed to select a relevant subset of questions: {e}. Will select from all available questions.")
# Robust Fallback: If the LLM fails or returns an empty list, use all un-asked questions as the pool.
if not relevant_question_texts:
relevant_question_texts = [mcq['question_text'] for mcq in unasked_mcqs]
# 5. STEP 2 (Select with Randomness): Randomly choose from the relevant subset.
chosen_question_text = random.choice(relevant_question_texts)
# 6. Find the full MCQ object for the chosen text and return it.
chosen_mcq = None
for mcq in unasked_mcqs:
if mcq['question_text'] == chosen_question_text:
chosen_mcq = mcq
break
# Fallback in case the chosen text somehow doesn't match
if not chosen_mcq:
chosen_mcq = random.choice(unasked_mcqs)
logger.info(f"Selected question for student {student_id}: '{chosen_mcq['question_text']}'")
# Add the chosen question's text to history to prevent immediate re-asking
self.db_service.add_message(student_id, 'assistant', chosen_mcq['question_text'])
return chosen_mcq
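# The MCQ dicts returned here (and by get_dynamic_quiz below) come straight from
# pgvector.get_mcqs; the rest of this commit only relies on the question_text,
# correct_answer and wrong_answer_1..3 keys (see _format_mcq_for_tts and the front-end).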
def get_dynamic_quiz(
self, grade: int, subject: str, unit: str, concept: str, is_arabic: bool, count: int
) -> List[Dict]:
"""
Generates a dynamic quiz of 'count' questions for a specific topic.
It ensures a portion of the questions are newly generated.
"""
if not self.pgvector:
raise HTTPException(status_code=503, detail="Vector service is not available for this feature.")
# 1. Calculate how many new questions to generate
# Logic: at least 1, up to 1/3 of the quiz size, with a max cap of 5.
num_new_questions = min(max(1, math.floor(count / 3)), 5)
logger.info(f"Request for {count} questions. Will generate {num_new_questions} new ones.")
# 2. Generate and store the new questions
try:
self.generate_and_store_mcqs(
grade=grade,
subject=subject,
unit=unit,
concept=concept,
is_arabic=is_arabic,
num_questions=num_new_questions
)
except Exception as e:
# If generation fails, we can still proceed with existing questions.
logger.warning(f"Could not generate new questions for the quiz due to an error: {e}")
# 3. Retrieve ALL available questions for the topic from the database
all_mcqs = self.pgvector.get_mcqs(
grade=grade,
subject=subject,
unit=unit,
concept=concept,
is_arabic=is_arabic,
limit=None # Retrieve all
)
if not all_mcqs:
raise HTTPException(status_code=404, detail="No questions could be found or generated for this topic.")
# 4. Randomly select the desired number of questions from the full pool
# First, shuffle the entire list of questions
random.shuffle(all_mcqs)
# Then, return a slice of the list with the requested count
# This gracefully handles cases where we have fewer questions than requested.
final_quiz = all_mcqs[:count]
logger.info(f"Returning a dynamic quiz of {len(final_quiz)} questions for '{concept}'.")
return final_quiz
\ No newline at end of file
# services/chat_service.py
from fastapi import UploadFile, HTTPException
from typing import Optional, Dict
import sys
import os
import time
import io
import random
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from core import MessageType, AppConfig
from repositories import StorageRepository
@@ -31,79 +34,114 @@ class ChatService:
MessageType.TEXT: TextMessageHandler()
}
def _format_mcq_for_tts(self, mcq_data: Dict, is_arabic: bool) -> str:
""" Formats a structured MCQ dictionary into a natural, speakable string for TTS. """
question_text = mcq_data.get("question_text", "")
options = [
mcq_data.get("correct_answer"), mcq_data.get("wrong_answer_1"),
mcq_data.get("wrong_answer_2"), mcq_data.get("wrong_answer_3")
]
valid_options = [opt for opt in options if opt]
random.shuffle(valid_options)
spoken_text = f"{question_text}\n\n"
spoken_text += "والاختيارات هي:\n" if is_arabic else "The options are:\n"
for option in valid_options:
spoken_text += f"{option}, \n"
return spoken_text.strip()
def process_message(self, student_id: str, file: Optional[UploadFile] = None, text: Optional[str] = None, game_context: Optional[str] = None):
"""
Processes a message, stores the full response in Redis, and returns a
detailed confirmation payload that differs for text vs. MCQ responses.
"""
try:
if file and file.filename:
user_message = self.handlers[MessageType.AUDIO].openai_service.transcribe_audio(file.file.read(), file.filename)
elif text:
user_message = text
else:
raise HTTPException(status_code=400, detail="No text or audio file provided.")
final_message_for_agent = f"game context: {game_context}\nuser query: {user_message}" if game_context else user_message
agent_response = self.agent_service.generate_response(
user_message=final_message_for_agent,
student_id=student_id,
)
response_payload_for_redis = None
text_for_audio = ""
agent_response_for_confirmation = ""
# This block determines what to store in Redis and what text to generate audio from.
if isinstance(agent_response, dict) and agent_response.get("type") == "mcq":
mcq_data = agent_response.get("data")
response_payload_for_redis = mcq_data
student_info = self.agent_service.db_service.get_student_info(student_id)
is_arabic = student_info.get('is_arabic', True) if student_info else True
text_for_audio = self._format_mcq_for_tts(mcq_data, is_arabic)
agent_response_for_confirmation = text_for_audio
else:
agent_response_text = str(agent_response)
response_payload_for_redis = agent_response_text
text_for_audio = agent_response_text
agent_response_for_confirmation = agent_response_text
# Generate audio for the prepared text
audio_data = self._generate_and_upload_audio(text_for_audio, student_id)
# Store the full payload (dict or string) and audio bytes in Redis for the polling endpoint
self.response_manager.store_response(
student_id=student_id,
text=response_payload_for_redis,
audio_filepath=audio_data.get("filepath"),
audio_bytes=audio_data.get("bytes")
)
# Case 1: The response was an MCQ. Return the special structure.
if isinstance(agent_response, dict) and agent_response.get("type") == "mcq":
return {
"status": "success",
"message": "Message processed and MCQ response ready",
"student_id": student_id,
"response_type": "mcq",
"agent_response": agent_response_for_confirmation,  # The speakable version
"question": agent_response.get("data"),  # The structured MCQ data
"audio_filepath": audio_data.get("filepath")
}
# Case 2: The response was normal text. Return the standard structure.
else:
return {
"status": "success",
"message": "Message processed and agent response ready",
"student_id": student_id,
"response_type": "text",
"agent_response": agent_response_for_confirmation,
"audio_filepath": audio_data.get("filepath")
}
except Exception as e:
print(f"Error processing message for student {student_id}: {e}")
raise HTTPException(status_code=500, detail=f"Failed to process message: {str(e)}")
def _generate_and_upload_audio(self, text: str, student_id: str) -> dict:
""" Segments text, generates TTS audio, and uploads to MinIO. """
try:
segments = self.segmentation_service.segment_text(text)
audio_bytes = self.agent_service.tts_service.generate_speech_from_sequence(segments)
timestamp = int(time.time())
filename = f"agent_response_{timestamp}_{student_id}.wav"
minio_file_path = f"audio/{filename}"
self.storage_repo.upload_file(io.BytesIO(audio_bytes), self.config.minio_bucket, minio_file_path)
full_url = self.storage_repo.get_file_url(self.config.minio_bucket, minio_file_path, expires=3600)
print(f"Successfully generated and uploaded TTS audio: {filename}")
return {"bytes": audio_bytes, "filepath": full_url}
except Exception as e:
print(f"Error in _generate_and_upload_audio: {e}")
return {"bytes": None, "filepath": None}
\ No newline at end of file
# services/response_manager.py
import json
import base64
from typing import Optional, Dict, Union
from .redis_client import redis_client
class ResponseManager:
@@ -21,20 +21,24 @@ class ResponseManager:
"""Creates a consistent key for the student's queue."""
return f"student_queue:{student_id}"
def store_response(self, student_id: str, text: Union[str, Dict], audio_filepath: Optional[str] = None, audio_bytes: Optional[bytes] = None) -> None:
"""
Adds a new response to the queue. The 'text' can be a string or a dictionary.
"""
key = self._get_key(student_id)
encoded_audio = base64.b64encode(audio_bytes).decode('utf-8') if audio_bytes else None
# This payload now flexibly stores either a string or a dict in the 'text' field.
payload = {
"text": text,
"audio_filepath": audio_filepath,
"audio_bytes_b64": encoded_audio
}
self.redis.rpush(key, json.dumps(payload))
self.redis.expire(key, self.ttl_seconds)
def get_response(self, student_id: str) -> Dict:
"""Atomically retrieves and removes the OLDEST response from the front of the queue."""
key = self._get_key(student_id)
...
@@ -7,6 +7,7 @@ from fastapi.responses import Response, StreamingResponse
from starlette.background import BackgroundTask
import sys
import os
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from core import ResponseStatus
from services.response_manager import ResponseManager
@@ -20,42 +21,46 @@ class ResponseService:
def get_agent_response(self, student_id: str):
"""
Gets the agent response from the manager and streams the audio.
It intelligently handles both text and structured MCQ data by encoding
the payload in the 'X-Response-Text' header and signaling the type
in the 'X-Response-Type' header.
"""
if not self.response_manager.is_response_fresh(student_id):
raise HTTPException(status_code=404, detail="Agent response not ready or expired.")
response_data = self.response_manager.get_response(student_id)
payload_data = response_data.get("text")  # This can be a string or a dict
audio_bytes = response_data.get("audio_bytes")
if not payload_data or not audio_bytes:
raise HTTPException(status_code=404, detail=f"Response for {student_id} was incomplete, claimed, or expired.")
response_type = "text"
encoded_text = ""
# Check the type of the payload to decide how to encode it
if isinstance(payload_data, dict):
# It's an MCQ
response_type = "mcq"
# Serialize the dictionary to a JSON string
json_string = json.dumps(payload_data, ensure_ascii=False)
# Base64-encode the JSON string
encoded_text = base64.b64encode(json_string.encode('utf-8')).decode('ascii')
else:
# It's a normal text string
response_type = "text"
# Base64-encode the string directly
encoded_text = base64.b64encode(str(payload_data).encode('utf-8')).decode('ascii')
# Stream the raw audio bytes
return Response(
content=audio_bytes,
media_type="audio/wav",
headers={
"X-Response-Type": response_type,  # Signal the payload type
"X-Response-Text": encoded_text,
"Access-Control-Expose-Headers": "X-Response-Text, X-Response-Type"  # Expose both custom headers
}
)
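# A minimal client-side sketch (illustrative only, not part of this commit) of how a
# consumer of this endpoint could decode the headers set above; BASE_URL and the
# student id are placeholders:
#
#   import base64, json, requests
#   resp = requests.get(f"{BASE_URL}/get-audio-response", params={"student_id": "student_001"})
#   decoded = base64.b64decode(resp.headers["X-Response-Text"]).decode("utf-8")
#   payload = json.loads(decoded) if resp.headers.get("X-Response-Type") == "mcq" else decoded
#   # resp.content holds the WAV audio bytes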
\ No newline at end of file
@@ -185,505 +185,170 @@
</div>
<script>
    const Config = {
        BACKEND_URL: `${window.location.origin}/chat`,
        AUDIO_RESPONSE_URL: `${window.location.origin}/get-audio-response`
    };
    const StatusType = { SUCCESS: 'success', ERROR: 'error', PROCESSING: 'processing' };

    class TextDecoderUtil {
        static decode(str) {
            try {
                return new TextDecoder('utf-8').decode(Uint8Array.from(atob(str), c => c.charCodeAt(0)));
            } catch (e) { return "Error decoding text."; }
        }
    }

    class APIClient {
        async sendFormData(formData) {
            const response = await fetch(Config.BACKEND_URL, { method: 'POST', body: formData });
            if (!response.ok) {
                const err = await response.json().catch(() => ({ detail: `HTTP ${response.status}` }));
                throw new Error(err.detail);
            }
            return await response.json();
        }
        async fetchAudioResponse(studentId) {
            const url = `${Config.AUDIO_RESPONSE_URL}?student_id=${encodeURIComponent(studentId)}`;
            const response = await fetch(url);
            if (!response.ok) {
                const err = await response.json().catch(() => ({ detail: `HTTP ${response.status}` }));
                throw new Error(err.detail || 'Failed to get audio response');
            }
            return response; // Return the full response object
        }
    }

    class UIManager {
        constructor() { this.init(); }
        init() {
            this.studentIdInput = document.getElementById('studentIdInput');
            this.textInput = document.getElementById('textInput');
            this.sendTextBtn = document.getElementById('sendTextBtn');
            this.chatContainer = document.getElementById('chatContainer');
            this.status = document.getElementById('status');
        }
        showStatus(message, type) {
            this.status.textContent = message;
            this.status.className = `status ${type}`;
            this.status.style.display = 'block';
        }
        addMessage(text, sender, audioUrl = null) {
            const msgDiv = document.createElement('div');
            msgDiv.className = `message ${sender}-message`;
            const senderName = sender === 'user' ? 'أنت' : 'المساعد';
            msgDiv.innerHTML = `<strong>${senderName}:</strong> <div class="message-content"></div>`;
            msgDiv.querySelector('.message-content').innerHTML = text;
            if (audioUrl) {
                const audio = document.createElement('audio');
                audio.controls = true;
                audio.src = audioUrl;
                msgDiv.appendChild(audio);
                audio.play().catch(() => {});
            }
            this.chatContainer.appendChild(msgDiv);
            this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
        }
        getStudentId() { return this.studentIdInput.value.trim() || 'student_001'; }
        getTextInput() { return this.textInput.value.trim(); }
        clearTextInput() { this.textInput.value = ''; }
    }

    class ChatService {
        constructor(apiClient, uiManager) {
            this.apiClient = apiClient;
            this.uiManager = uiManager;
        }
        async sendRequest(formData) {
            try {
                const response = await this.apiClient.sendFormData(formData);
                if (response.status === 'success') {
                    // ALWAYS poll after a successful chat request.
                    await this.getAgentResponse(this.uiManager.getStudentId());
                } else {
                    throw new Error(response.message || 'Unknown server error');
                }
            } catch (error) {
                this.uiManager.showStatus(`خطأ: ${error.message}`, StatusType.ERROR);
            }
        }
        async sendTextMessage(text, studentId) {
            if (!text || !studentId) return;
            this.uiManager.showStatus('يتم إرسال النص...', StatusType.PROCESSING);
            this.uiManager.addMessage(text, 'user');
            const formData = new FormData();
            formData.append('text', text);
            formData.append('student_id', studentId);
            await this.sendRequest(formData);
        }
        // --- THIS IS THE INTELLIGENT PART OF THE FRONT-END ---
        async getAgentResponse(studentId) {
            this.uiManager.showStatus('جاري جلب رد المساعد...', StatusType.PROCESSING);
            try {
                const response = await this.apiClient.fetchAudioResponse(studentId);
                const responseType = response.headers.get('X-Response-Type');
                const encodedText = response.headers.get('X-Response-Text');
                const audioBlob = await response.blob();
                const audioUrl = URL.createObjectURL(audioBlob);
                let displayText = "";
                if (responseType === 'mcq') {
                    // Decode the Base64 string, then parse the JSON
                    const jsonString = TextDecoderUtil.decode(encodedText);
                    const questionData = JSON.parse(jsonString);
                    // Format the question text for display
                    displayText = `${questionData.question_text}\n`;
                    const options = [
                        questionData.correct_answer, questionData.wrong_answer_1,
                        questionData.wrong_answer_2, questionData.wrong_answer_3
                    ];
                    const shuffledOptions = options.filter(opt => opt).sort(() => Math.random() - 0.5);
                    shuffledOptions.forEach((option, index) => {
                        displayText += `${index + 1}. ${option}\n`;
                    });
                    displayText = `<pre>${displayText}</pre>`;
                    this.uiManager.showStatus('✓ تم استلام السؤال!', StatusType.SUCCESS);
                } else { // Default to 'text'
                    displayText = TextDecoderUtil.decode(encodedText);
                    this.uiManager.showStatus('✓ تم استلام الرد!', StatusType.SUCCESS);
                }
                this.uiManager.addMessage(displayText, 'agent', audioUrl);
            } catch (error) {
                this.uiManager.showStatus(`خطأ في الشبكة: ${error.message}`, StatusType.ERROR);
            }
        }
    }

    class App {
        constructor() {
            this.ui = new UIManager();
            this.api = new APIClient();
            this.chatService = new ChatService(this.api, this.ui);
            this.initEventListeners();
        }
        initEventListeners() {
            this.ui.sendTextBtn.onclick = () => {
                const text = this.ui.getTextInput();
                if (text) {
                    this.chatService.sendTextMessage(text, this.ui.getStudentId());
                    this.ui.clearTextInput();
                }
            };
            this.ui.textInput.onkeypress = (e) => { if (e.key === 'Enter') this.ui.sendTextBtn.click(); };
            // Note: Simplified to remove audio recording logic for clarity
            document.getElementById('startBtn').style.display = 'none';
            document.getElementById('stopBtn').style.display = 'none';
        }
    }

    document.addEventListener('DOMContentLoaded', () => { new App(); });
</script>
</body>
</html>
\ No newline at end of file
import requests
import json
# The base URL of your FastAPI application (use "http://localhost:8000" when testing locally).
BASE_URL = "https://voice-agent.caprover.al-arcade.com"
# --- Test Configuration ---
# Use a student ID that has some recent chat history on any topic.
TEST_STUDENT_ID = "student_001"
# Change this to a normal phrase or the trigger phrase to test different paths
TRIGGER_PHRASE = "اسألني سؤال" # "Ask me a question"
# TRIGGER_PHRASE = "ما هو التكيف؟" # "What is adaptation?"
def test_chat_endpoint(student_id: str, message: str):
"""
Sends a single request to the /chat endpoint and prints the full response.
"""
endpoint = f"{BASE_URL}/chat"
payload = {
"student_id": student_id,
"text": message,
}
print(f"▶️ Sending message to /chat for student '{student_id}'...")
print(f" Message: '{message}'")
try:
# Make the POST request
response = requests.post(endpoint, data=payload, timeout=120)
# Print the HTTP status code and headers for context
print(f"\n--- API Response from /chat ---")
print(f"Status Code: {response.status_code}")
print("Headers:")
for key, value in response.headers.items():
print(f" {key}: {value}")
# Try to parse and pretty-print the JSON response body
try:
response_data = response.json()
print("\nResponse Body (JSON):")
print(json.dumps(response_data, indent=2, ensure_ascii=False))
except json.JSONDecodeError:
print("\nResponse Body (Not JSON):")
print(response.text)
except requests.exceptions.RequestException as e:
print(f"\n❌ FAILED: An error occurred while making the request: {e}")
if __name__ == "__main__":
print("="*60)
print(" SIMPLE /chat ENDPOINT TEST")
print(" This script sends one message and prints the immediate response.")
print("="*60)
test_chat_endpoint(TEST_STUDENT_ID, TRIGGER_PHRASE)
print("\n" + "="*60)
print(" Test complete.")
print("="*60)
\ No newline at end of file
"""
======================================================================
Dynamic Quiz API Cookbook & Test Script
======================================================================
Purpose:
This script serves as a live integration test and a practical guide ("cookbook")
for using the new Dynamic Quiz API endpoint.
It demonstrates how to request a quiz of a specific size ('n') for a given topic.
----------------------------------------------------------------------
API Endpoint Guide
----------------------------------------------------------------------
Generate a Dynamic Quiz (POST /quiz/dynamic)
---------------------------------------------
This is the primary endpoint for creating quizzes for students. It's designed
to be both fresh and comprehensive.
How it Works:
1. It intelligently calculates a small number of *new* questions to generate based on
the requested quiz size ('count'). This ensures the question bank is always growing.
2. It calls the AI to generate these new, unique questions and saves them to the database.
3. It retrieves ALL available questions for the topic (both old and new).
4. It randomly shuffles this complete list and returns the number of questions the user asked for.
This provides a dynamic, varied quiz experience every time while efficiently expanding
your question database.
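For reference, the number of newly generated questions follows
min(max(1, floor(count / 3)), 5) (see get_dynamic_quiz in agent_service.py), for example:
- count=3  -> 1 new question
- count=10 -> 3 new questions
- count=30 -> 5 new questions (capped)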
- Method: POST
- URL: [BASE_URL]/quiz/dynamic
- Data Format: Must be sent as `application/x-www-form-urlencoded` (form data).
Parameters (Form Data):
- grade (int, required): The grade level of the curriculum (e.g., 4).
- subject (str, required): The subject of the curriculum (e.g., "Science").
- unit (str, required): The exact name of the unit.
- concept (str, required): The exact name of the concept.
- is_arabic (bool, required): Set to `true` for Arabic curriculum, `false` for English.
- count (int, optional, default=5): The total number of questions you want in the final quiz.
Example Usage (using cURL):
# Request a quiz of 10 random questions for the topic.
# This will generate ~3 new questions and then pick 10 from the whole pool.
curl -X POST [BASE_URL]/quiz/dynamic \
-F "grade=4" \
-F "subject=Science" \
-F "unit=الوحدة الأولى: الأنظمة الحية" \
-F "concept=المفهوم الأول: التكيف والبقاء" \
-F "is_arabic=true" \
-F "count=10"
----------------------------------------------------------------------
How to Run This Script
----------------------------------------------------------------------
1. Ensure your FastAPI server is running.
2. Make sure the BASE_URL variable below is set to your server's address.
3. Run the script from your terminal: python3 simple_dynamic_quiz_test.py
"""
import requests
import json
# The base URL of your API server.
# Change this to "http://localhost:8000" if you are testing locally.
BASE_URL = "https://voice-agent.caprover.al-arcade.com"
def test_dynamic_quiz(grade: int, subject: str, unit: str, concept: str, is_arabic: bool, count: int):
"""
Calls the /quiz/dynamic endpoint and prints the raw JSON response.
"""
endpoint = f"{BASE_URL}/quiz/dynamic"
payload = {
"grade": grade,
"subject": subject,
"unit": unit,
"concept": concept,
"is_arabic": is_arabic,
"count": count,
}
print(f">> Requesting a dynamic quiz of {count} questions for:")
print(f" Topic: Grade {grade} {subject} -> {unit} -> {concept}")
print(f" Language: {'Arabic' if is_arabic else 'English'}")
try:
# Make the POST request with a long timeout to allow for new question generation
response = requests.post(endpoint, data=payload, timeout=180)
print(f"\n--- API Response ---")
print(f"Status Code: {response.status_code}")
# Try to parse and pretty-print the JSON response
try:
response_data = response.json()
print("\nResponse Body (JSON):")
print(json.dumps(response_data, indent=2, ensure_ascii=False))
except json.JSONDecodeError:
print("\nResponse Body (Not JSON):")
print(response.text)
except requests.exceptions.RequestException as e:
print(f"\nFAILED: An error occurred while making the request: {e}")
if __name__ == "__main__":
print("\n" + "="*50)
print("STARTING TEST 1: ARABIC DYNAMIC QUIZ")
print("="*50)
# Arabic test data
arabic_test_data = {
"grade": 4,
"subject": "Science",
"unit": "الوحدة الأولى: الأنظمة الحية",
"concept": "المفهوم الأول: التكيف والبقاء",
"is_arabic": True,
"count": 3
}
test_dynamic_quiz(**arabic_test_data)
print("\n" + "="*50)
print("STARTING TEST 2: ENGLISH DYNAMIC QUIZ")
print("="*50)
# English test data
english_test_data = {
"grade": 5,
"subject": "Science",
"unit": "Unit 1: Matter and Energy in Ecosystems",
"concept": "Concept 1.1: Properties of Matter",
"is_arabic": False,
"count": 2
}
test_dynamic_quiz(**english_test_data)
print("\n" + "="*50)
print("All tests complete.")
print("="*50)
\ No newline at end of file