edit mcq schema

parent 8bbfd066
@@ -25,6 +2,8 @@ from services import (
DataIngestionService
)
from schemas.mcq import QuestionResponse, QuizResponse, MCQListResponse
class DIContainer:
@@ -339,7 +341,7 @@ def create_app() -> FastAPI:
async def generate_mcqs_handler(
request: Request,
curriculum: str = Form(...),
grade: str = Form(...),
subject: str = Form(...),
unit: str = Form(...),
concept: str = Form(...),
@@ -347,7 +349,10 @@ def create_app() -> FastAPI:
is_arabic: bool = Form(False),
):
"""
Generates and stores a new set of MCQs.
NOTE: This endpoint intentionally returns the FULL question object,
including curriculum, grade, etc., as it might be useful for the client
that just initiated the generation. The GET endpoints will be filtered.
"""
container = request.app.state.container
try:
@@ -371,7 +376,8 @@ def create_app() -> FastAPI:
logger.error(f"Error in generate_mcqs_handler: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.get("/mcq")
# --- STEP 2: UPDATE THE /mcq ENDPOINT SIGNATURE ---
@app.get("/mcq", response_model=MCQListResponse)
async def get_mcqs_handler(
request: Request,
curriculum: str,
@@ -383,11 +389,12 @@ def create_app() -> FastAPI:
limit: Optional[int] = None
):
"""
Retrieves existing MCQs, filtered to the 11-field response model.
"""
container = request.app.state.container
try:
# The service layer still returns the full objects from the DB
questions_from_db = container.agent_service.pgvector.get_mcqs(
curriculum=curriculum,
grade=grade,
subject=subject,
@@ -396,16 +403,18 @@ def create_app() -> FastAPI:
is_arabic=is_arabic,
limit=limit
)
# FastAPI will automatically filter `questions_from_db` to match the model
return {
"status": "success",
"count": len(questions),
"questions": questions
"count": len(questions_from_db),
"questions": questions_from_db
}
except Exception as e:
logger.error(f"Error in get_mcqs_handler: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/quiz/dynamic")
# --- STEP 3: UPDATE THE /quiz/dynamic ENDPOINT SIGNATURE ---
@app.post("/quiz/dynamic", response_model=QuizResponse)
async def get_dynamic_quiz_handler(
request: Request,
curriculum: str = Form(...),
@@ -417,11 +426,12 @@ def create_app() -> FastAPI:
count: int = Form(5)
):
"""
Generates a dynamic quiz, filtered to the 11-field response model.
"""
container = request.app.state.container
try:
# The service layer still returns the full objects
quiz_questions_full = container.agent_service.get_dynamic_quiz(
curriculum=curriculum,
grade=grade,
subject=subject,
@@ -430,10 +440,11 @@ def create_app() -> FastAPI:
is_arabic=is_arabic,
count=count
)
# FastAPI will automatically filter `quiz_questions_full` to match the model
return {
"status": "success",
"message": f"Successfully generated a dynamic quiz with {len(quiz_questions)} questions.",
"quiz": quiz_questions
"message": f"Successfully generated a dynamic quiz with {len(quiz_questions_full)} questions.",
"quiz": quiz_questions_full
}
except HTTPException as e:
raise e
......
from .response import WebhookResponse
from .message import TextMessage
from .mcq import QuestionResponse, QuizResponse, MCQListResponse
\ No newline at end of file
from pydantic import BaseModel
from typing import List, Optional
class QuestionResponse(BaseModel):
"""Defines the exact 11 fields to be returned for each question."""
question_text: str
question_type: Optional[str] = None
correct_answer: str
wrong_answer_1: Optional[str] = None
wrong_answer_2: Optional[str] = None
wrong_answer_3: Optional[str] = None
wrong_answer_4: Optional[str] = None
difficulty_level: Optional[int] = None
blooms_level: Optional[str] = None
is_arabic: bool
hint: Optional[str] = None
class Config:
orm_mode = True  # lets Pydantic read attribute-based objects (e.g., ORM rows) as well as dicts
class QuizResponse(BaseModel):
"""Defines the structure for the quiz endpoints."""
status: str
message: str
quiz: List[QuestionResponse]
class MCQListResponse(BaseModel):
"""Defines the structure for the GET /mcq endpoint."""
status: str
count: int
questions: List[QuestionResponse]
\ No newline at end of file
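As a quick sanity check of the filtering these models provide, here is a minimal sketch (not part of the commit; the field values are made up):

# Sketch: how response_model-style filtering behaves with QuestionResponse above.
row = {
    "question_text": "What is energy?",
    "correct_answer": "The ability to do work",
    "is_arabic": False,
    "curriculum": "EGYPTIAN National",  # extra key, dropped by the model
    "grade": "4th Grade",               # extra key, dropped by the model
}
filtered = QuestionResponse(**row)  # Pydantic v1 ignores extra keys by default
print(filtered.dict())              # only the 11 declared fields remain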
@@ -137,13 +137,15 @@ class AgentService:
is_arabic: bool, num_questions: int = 5
) -> List[Dict]:
"""
Generates NEW, UNIQUE MCQs with balanced difficulty and Bloom's taxonomy levels.
Each returned question includes:
- difficulty_level: 1–10
- blooms_level: One of ["remember", "understand", "apply", "analysis", "evaluate", "create"]
"""
if not self.pgvector:
raise HTTPException(status_code=503, detail="Vector service is not available for context retrieval.")
logger.info(f"Checking for existing questions for: {curriculum}/{grade}/{subject}/{unit}/{concept}")
existing_questions = self.pgvector.get_mcqs(
curriculum, grade, subject, unit, concept, is_arabic, limit=None
@@ -153,6 +155,7 @@ class AgentService:
q_list = [f"- {q['question_text']}" for q in existing_questions]
existing_questions_text = "\n".join(q_list)
# --- STEP 2: CONTEXT RETRIEVAL ---
search_query = f"summary of {concept} in {unit} for {subject}"
query_embedding = self.openai_service.generate_embedding(search_query)
try:
@@ -166,11 +169,10 @@ class AgentService:
raise HTTPException(status_code=404, detail="No curriculum context found for this topic.")
full_context = "\n---\n".join([chunk['chunk_text'] for chunk in context_chunks])
# --- STEP 3: PROMPT CONSTRUCTION ---
if is_arabic:
prompt = f"""
أنت خبير في تطوير المناهج التعليمية، ومهمتك هي إنشاء **أسئلة اختيار من متعدد جديدة بالكامل**.
هذه هي الأسئلة الموجودة بالفعل لمفهوم "{concept}":
--- الأسئلة الموجودة ---
@@ -185,35 +187,35 @@ class AgentService:
قم بإنشاء {num_questions} سؤالًا جديدًا تمامًا من نوع الاختيار من متعدد (MCQ)، **مختلفة كليًا عن الأسئلة الموجودة أعلاه**.
⚠️ **مهم جدًا**:
يجب أن تشمل الأسئلة مستويات متنوعة من الصعوبة وفق التوزيع التالي:
- ٤٠٪ أسئلة سهلة (١ إلى ٤)
- ٣٠٪ أسئلة متوسطة (٥ إلى ٧)
- ٣٠٪ أسئلة صعبة (٨ إلى ١٠)
كما يجب أن تغطي الأسئلة مستويات تصنيف بلوم الستة التالية بشكل متوازن تقريبًا:
- "تذكر" (remember)
- "فهم" (understand)
- "تطبيق" (apply)
- "تحليل" (analysis)
- "تقييم" (evaluate)
- "إبداع" (create)
**صيغة الإخراج** يجب أن تكون مصفوفة JSON صالحة (JSON array) تحتوي على كائنات (objects) بالمفاتيح التالية تمامًا:
- "question_text": نص السؤال.
- "difficulty_level": رقم صحيح من ١ إلى ١٠ يمثل مستوى الصعوبة.
- "difficulty_level": رقم من ١ إلى ١٠.
- "blooms_level": واحدة من ["remember", "understand", "apply", "analysis", "evaluate", "create"].
- "question_type": نوع السؤال (مثلاً: "multiple_choice").
- "correct_answer": الإجابة الصحيحة الوحيدة.
- "wrong_answer_1": إجابة خاطئة ولكن معقولة.
- "wrong_answer_2": إجابة خاطئة ولكن معقولة.
- "wrong_answer_3": إجابة خاطئة ولكن معقولة.
- "wrong_answer_4": إجابة خاطئة ولكن معقولة.
- "hint": تلميح أو مساعدة للطالب لفهم السؤال.
- "question_image_url": اترك هذا الحقل كسلسلة فارغة "".
- "correct_image_url": اترك هذا الحقل كسلسلة فارغة "".
- "wrong_image_url_1": اترك هذا الحقل كسلسلة فارغة "".
- "wrong_image_url_2": اترك هذا الحقل كسلسلة فارغة "".
- "wrong_image_url_3": اترك هذا الحقل كسلسلة فارغة "".
- "wrong_image_url_4": اترك هذا الحقل كسلسلة فارغة "".
- "correct_answer": الإجابة الصحيحة.
- "wrong_answer_1" إلى "wrong_answer_4": إجابات خاطئة معقولة.
- "hint": تلميح للطالب.
- "question_image_url", "correct_image_url", "wrong_image_url_1" إلى "_4": اتركها كسلسلة فارغة "".
لا تكتب أي نص خارج مصفوفة JSON.
"""
else:
prompt = f"""
You are an expert curriculum developer. Your task is to generate **entirely new multiple-choice questions (MCQs)** that do NOT overlap with any existing ones.
Here are the questions that ALREADY EXIST for the concept "{concept}":
--- EXISTING QUESTIONS ---
@@ -225,38 +227,45 @@ class AgentService:
{full_context}
--- END CONTEXT ---
Generate {num_questions} NEW and COMPLETELY DIFFERENT multiple-choice questions.
⚠️ **Important Requirements**:
- Distribute difficulty levels approximately as follows:
- 40% easy (difficulty 1–4)
- 30% medium (difficulty 5–7)
- 30% hard (difficulty 8–10)
- Also, balance across Bloom's taxonomy levels:
- "remember"
- "understand"
- "apply"
- "analysis"
- "evaluate"
- "create"
Your response MUST be a valid JSON array of objects. Each object must have these exact keys:
- "question_text": The text of the question.
- "difficulty_level": An integer between 1 and 10, based on the distribution rule.
- "question_type": The type of question (e.g., "multiple_choice").
- "difficulty_level": Integer 1–10.
- "blooms_level": One of ["remember", "understand", "apply", "analysis", "evaluate", "create"].
- "question_type": The type (e.g., "multiple_choice").
- "correct_answer": The single correct answer.
- "wrong_answer_1": A plausible wrong answer.
- "wrong_answer_2": Another plausible wrong answer.
- "wrong_answer_3": A third plausible wrong answer.
- "wrong_answer_4": A fourth plausible wrong answer.
- "hint": A helpful hint for the student.
- "question_image_url": Leave this as an empty string "".
- "correct_image_url": Leave this as an empty string "".
- "wrong_image_url_1": Leave this as an empty string "".
- "wrong_image_url_2": Leave this as an empty string "".
- "wrong_image_url_3": Leave this as an empty string "".
- "wrong_image_url_4": Leave this as an empty string "".
Do not include any text outside of the JSON array.
- "wrong_answer_1" to "wrong_answer_4": Plausible incorrect answers.
- "hint": Helpful explanation or guidance.
- "question_image_url": ""
- "correct_image_url": ""
- "wrong_image_url_1" to "wrong_image_url_4": ""
Do not include any text outside the JSON array.
"""
# --- STEP 4: CALL LLM ---
try:
response = self.openai_service.client.chat.completions.create(
model=Models.chat,
messages=[{"role": "user", "content": prompt}],
temperature=0.7,
response_format={"type": "json_object"}
)
response_content = response.choices[0].message.content
json_response = json.loads(response_content)
@@ -267,13 +276,18 @@ class AgentService:
logger.error(f"Failed to parse MCQ response from LLM: {e}\nRaw Response: {response_content}")
raise HTTPException(status_code=500, detail="Failed to generate or parse MCQs from AI.")
# --- STEP 5: STORE ---
mcqs_to_store = []
for q in generated_questions:
mcqs_to_store.append({
"curriculum": curriculum, "grade": grade, "subject": subject, "unit": unit,
"concept": concept, "is_arabic": is_arabic,
"difficulty_level": q.get("difficulty_level"), # <-- AI now provides this
"curriculum": curriculum,
"grade": grade,
"subject": subject,
"unit": unit,
"concept": concept,
"is_arabic": is_arabic,
"difficulty_level": q.get("difficulty_level"),
"blooms_level": q.get("blooms_level"),
"question_text": q.get("question_text"),
"question_type": q.get("question_type", "multiple_choice"),
"correct_answer": q.get("correct_answer"),
@@ -282,12 +296,12 @@ class AgentService:
"wrong_answer_3": q.get("wrong_answer_3"),
"wrong_answer_4": q.get("wrong_answer_4"),
"hint": q.get("hint"),
"question_image_url": q.get("question_image_url"),
"correct_image_url": q.get("correct_image_url"),
"wrong_image_url_1": q.get("wrong_image_url_1"),
"wrong_image_url_2": q.get("wrong_image_url_2"),
"wrong_image_url_3": q.get("wrong_image_url_3"),
"wrong_image_url_4": q.get("wrong_image_url_4"),
"question_image_url": q.get("question_image_url", ""),
"correct_image_url": q.get("correct_image_url", ""),
"wrong_image_url_1": q.get("wrong_image_url_1", ""),
"wrong_image_url_2": q.get("wrong_image_url_2", ""),
"wrong_image_url_3": q.get("wrong_image_url_3", ""),
"wrong_image_url_4": q.get("wrong_image_url_4", ""),
})
self.pgvector.insert_mcqs(mcqs_to_store)
@@ -295,6 +309,7 @@ class AgentService:
def handle_ask_for_question(self, student_id: str) -> Dict:
"""
Handles when a student asks for a question. It generates one new question,
......
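Because the 40/30/30 difficulty split and the Bloom's coverage are now delegated to the LLM, a lightweight post-generation tally can flag a skewed batch before it is stored. A sketch under that assumption (the helper name is illustrative, not part of AgentService):

from collections import Counter
from typing import Dict, List

def summarize_generated(questions: List[Dict]) -> Dict:
    """Tally difficulty bands (1-4 / 5-7 / 8-10) and Bloom's levels for a batch."""
    bands, blooms = Counter(), Counter()
    for q in questions:
        level = q.get("difficulty_level") or 0
        if 1 <= level <= 4:
            bands["easy"] += 1
        elif 5 <= level <= 7:
            bands["medium"] += 1
        elif 8 <= level <= 10:
            bands["hard"] += 1
        else:
            bands["invalid"] += 1
        blooms[q.get("blooms_level", "missing")] += 1
    return {"difficulty": dict(bands), "blooms": dict(blooms)}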
@@ -527,32 +527,32 @@ class PGVectorService:
def insert_mcqs(self, mcq_list: List[Dict]):
"""
Inserts a batch of MCQs, now including the blooms_level field.
"""
if not mcq_list:
return
with self.pool_handler.get_connection() as conn:
with conn.cursor() as cur:
# --- UPDATED INSERT QUERY ---
insert_query = """
INSERT INTO mcq_questions (
curriculum, grade, subject, unit, concept, question_text,
question_type, difficulty_level, blooms_level, is_arabic, correct_answer,
wrong_answer_1, wrong_answer_2, wrong_answer_3, wrong_answer_4,
question_image_url, correct_image_url, wrong_image_url_1,
wrong_image_url_2, wrong_image_url_3, wrong_image_url_4, hint
) VALUES (
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s
);
"""
# --- UPDATED DATA PREPARATION ---
# Using .get() provides safety against missing keys from the LLM response
data_to_insert = [
(
q.get('curriculum'), q.get('grade'), q.get('subject'), q.get('unit'), q.get('concept'),
q.get('question_text'), q.get('question_type'), q.get('difficulty_level'),
q.get('blooms_level'), # <-- ADDED THIS
q.get('is_arabic'), q.get('correct_answer'), q.get('wrong_answer_1'),
q.get('wrong_answer_2'), q.get('wrong_answer_3'), q.get('wrong_answer_4'),
q.get('question_image_url'), q.get('correct_image_url'), q.get('wrong_image_url_1'),
......
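The execution step of insert_mcqs is elided above. With psycopg2 the batch is typically flushed in a single call that must line up with the 22 columns of the INSERT; a sketch, assuming the cursor and connection opened in the `with` blocks above:

# Sketch: flush the batch (assumes cur/conn from insert_mcqs above).
cur.executemany(insert_query, data_to_insert)
conn.commit()
# For large batches, psycopg2.extras.execute_values is usually faster,
# but it requires rewriting the query around a single VALUES %s template.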
# setup_mcq_table.py
import psycopg2
import os
from dotenv import load_dotenv
@@ -6,7 +8,7 @@ load_dotenv()
def setup_mcq_table(drop_existing_table: bool = False):
"""
Sets up the mcq_questions table with the final schema, now including blooms_level.
"""
try:
conn = psycopg2.connect(
@@ -24,8 +26,8 @@ def setup_mcq_table(drop_existing_table: bool = False):
cur.execute("DROP TABLE IF EXISTS mcq_questions CASCADE;")
print("Table dropped.")
print("Creating mcq_questions table with the NEW COMPREHENSIVE schema...")
# --- THIS IS THE FULLY UPDATED TABLE SCHEMA ---
print("Creating mcq_questions table with blooms_level column...")
# --- UPDATED SCHEMA ---
cur.execute("""
CREATE TABLE IF NOT EXISTS mcq_questions (
id SERIAL PRIMARY KEY,
@@ -37,25 +39,25 @@ def setup_mcq_table(drop_existing_table: bool = False):
question_text TEXT NOT NULL,
question_type TEXT,
difficulty_level INTEGER,
blooms_level TEXT,
is_arabic BOOLEAN NOT NULL,
correct_answer TEXT NOT NULL,
wrong_answer_1 TEXT,
wrong_answer_2 TEXT,
wrong_answer_3 TEXT,
wrong_answer_4 TEXT,
question_image_url TEXT,
correct_image_url TEXT,
wrong_image_url_1 TEXT,
wrong_image_url_2 TEXT,
wrong_image_url_3 TEXT,
wrong_image_url_4 TEXT,
hint TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
""")
print("Creating indexes on mcq_questions table...")
# --- UPDATED INDEX TO INCLUDE CURRICULUM ---
cur.execute("""
CREATE INDEX IF NOT EXISTS idx_mcq_topic
ON mcq_questions(curriculum, grade, is_arabic, subject, unit, concept);
@@ -71,7 +73,5 @@ def setup_mcq_table(drop_existing_table: bool = False):
print("Database connection closed.")
if __name__ == "__main__":
print("Setting up the MCQ table structure...")
setup_mcq_table(drop_existing_table=True)
\ No newline at end of file
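Note that running with drop_existing_table=True discards any stored questions. If the table already holds data, the same schema change can be applied in place; a non-destructive sketch (the single DATABASE_URL connection string is an assumption; adjust to the env settings used above):

import os
import psycopg2
from dotenv import load_dotenv

load_dotenv()
conn = psycopg2.connect(os.getenv("DATABASE_URL"))  # assumption: single DSN env var
with conn.cursor() as cur:
    # Existing rows keep their data; the new column starts as NULL.
    cur.execute("ALTER TABLE mcq_questions ADD COLUMN IF NOT EXISTS blooms_level TEXT;")
conn.commit()
conn.close()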
@@ -248,8 +248,15 @@
document.body.removeChild(link);
});
// --- THIS IS THE CORRECTED CSV EXPORT FUNCTION ---
function convertQuizToCSV(quiz) {
// Define the exact, case-sensitive headers for the export
const headers = [
'question_text', 'question_type', 'correct_answer',
'wrong_answer_1', 'wrong_answer_2', 'wrong_answer_3', 'wrong_answer_4',
'difficulty_level', 'blooms_level', 'is_arabic', 'hint'
];
const escapeCSV = (str) => {
if (str === null || str === undefined) return '';
let result = str.toString();
@@ -258,13 +265,24 @@
}
return result;
};
const rows = quiz.map(q => {
// Map the data to the specific headers in the correct order
return [
escapeCSV(q.question_text),
escapeCSV(q.question_type),
escapeCSV(q.correct_answer),
escapeCSV(q.wrong_answer_1),
escapeCSV(q.wrong_answer_2),
escapeCSV(q.wrong_answer_3),
escapeCSV(q.wrong_answer_4),
escapeCSV(q.difficulty_level),
escapeCSV(q.blooms_level),
q.is_arabic ? '1' : '0',
escapeCSV(q.hint)
].join(',');
});
return [headers.join(','), ...rows].join('\n');
}
......
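A quick way to verify an exported file matches the 11-field layout above is to round-trip it; a minimal sketch in Python (the filename is illustrative):

import csv

EXPECTED = [
    "question_text", "question_type", "correct_answer",
    "wrong_answer_1", "wrong_answer_2", "wrong_answer_3", "wrong_answer_4",
    "difficulty_level", "blooms_level", "is_arabic", "hint",
]

with open("quiz_export.csv", newline="", encoding="utf-8") as f:
    reader = csv.DictReader(f)
    assert reader.fieldnames == EXPECTED       # headers match the JS export above
    for row in reader:
        assert row["is_arabic"] in ("0", "1")  # exported as 1/0, not true/false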
import requests
import json
import time
# --- Configuration ---
# The base URL of your running FastAPI application.
# Change this if your app is running on a different host or port.
BASE_URL = "https://voice-agent-v2.caprover.al-arcade.com"
# Define the topic we will use for all tests
TEST_DATA = {
"curriculum": "EGYPTIAN National",
"grade": "4th Grade",
"subject": "Science",
"unit": "الوحدة الأولى: ما النظام؟",
"concept": "المفهوم 3.1: الطاقة كنظام",
}
# --- Helper Function to Print Formatted JSON ---
def print_json(data, title=""):
"""
This function is responsible for printing the entire JSON response.
`json.dumps` with `indent=2` makes it readable.
`ensure_ascii=False` correctly displays Arabic characters.
"""
if title:
print(f"--- {title} ---")
# This line prints the FULL response data.
print(json.dumps(data, indent=2, ensure_ascii=False))
print("\n" + "="*50 + "\n")
# --- Test Functions ---
def test_generate_mcqs():
"""Tests the POST /mcq/generate endpoint."""
print(" Starting test for: POST /mcq/generate")
form_data = {**TEST_DATA, "count": 2, "is_arabic": True}
try:
start_time = time.time()
response = requests.post(f"{BASE_URL}/mcq/generate", data=form_data)
duration = time.time() - start_time
print(f"Status Code: {response.status_code} (took {duration:.2f} seconds)")
response.raise_for_status()
response_data = response.json()
# >>> THE RESPONSE IS PRINTED RIGHT HERE <<<
print_json(response_data, "Full Response from /mcq/generate")
assert response_data["status"] == "success"
print("✅ Test for /mcq/generate PASSED")
except requests.exceptions.RequestException as e:
print(f"❌ Test for /mcq/generate FAILED: An HTTP error occurred: {e}")
if response is not None:
print("--- Raw Response Text ---")
print(response.text)
except Exception as e:
print(f"❌ Test for /mcq/generate FAILED: An error occurred: {e}")
if response is not None:
print("--- Raw Response Text ---")
print(response.text)
def test_dynamic_quiz():
"""Tests the POST /quiz/dynamic endpoint."""
print(" Starting test for: POST /quiz/dynamic")
form_data = {**TEST_DATA, "count": 3, "is_arabic": True}
try:
start_time = time.time()
response = requests.post(f"{BASE_URL}/quiz/dynamic", data=form_data)
duration = time.time() - start_time
print(f"Status Code: {response.status_code} (took {duration:.2f} seconds)")
response.raise_for_status()
response_data = response.json()
# >>> THE RESPONSE IS PRINTED RIGHT HERE <<<
print_json(response_data, "Full Response from /quiz/dynamic")
assert response_data["status"] == "success"
# Verify that the response is indeed filtered
if response_data.get("quiz"):
first_question = response_data["quiz"][0]
if "curriculum" in first_question:
# This is not a failure of the test, but a failure of the API logic.
print("⚠️ WARNING: /quiz/dynamic response was NOT filtered. It still contains the 'curriculum' field.")
else:
print("✔️ Verification successful: /quiz/dynamic response is correctly filtered.")
print("✅ Test for /quiz/dynamic PASSED")
except requests.exceptions.RequestException as e:
print(f"❌ Test for /quiz/dynamic FAILED: An HTTP error occurred: {e}")
if response is not None:
print("--- Raw Response Text ---")
print(response.text)
except Exception as e:
print(f"❌ Test for /quiz/dynamic FAILED: An error occurred: {e}")
if response is not None:
print("--- Raw Response Text ---")
print(response.text)
# --- Main Execution Block ---
if __name__ == "__main__":
print("Starting MCQ Endpoint Tests...\n")
# Run the first test
test_generate_mcqs()
# Run the second test
test_dynamic_quiz()
print("All tests completed.")
\ No newline at end of file
"""
======================================================================
MCQ API Cookbook & Test Script
======================================================================
Purpose:
This script serves as both a live integration test and a practical guide ("cookbook")
for using the Multiple-Choice Question (MCQ) generation and retrieval API endpoints.
It demonstrates how to:
1. Generate and store new MCQs for a specific curriculum topic.
2. Retrieve existing MCQs from the database for that same topic.
----------------------------------------------------------------------
API Endpoints Guide
----------------------------------------------------------------------
There are two main endpoints for the MCQ feature:
1. Generate Questions (POST /mcq/generate)
------------------------------------------
This is the "creator" endpoint. It uses an AI model to generate a new set of questions
based on the curriculum content stored in the vector database. It then saves these
new questions to the `mcq_questions` table for future use.
- Method: POST
- URL: [BASE_URL]/mcq/generate
- Data Format: Must be sent as `application/x-www-form-urlencoded` (form data).
Parameters (Form Data):
- grade (int, required): The grade level of the curriculum (e.g., 4).
- subject (str, required): The subject of the curriculum (e.g., "Science").
- unit (str, required): The exact name of the unit.
- concept (str, required): The exact name of the concept.
- is_arabic (bool, required): Set to `true` for Arabic curriculum, `false` for English.
- count (int, optional, default=5): The number of new questions to generate.
Example Usage (using cURL):
curl -X POST [BASE_URL]/mcq/generate \
-F "grade=4" \
-F "subject=Science" \
-F "unit=الوحدة الأولى: الأنظمة الحية" \
-F "concept=المفهوم الأول: التكيف والبقاء" \
-F "is_arabic=true" \
-F "count=3"
2. Retrieve Questions (GET /mcq)
---------------------------------
This is the "reader" endpoint. It quickly and cheaply retrieves questions that have
already been generated and stored in the database. It does NOT call the AI model.
- Method: GET
- URL: [BASE_URL]/mcq
Parameters (URL Query Parameters):
- grade (int, required): The grade level.
- subject (str, required): The subject.
- unit (str, required): The unit name.
- concept (str, required): The concept name.
- is_arabic (bool, required): `true` for Arabic, `false` for English.
- limit (int, optional, default=None): The maximum number of questions to retrieve.
If omitted, it will retrieve ALL questions for that topic.
Example Usage (using cURL):
# Get the 5 most recent questions for a topic
curl "[BASE_URL]/mcq?grade=4&subject=Science&unit=...&concept=...&is_arabic=true&limit=5"
# Get ALL questions for a topic
curl "[BASE_URL]/mcq?grade=4&subject=Science&unit=...&concept=...&is_arabic=true"
----------------------------------------------------------------------
How to Run This Script
----------------------------------------------------------------------
1. Ensure your FastAPI server is running.
2. Make sure the BASE_URL variable below is set to your server's address.
3. Run the script from your terminal: python3 msq_test.py
"""
import requests
import json
import time
from typing import Optional
# The base URL of your API server.
BASE_URL = "https://voice-agent.caprover.al-arcade.com"
def test_mcq_generation(grade: int, subject: str, unit: str, concept: str, is_arabic: bool, count: int):
"""
Tests the POST /mcq/generate endpoint.
"""
endpoint = f"{BASE_URL}/mcq/generate"
payload = {
"grade": grade,
"subject": subject,
"unit": unit,
"concept": concept,
"is_arabic": is_arabic,
"count": count,
}
print(f">> Attempting to GENERATE {count} new questions for:")
print(f" Topic: Grade {grade} {subject} -> {unit} -> {concept}")
print(f" Language: {'Arabic' if is_arabic else 'English'}")
try:
response = requests.post(endpoint, data=payload, timeout=120)
if response.status_code == 200:
print(f"SUCCESS: API returned status code {response.status_code}")
data = response.json()
print(f" Message: {data.get('message')}")
if 'questions' in data and data['questions']:
print("\n --- Details of Generated Questions ---")
for i, q in enumerate(data['questions'], 1):
print(f" {i}. Question: {q['question_text']}")
print(f" Correct: {q['correct_answer']}")
print(f" Wrong 1: {q['wrong_answer_1']}")
print(f" Wrong 2: {q['wrong_answer_2']}")
print(f" Wrong 3: {q['wrong_answer_3']}\n")
return True
else:
print(f"FAILED: API returned status code {response.status_code}")
try:
error_data = response.json()
print(f" Error Detail: {error_data.get('detail', 'No detail provided.')}")
except json.JSONDecodeError:
print(f" Response was not valid JSON: {response.text}")
return False
except requests.exceptions.RequestException as e:
print(f"FAILED: An error occurred while making the request: {e}")
return False
def test_mcq_retrieval(grade: int, subject: str, unit: str, concept: str, is_arabic: bool, limit: Optional[int]):
"""
Tests the GET /mcq endpoint with detailed output.
"""
endpoint = f"{BASE_URL}/mcq"
params = {
"grade": grade,
"subject": subject,
"unit": unit,
"concept": concept,
"is_arabic": is_arabic,
}
if limit is not None:
params["limit"] = limit
limit_str = f"up to {limit}" if limit is not None else "ALL"
print(f">> Attempting to RETRIEVE {limit_str} stored questions for the same topic...")
try:
response = requests.get(endpoint, params=params, timeout=30)
if response.status_code == 200:
print(f"SUCCESS: API returned status code {response.status_code}")
data = response.json()
print(f" Found {data.get('count')} stored questions in the database.")
if 'questions' in data and data['questions']:
print("\n --- Details of Retrieved Questions ---")
for i, q in enumerate(data['questions'], 1):
print(f" {i}. Question: {q['question_text']}")
print(f" Correct: {q['correct_answer']}")
print(f" Wrong 1: {q['wrong_answer_1']}")
print(f" Wrong 2: {q['wrong_answer_2']}")
print(f" Wrong 3: {q['wrong_answer_3']}\n")
elif data.get('count') == 0:
print(" (This is expected if this is the first time generating questions for this topic)")
return True
else:
print(f"FAILED: API returned status code {response.status_code}")
try:
error_data = response.json()
print(f" Error Detail: {error_data.get('detail', 'No detail provided.')}")
except json.JSONDecodeError:
print(f" Response was not valid JSON: {response.text}")
return False
except requests.exceptions.RequestException as e:
print(f"FAILED: An error occurred while making the request: {e}")
return False
if __name__ == "__main__":
print("\n" + "="*50)
print("STARTING TEST 1: ARABIC MCQ GENERATION & RETRIEVAL")
print("="*50)
# IMPORTANT: Use actual Unit/Concept names from your database for the best results.
arabic_test_data = {
"grade": 4,
"subject": "Science",
"unit": "الوحدة الأولى: الأنظمة الحية",
"concept": "المفهوم الأول: التكيف والبقاء",
"is_arabic": True,
"count": 3
}
generation_successful = test_mcq_generation(**arabic_test_data)
if generation_successful:
print("-" * 25)
time.sleep(2)
test_mcq_retrieval(limit=None, **{k:v for k,v in arabic_test_data.items() if k != 'count'})
print("\n" + "="*50)
print("STARTING TEST 2: ENGLISH MCQ GENERATION & RETRIEVAL")
print("="*50)
english_test_data = {
"grade": 5,
"subject": "Science",
"unit": "Unit 1: Matter and Energy in Ecosystems",
"concept": "Concept 1.1: Properties of Matter",
"is_arabic": False,
"count": 2
}
generation_successful = test_mcq_generation(**english_test_data)
if generation_successful:
print("-" * 25)
time.sleep(2)
test_mcq_retrieval(limit=None, **{k:v for k,v in english_test_data.items() if k != 'count'})
print("\n" + "="*50)
print("All tests complete.")
print("="*50)
\ No newline at end of file
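Note that this older cookbook predates the schema change above: both endpoints now also require a curriculum parameter, and grade is a string rather than an integer. A sketch of an updated retrieval call (values are illustrative; BASE_URL as defined in this script):

import requests

params = {
    "curriculum": "EGYPTIAN National",  # now required by GET /mcq
    "grade": "4th Grade",               # grade is a string in the new signature
    "subject": "Science",
    "unit": "Unit 1: Matter and Energy in Ecosystems",
    "concept": "Concept 1.1: Properties of Matter",
    "is_arabic": False,
}
resp = requests.get(f"{BASE_URL}/mcq", params=params, timeout=30)
resp.raise_for_status()
print(resp.json()["count"])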