deepang1902 commited on
Commit
e2115c9
·
verified ·
1 Parent(s): 7709312

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request, jsonify
from flask_cors import CORS
from transformers import pipeline
import nltk
import ssl

# Workaround for NLTK download failures on systems with missing root
# certificates or SSL-inspecting proxies: fall back to an unverified
# HTTPS context so `nltk.download` can reach the data server.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Python build without this private helper: keep the default context.
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context

# Download the sentence tokenizer used by nltk.sent_tokenize().
# NLTK >= 3.8.2 looks the resource up under 'punkt_tab' instead of 'punkt',
# so fetch both; an unknown package name is reported by nltk.download but
# does not raise, so this is safe across NLTK versions.
nltk.download('punkt', quiet=True)
nltk.download('punkt_tab', quiet=True)
17
+
18
# --- Model Loading ---
# 'valhalla/t5-base-qg-hl' is a T5 model fine-tuned for question
# generation: given a passage, it emits one or more questions about it.
# Loaded once at import time so every request reuses the same pipeline.
print("Loading question generation model...")
question_generator = pipeline(
    "text2text-generation",
    model="valhalla/t5-base-qg-hl",
)
print("Model loaded successfully!")


# --- Flask App Setup ---
app = Flask(__name__)
# Allow a browser frontend served from a different origin to call this API.
CORS(app)
33
+
34
# --- API Endpoint ---
@app.route('/generate-quiz', methods=['POST'])
def generate_quiz():
    """Generate quiz questions from a block of text.

    Expects a JSON body of the form ``{"context": "<text>"}`` and returns
    ``{"quiz": [{"question": <str>, "context": <sentence>}, ...]}``.
    Responds with HTTP 400 when the body is missing/not JSON, the
    ``context`` key is absent, or the context is not a non-empty string.
    """
    # silent=True: return None instead of letting Flask raise its own
    # opaque 400 on a non-JSON body, so we can send a consistent error.
    data = request.get_json(silent=True)
    if not data or 'context' not in data:
        return jsonify({'error': 'No context provided in the request.'}), 400

    context = data['context']
    # Reject non-string or blank contexts up front instead of failing
    # later inside the tokenizer or the model.
    if not isinstance(context, str) or not context.strip():
        return jsonify({'error': 'Context must be a non-empty string.'}), 400

    # 1. Split the context into individual sentences.
    sentences = nltk.sent_tokenize(context)

    # Cap the number of questions to keep response times reasonable.
    max_questions = 10
    sentences = sentences[:max_questions]

    generated_questions = []

    print(f"Generating questions for {len(sentences)} sentences...")

    # 2. Generate a question for each sentence.
    for sentence in sentences:
        try:
            # The model may emit several questions separated by <sep>;
            # keep only the first.
            output = question_generator(sentence, max_length=64, num_beams=4)
            question = output[0]['generated_text'].split('<sep>')[0].strip()
            # Basic sanity filter: keep only well-formed, non-trivial questions.
            if question.endswith('?') and len(question.split()) > 3:
                generated_questions.append({'question': question, 'context': sentence})
        except Exception as e:
            # Best-effort: skip sentences the model chokes on rather than
            # failing the whole request.
            print(f"Error generating question for sentence: '{sentence}'. Error: {e}")

    print(f"Successfully generated {len(generated_questions)} questions.")

    # 3. Return the list of questions as a JSON response.
    return jsonify({'quiz': generated_questions})
78
+
79
# --- Run the App ---
if __name__ == '__main__':
    # Locally the API is reachable at http://127.0.0.1:5000; inside Colab
    # an HTTP tunnel (e.g. ngrok) is needed to expose it externally.
    app.run(host='0.0.0.0', port=5000)