Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: Japanese
Size: 10K - 100K
Tags: question-generation
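
The dataset is built with the script below: each raw JaQuAD entry is converted into question-generation format, keeping the original question and paragraph and adding the answer, the sentence containing the answer, and copies of the paragraph and sentence in which the answer span is marked with the <hl> highlight token.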
| """ Script to process raw SQuAD file for Question Generation format | |
| cd data/processed | |
| gsplit -l 500 -d --additional-suffix=.jsonl train.jsonl train | |
| gsplit -l 500 -d --additional-suffix=.jsonl test.jsonl test | |
| gsplit -l 1000 -d --additional-suffix=.jsonl validation.jsonl validation | |
| rm -rf test.jsonl | |
| rm -rf train.jsonl | |
| rm -rf validation.jsonl | |
| """ | |
import json
import os
import re
from typing import Dict

from tqdm import tqdm
from datasets import load_dataset

from ja_sentence_split import JASplitter  # Japanese sentence splitter (local helper module)

HIGHLIGHT_TOKEN = '<hl>'
SPLITTER = JASplitter()
def get_sentence(document: str):
    return [str(s) for s in SPLITTER(document)]
def process_single_data(data: Dict):
    """ Convert a single raw JaQuAD entry into QG format """
    example = {'question': data["question"], 'paragraph': data["context"]}
    # check that the annotated answer span actually matches the paragraph
    answer_text = data['answers']['text'][0]
    answer_start = data['answers']['answer_start'][0]
    answer_end = answer_start + len(answer_text)
    assert example['paragraph'][answer_start: answer_end] == answer_text
    example['answer'] = answer_text
    # get the sentence containing the answer
    position = example['paragraph'].find(example['answer'])
    assert position != -1
    before_tmp = get_sentence(example['paragraph'][:position])
    if len(before_tmp) == 0:
        before = ''
        before_sentence = ''
    else:
        if before_tmp[-1].endswith('。'):
            # the answer starts a new sentence
            before = ' '.join(before_tmp)
            before_sentence = ''
        else:
            # the answer continues the last sentence before it
            before = ' '.join(before_tmp[:-1])
            before_sentence = before_tmp[-1]
    after_tmp = get_sentence(example['paragraph'][position + len(example['answer']):])
    if len(after_tmp) == 0:
        after = ''
        after_sentence = ''
    else:
        after = ' '.join(after_tmp[1:])
        after_sentence = after_tmp[0]
    example['sentence'] = '{}{}{}'.format(before_sentence, example['answer'], after_sentence)
    # get paragraph_sentence: the paragraph with the answer sentence highlighted
    source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
    # get paragraph_answer: the paragraph with the answer span highlighted
    source_text = '{0}{1}{2}{1}{3}'.format(
        example['paragraph'][:position], HIGHLIGHT_TOKEN, example['answer'],
        example['paragraph'][position + len(example['answer']):])
    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
    # get sentence_answer: the answer sentence with the answer span highlighted
    if len(before_tmp) == 0 or before_tmp[-1].endswith('。'):
        before = ''
    else:
        before = before_tmp[-1]
    if len(after_tmp) == 0:
        after = ''
    else:
        after = after_tmp[0]
    source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
    # normalise newlines after Japanese full stops
    for _k in example.keys():
        example[_k] = example[_k].replace('。\n\n', '。').replace('。\n', '。')
    return example
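
# Illustration (not part of the pipeline): assuming JASplitter segments text on
# the Japanese full stop '。', a hypothetical toy record such as
#
#   {'question': '日本の首都はどこですか。',
#    'context': '日本の首都は東京です。人口は約1億人です。',
#    'answers': {'text': ['東京'], 'answer_start': [6]}}
#
# would be converted by process_single_data into:
#
#   answer:             東京
#   sentence:           日本の首都は東京です。
#   paragraph_sentence: <hl>日本の首都は東京です。<hl>人口は約1億人です。
#   paragraph_answer:   日本の首都は<hl>東京<hl>です。人口は約1億人です。
#   sentence_answer:    日本の首都は<hl>東京<hl>です。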
if __name__ == '__main__':
    jaquad_data = load_dataset("SkelterLabsInc/JaQuAD")
    data_dev = jaquad_data['validation']
    # create a test set from the training split: hold out the first 927 unique
    # contexts (sorted) so that no context is shared between train and test
    data_train = jaquad_data['train']
    context = sorted(list(set(data_train['context'])))
    data_test = [data_train[i] for i in range(len(data_train)) if data_train[i]['context'] in context[:927]]
    data_train = [data_train[i] for i in range(len(data_train)) if data_train[i]['context'] in context[927:]]
    print(f'train ({len(data_train)}), test ({len(data_test)}), dev ({len(data_dev)})')
    data_all = {'train': data_train, 'validation': data_dev, 'test': data_test}
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    for k, _data in data_all.items():
        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
            for single_data in tqdm(_data):
                single_data = process_single_data(single_data)
                f.write(json.dumps(single_data) + '\n')
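
As a quick sanity check after running the script (a sketch, assuming the default ./data/processed output path), each line of the resulting JSONL files carries seven string fields:

import json

with open('./data/processed/train.jsonl') as f:
    record = json.loads(f.readline())
print(sorted(record.keys()))
# ['answer', 'paragraph', 'paragraph_answer', 'paragraph_sentence',
#  'question', 'sentence', 'sentence_answer']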