File size: 2,254 Bytes
22c880e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d38ee56
 
 
e6162fe
22c880e
 
 
 
 
 
 
 
551d4cb
 
 
 
22c880e
 
 
551d4cb
22c880e
 
 
551d4cb
22c880e
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import json
import os

import datasets

# BibTeX citation surfaced in the dataset card / DatasetInfo; placeholder for now.
_CITATION = """\
# (Optional) Add your citation here
"""

# Human-readable description shown in DatasetInfo.
_DESCRIPTION = """\
NLGraph: A collection of graph-related tasks with natural language representations.
Each subset corresponds to a task (e.g., connectivity, cycle, shortest_path).
Each split contains examples stored in JSON Lines format.
"""

# Map folder name to config (subset)
# Each entry doubles as the on-disk folder name (`<task>/train.jsonl`,
# `<task>/test.jsonl`) and the `datasets` config name.
TASKS = [
    "connectivity",
    "cycle",
    "flow",
    "GNN",
    "hamilton",
    "matching",
    "shortest_path",
    "topology",
]

class NLGraphConfig(datasets.BuilderConfig):
    """BuilderConfig for one NLGraph task (subset).

    The task name is used both as the config's public ``name`` and kept on
    ``self.task_name`` so builder code can refer to it explicitly.
    """

    def __init__(self, task_name, **kwargs):
        # The config name shown to users is the task name itself.
        super().__init__(name=task_name, **kwargs)
        self.task_name = task_name


class NLGraph(datasets.GeneratorBasedBuilder):
    """Dataset builder for NLGraph: graph-reasoning tasks posed in natural language.

    One config per task in ``TASKS``; each config exposes ``train`` and ``test``
    splits read from ``<task_name>/train.jsonl`` and ``<task_name>/test.jsonl``.
    """

    BUILDER_CONFIGS = [
        NLGraphConfig(task_name=task) for task in TASKS
    ]

    def _info(self):
        """Return dataset metadata: declared features, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
                "difficulty": datasets.Value("string"),
                "doc_id": datasets.Value("string"),
                # add more fields depending on your JSONL schema
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/huayangli/nlgraph",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-task JSONL files and declare the train/test splits.

        The file layout is ``<config name>/<split>.jsonl``; ``self.config.name``
        is the task name (see NLGraphConfig).
        """
        # Resolve both split files with one comprehension instead of two
        # near-identical download calls.
        data_files = {
            split: dl_manager.download_and_extract(
                os.path.join(self.config.name, f"{split}.jsonl")
            )
            for split in ("train", "test")
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSON Lines file.

        Keys are the 0-based line position in the file. Blank lines (e.g. a
        trailing newline at EOF) are skipped instead of raising
        ``json.JSONDecodeError``.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    # A trailing/empty line is not valid JSON; skip rather than crash.
                    continue
                yield idx, json.loads(line)