Update files from the datasets library (from 1.7.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0
README.md
CHANGED
@@ -21,6 +21,7 @@ task_ids:
 - structure-prediction-other-clause-segmentation
 - structure-prediction-other-sentence-segmentation
 - structure-prediction-other-word-segmentation
+paperswithcode_id: null
 ---

 # Dataset Card for LST20
@@ -28,12 +29,12 @@ task_ids:
 ## Table of Contents
 - [Dataset Description](#dataset-description)
   - [Dataset Summary](#dataset-summary)
-  - [Supported Tasks](#supported-tasks-and-leaderboards)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
   - [Languages](#languages)
 - [Dataset Structure](#dataset-structure)
   - [Data Instances](#data-instances)
-  - [Data Fields](#data-instances)
-  - [Data Splits](#data-instances)
+  - [Data Fields](#data-fields)
+  - [Data Splits](#data-splits)
 - [Dataset Creation](#dataset-creation)
   - [Curation Rationale](#curation-rationale)
   - [Source Data](#source-data)
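The new `paperswithcode_id: null` line lands inside the card's YAML front matter, the block between the two `---` markers that the Hub parses as dataset metadata. As a minimal sketch of what that means for consumers of the card, the snippet below parses a shortened, illustrative card with PyYAML; the `parse_front_matter` helper and the trimmed card text are hypothetical, not part of the datasets library:

import yaml  # PyYAML (pip install pyyaml)

CARD = """\
---
task_ids:
- structure-prediction-other-word-segmentation
paperswithcode_id: null
---

# Dataset Card for LST20
"""

def parse_front_matter(card_text):
    # Split the card into its YAML metadata block and the Markdown body.
    _, header, body = card_text.split("---", 2)
    return yaml.safe_load(header), body

metadata, body = parse_front_matter(CARD)
print(metadata["paperswithcode_id"])  # None: YAML `null` maps to Python None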
lst20.py
CHANGED
@@ -152,7 +152,7 @@ class Lst20(datasets.GeneratorBasedBuilder):
     ]

     def _generate_examples(self, filepath):
-        for fname in sorted(glob.glob(os.path.join(filepath, "*.txt"))):
+        for file_idx, fname in enumerate(sorted(glob.glob(os.path.join(filepath, "*.txt")))):
             with open(fname, encoding="utf-8") as f:
                 guid = 0
                 tokens = []
@@ -163,7 +163,7 @@ class Lst20(datasets.GeneratorBasedBuilder):
                 for line in f:
                     if line in self._SENTENCE_SPLITTERS:
                         if tokens:
-                            yield guid, {
+                            yield f"{file_idx}_{guid}", {
                                 "id": str(guid),
                                 "fname": Path(fname).name,
                                 "tokens": tokens,
@@ -187,7 +187,7 @@ class Lst20(datasets.GeneratorBasedBuilder):
                     clause_tags.append(splits[3].rstrip())
                 # last example
                 if tokens:
-                    yield guid, {
+                    yield f"{file_idx}_{guid}", {
                         "id": str(guid),
                         "fname": Path(fname).name,
                         "tokens": tokens,
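The only behavioral change in `lst20.py` is the key each example is yielded under. `guid` is reset to 0 for every `.txt` file, so yielding bare `guid` values repeats keys (0, 1, 0, 1, ...) as soon as a split spans more than one file; prefixing the file index keeps keys unique across the whole split, which newer versions of `datasets` check. A toy sketch of the collision and the fix, with made-up file names and sentence content standing in for the real corpus:

def generate_examples(files):
    # Mirrors the pattern above: guid restarts per file, so only the
    # (file_idx, guid) pair is globally unique.
    for file_idx, fname in enumerate(sorted(files)):
        guid = 0
        for sentence in ["first sentence", "second sentence"]:  # stand-in data
            yield f"{file_idx}_{guid}", {"id": str(guid), "fname": fname}
            guid += 1

keys = [key for key, _ in generate_examples(["a.txt", "b.txt"])]
assert len(keys) == len(set(keys))  # "0_0", "0_1", "1_0", "1_1": no duplicates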