Update Space (evaluate main: e4a27243)

Files changed:
- bleu.py (+28 -3)
- requirements.txt (+1 -1)

bleu.py CHANGED
@@ -13,6 +13,9 @@
 # limitations under the License.
 """ BLEU metric. """
 
+from dataclasses import dataclass
+from typing import Callable, Optional
+
 import datasets
 
 import evaluate
@@ -84,13 +87,27 @@ Examples:
 """
 
 
+@dataclass
+class BleuConfig(evaluate.info.Config):
+
+    name: str = "default"
+
+    tokenizer: Optional[Callable] = None
+    max_order: int = 4
+    smooth: bool = False
+
+
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class Bleu(evaluate.Metric):
-    def _info(self):
+    CONFIG_CLASS = BleuConfig
+    ALLOWED_CONFIG_NAMES = ["default"]
+
+    def _info(self, config):
         return evaluate.MetricInfo(
             description=_DESCRIPTION,
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
+            config=config,
             features=[
                 datasets.Features(
                     {
@@ -112,7 +129,12 @@ class Bleu(evaluate.Metric):
             ],
         )
 
-    def _compute(self, predictions, references, tokenizer=Tokenizer13a(), max_order=4, smooth=False):
+    def _compute(self, predictions, references):
+        if self.config.tokenizer is None:
+            tokenizer = Tokenizer13a()
+        else:
+            tokenizer = self.config.tokenizer
+
         # if only one reference is provided make sure we still use list of lists
         if isinstance(references[0], str):
             references = [[ref] for ref in references]
@@ -120,7 +142,10 @@ class Bleu(evaluate.Metric):
         references = [[tokenizer(r) for r in ref] for ref in references]
         predictions = [tokenizer(p) for p in predictions]
         score = compute_bleu(
-            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
+            reference_corpus=references,
+            translation_corpus=predictions,
+            max_order=self.config.max_order,
+            smooth=self.config.smooth,
         )
         (bleu, precisions, bp, ratio, translation_length, reference_length) = score
         return {
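For orientation, a quick usage sketch of the reworked metric. It is hypothetical: this commit does not show the loading machinery, so the sketch assumes the pinned evaluate revision forwards extra keyword arguments from evaluate.load() into the metric's CONFIG_CLASS (here BleuConfig), and str.split is only a stand-in for the default Tokenizer13a.

import evaluate

# ASSUMPTION: load() forwards these kwargs into BleuConfig; not shown in this diff.
# str.split stands in for Tokenizer13a: any callable mapping a string to a list
# of tokens fits the Optional[Callable] tokenizer field above.
bleu = evaluate.load("bleu", tokenizer=str.split, max_order=2, smooth=True)

results = bleu.compute(
    predictions=["the cat sat on the mat"],
    references=[["the cat sat on the mat", "a cat sat on the mat"]],
)
print(results["bleu"])  # keys mirror the dict built at the end of _compute

The practical effect of the change: tokenizer, max_order, and smooth move from per-call arguments of _compute to load-time fields on the config, so a loaded metric carries one fixed, inspectable configuration.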
requirements.txt CHANGED

@@ -1 +1 @@
-git+https://github.com/huggingface/evaluate@
+git+https://github.com/huggingface/evaluate@e4a2724377909fe2aeb4357e3971e5a569673b39