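"""Hugging Face `datasets` loading script for low-resource direct assessment (DA)
quality estimation (QE) data.

Each language-pair config (engu, enhi, enmr, enta, ente, eten, neen, sien) reads
tab-separated files named ``{split}.{pair}.df.short.tsv`` located next to this
script (or under ``data_dir``). The ``multilingual`` config combines all pairs
for train/dev and keeps one ``test_<pair>`` split per language pair.
"""
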
import ast
import csv
import os
import statistics
from typing import Dict, Iterable, List, Optional

import datasets

logger = datasets.logging.get_logger(__name__)


class LowResourceQeDaConfig(datasets.BuilderConfig):
    """BuilderConfig for a single language pair (or the combined multilingual config)."""

    def __init__(
        self,
        language_pair: tuple,
        has_model_scores: bool = False,
        has_pe: bool = False,
        language_pairs: Optional[List[str]] = None,
        include_lang_pair: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.language_pair = language_pair
        self.has_model_scores = has_model_scores
        self.has_pe = has_pe
        self.language_pairs = language_pairs
        self.include_lang_pair = include_lang_pair


LANGUAGE_CONFIGS = {
    "engu": LowResourceQeDaConfig(
        name="engu",
        description="English-Gujarati direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("English", "Gujarati"),
    ),
    "enhi": LowResourceQeDaConfig(
        name="enhi",
        description="English-Hindi direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("English", "Hindi"),
    ),
    "enmr": LowResourceQeDaConfig(
        name="enmr",
        description="English-Marathi direct assessment QE (test split contains PE)",
        version=datasets.Version("1.0.0"),
        language_pair=("English", "Marathi"),
        has_pe=True,
    ),
    "enta": LowResourceQeDaConfig(
        name="enta",
        description="English-Tamil direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("English", "Tamil"),
    ),
    "ente": LowResourceQeDaConfig(
        name="ente",
        description="English-Telugu direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("English", "Telugu"),
    ),
    "eten": LowResourceQeDaConfig(
        name="eten",
        description="Estonian-English direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("Estonian", "English"),
        has_model_scores=True,
    ),
    "neen": LowResourceQeDaConfig(
        name="neen",
        description="Nepali-English direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("Nepali", "English"),
        has_model_scores=True,
    ),
    "sien": LowResourceQeDaConfig(
        name="sien",
        description="Sinhala-English direct assessment QE",
        version=datasets.Version("1.0.0"),
        language_pair=("Sinhala", "English"),
        has_model_scores=True,
    ),
}

MULTILINGUAL_CONFIG = LowResourceQeDaConfig(
    name="multilingual",
    description="All language pairs combined for train/dev with language labels; test splits remain per language pair.",
    version=datasets.Version("1.0.0"),
    language_pair=("multi", "multi"),
    has_model_scores=True,
    has_pe=True,
    language_pairs=list(LANGUAGE_CONFIGS.keys()),
    include_lang_pair=True,
)


def _parse_list(value: str) -> List:
    """Parse a stringified Python list/tuple (as stored in the TSV columns) into a list."""
    try:
        parsed = ast.literal_eval(value)
        return list(parsed) if isinstance(parsed, (list, tuple)) else []
    except (ValueError, SyntaxError):
        return []

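# Expected TSV layout (inferred from the field accesses in this script, not from
# an external spec): a tab-separated header row with at least the columns
#
#   index  original  translation  scores  mean  z_scores  z_mean
#
# plus "model_scores" for configs with has_model_scores=True and "PE"/"pe" for
# configs with has_pe=True. "scores" and "z_scores" hold stringified Python
# lists, e.g. _parse_list("[90, 100, 85]") -> [90, 100, 85].
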
def _compute_stats(filepath: str) -> Dict[str, object]:
    """Collect per-file DA statistics: instance count and min/max/median of mean and z_mean."""
    means: List[float] = []
    z_means: List[float] = []
    with open(filepath, encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
        for row in reader:
            try:
                means.append(float(row["mean"]))
            except (KeyError, ValueError):
                continue
            try:
                z_means.append(float(row["z_mean"]))
            except (KeyError, ValueError):
                continue

    def _range_and_median(values: List[float]) -> Dict[str, Optional[float]]:
        if not values:
            return {"min": None, "max": None, "median": None}
        return {
            "min": min(values),
            "max": max(values),
            "median": statistics.median(values),
        }

    return {
        "count": len(means),
        "mean_stats": _range_and_median(means),
        "z_mean_stats": _range_and_median(z_means),
    }


class LowResourceQeDa(datasets.GeneratorBasedBuilder):
    """Dataset builder for direct-assessment QE over several low-resource language pairs."""

    # Custom config class so configs created from kwargs keep the extra fields.
    BUILDER_CONFIG_CLASS = LowResourceQeDaConfig
    BUILDER_CONFIGS = list(LANGUAGE_CONFIGS.values()) + [MULTILINGUAL_CONFIG]
    DEFAULT_CONFIG_NAME = "engu"

    def _info(self) -> datasets.DatasetInfo:
        features = {
            "index": datasets.Value("string"),
            "original": datasets.Value("string"),
            "translation": datasets.Value("string"),
            "scores": datasets.Sequence(datasets.Value("int32")),
            "mean": datasets.Value("float32"),
            "z_scores": datasets.Sequence(datasets.Value("float32")),
            "z_mean": datasets.Value("float32"),
        }
        if self.config.include_lang_pair:
            features["lang_pair"] = datasets.Value("string")
        if self.config.has_model_scores:
            features["model_scores"] = datasets.Value("float32")
        if self.config.has_pe:
            features["pe"] = datasets.Value("string")

        return datasets.DatasetInfo(
            description="Direct assessment quality estimation data for multiple low-resource language pairs.",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        data_dir = self._resolve_data_dir()
        selected_pairs = self.config.language_pairs or [self.config.name]

        def _file_for(split: str, lang: str) -> str:
            return os.path.join(data_dir, f"{split}.{lang}.df.short.tsv")

        train_files = [(lang, _file_for("train", lang)) for lang in selected_pairs if os.path.exists(_file_for("train", lang))]
        dev_files = [(lang, _file_for("dev", lang)) for lang in selected_pairs if os.path.exists(_file_for("dev", lang))]
        test_files = [(lang, _file_for("test", lang)) for lang in selected_pairs if os.path.exists(_file_for("test", lang))]

        stats = self._collect_stats(train_files, dev_files, test_files)
        self._log_overview(stats)

        generators: List[datasets.SplitGenerator] = []
        if train_files:
            generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepaths": train_files, "split_name": "train"},
                )
            )
        if dev_files:
            generators.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepaths": dev_files, "split_name": "dev"},
                )
            )
        # Single-pair configs expose one "test" split; the multilingual config keeps
        # a separate "test_<pair>" split for each language pair.
        if len(selected_pairs) == 1:
            if test_files:
                generators.append(
                    datasets.SplitGenerator(
                        name=datasets.Split.TEST,
                        gen_kwargs={"filepaths": test_files, "split_name": "test"},
                    )
                )
        else:
            for lang, path in test_files:
                generators.append(
                    datasets.SplitGenerator(
                        name=f"test_{lang}",
                        gen_kwargs={"filepaths": [(lang, path)], "split_name": "test"},
                    )
                )
        return generators

    def _generate_examples(self, filepaths: List[tuple], split_name: str) -> Iterable:
        idx = 0
        for lang, filepath in filepaths:
            # The per-pair config decides whether this file is expected to carry model scores.
            pair_config = LANGUAGE_CONFIGS.get(lang, self.config)
            with open(filepath, encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                for row in reader:
                    item = {
                        "index": str(row.get("index", "")),
                        "original": row.get("original", ""),
                        "translation": row.get("translation", ""),
                        "scores": [int(x) for x in _parse_list(row.get("scores", ""))],
                        # "or 0.0" also covers empty-string cells, not just missing columns.
                        "mean": float(row.get("mean") or 0.0),
                        "z_scores": [float(x) for x in _parse_list(row.get("z_scores", ""))],
                        "z_mean": float(row.get("z_mean") or 0.0),
                    }
                    if self.config.include_lang_pair:
                        item["lang_pair"] = lang
                    if self.config.has_model_scores:
                        model_value = row.get("model_scores")
                        item["model_scores"] = (
                            float(model_value) if pair_config.has_model_scores and model_value not in (None, "") else None
                        )
                    if self.config.has_pe:
                        item["pe"] = row.get("PE") or row.get("pe") or None
                    yield idx, item
                    idx += 1

    def _collect_stats(self, train_files, dev_files, test_files):
        def collect(file_list):
            return {lang: _compute_stats(path) for lang, path in file_list}

        return {
            "train": collect(train_files),
            "dev": collect(dev_files),
            "test": collect(test_files),
        }

    def _resolve_data_dir(self) -> str:
        """Find the directory that actually contains the TSV files."""
        candidates = []
        if self.config.data_dir:
            candidates.append(os.path.abspath(self.config.data_dir))

        # Honour the env var that the error message below advertises.
        env_dir = os.environ.get("LOW_RESOURCE_QE_DA_DATA_DIR")
        if env_dir:
            candidates.append(os.path.abspath(env_dir))

        candidates.append(os.path.abspath(os.path.dirname(__file__)))
        candidates.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))

        for cand in candidates:
            if os.path.exists(os.path.join(cand, "train.engu.df.short.tsv")):
                return cand

        raise FileNotFoundError(
            f"Could not locate TSV files. Checked: {candidates}. "
            "Pass data_dir to load_dataset or set LOW_RESOURCE_QE_DA_DATA_DIR env var."
        )

    def _log_overview(self, stats: Dict[str, Dict[str, Dict[str, Optional[float]]]]) -> None:
        def print_line(msg: str):
            logger.info(msg)
            print(msg, flush=True)

        if self.config.language_pairs and len(self.config.language_pairs) > 1:
            # Multilingual config: aggregate per split, then break down per language pair.
            for split_name, split_stats in stats.items():
                if not split_stats:
                    print_line(f"[{self.config.name}] split={split_name} | no files found")
                    continue
                total = sum(s["count"] for s in split_stats.values())
                mean_mins = [s["mean_stats"]["min"] for s in split_stats.values() if s["mean_stats"]["min"] is not None]
                mean_maxs = [s["mean_stats"]["max"] for s in split_stats.values() if s["mean_stats"]["max"] is not None]
                z_mins = [s["z_mean_stats"]["min"] for s in split_stats.values() if s["z_mean_stats"]["min"] is not None]
                z_maxs = [s["z_mean_stats"]["max"] for s in split_stats.values() if s["z_mean_stats"]["max"] is not None]
                if mean_mins and mean_maxs and z_mins and z_maxs:
                    print_line(
                        f"[{self.config.name}] split={split_name} | total instances={total} | "
                        f"DA mean range {min(mean_mins):.3f}–{max(mean_maxs):.3f} | "
                        f"z_mean range {min(z_mins):.3f}–{max(z_maxs):.3f}"
                    )
                for lang, s in split_stats.items():
                    mean_stats = s["mean_stats"]
                    z_stats = s["z_mean_stats"]
                    # Guard against files without parseable gold scores (stats would be None).
                    if mean_stats["min"] is None or z_stats["min"] is None:
                        print_line(f"  - {lang} | n={s['count']} | no DA scores found")
                        continue
                    print_line(
                        f"  - {lang} | n={s['count']} | DA mean {mean_stats['min']:.3f}–{mean_stats['max']:.3f} "
                        f"(median {mean_stats['median']:.3f}) | z_mean {z_stats['min']:.3f}–{z_stats['max']:.3f} "
                        f"(median {z_stats['median']:.3f})"
                    )
        else:
            source_lang, target_lang = self.config.language_pair
            for split, split_stats in stats.items():
                if not split_stats:
                    print_line(f"Loaded {self.config.name} ({source_lang} → {target_lang}) | split={split} | no files found")
                    continue
                lang = self.config.name
                s = split_stats.get(lang) or next(iter(split_stats.values()))
                mean_stats = s["mean_stats"]
                z_stats = s["z_mean_stats"]
                # Guard against files without parseable gold scores (stats would be None).
                if mean_stats["min"] is None or z_stats["min"] is None:
                    print_line(
                        f"Loaded {lang} ({source_lang} → {target_lang}) | split={split} | "
                        f"instances={s['count']} | no DA scores found"
                    )
                    continue
                print_line(
                    f"Loaded {lang} ({source_lang} → {target_lang}) | split={split} | instances={s['count']} "
                    f"| DA mean range {mean_stats['min']:.3f}–{mean_stats['max']:.3f} (median {mean_stats['median']:.3f}) "
                    f"| z_mean range {z_stats['min']:.3f}–{z_stats['max']:.3f} (median {z_stats['median']:.3f})"
                )
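

# Usage sketch (illustrative only; the script path below is an assumption, not
# part of this repository's documented layout):
#
#   import datasets
#
#   # Single language pair, with the TSV files placed next to this script:
#   ds = datasets.load_dataset("path/to/low_resource_qe_da.py", name="sien")
#   print(ds["train"][0]["mean"], ds["train"][0]["z_mean"])
#
#   # All pairs combined; per-pair test splits are exposed as "test_<pair>":
#   multi = datasets.load_dataset("path/to/low_resource_qe_da.py", name="multilingual")
#   print(multi["test_engu"])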