Tasks: Sentence Similarity
Modalities: Text
Formats: json
Sub-tasks: semantic-similarity-scoring
Size: 10K - 100K
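This data backs the MTEB STS22 task, so it is normally consumed through MTEB rather than loaded by hand. A minimal evaluation sketch, assuming a sentence-transformers model is available (the model name here is only an example, not part of this card):

```python
import mteb
from sentence_transformers import SentenceTransformer

# Example embedding model; any sentence-embedding model works here.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Resolve the STS22 task and run the standard MTEB evaluation on it.
tasks = mteb.get_tasks(tasks=["STS22"])
results = mteb.MTEB(tasks=tasks).run(model)
```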
The script below drops samples with an empty `sentence1` or `sentence2` from the data backing the MTEB STS22 task, logs everything it removes, and re-exports each split as gzipped JSON Lines:

```python
from pathlib import Path

import mteb
from datasets import load_dataset

log_file_path = Path("remove_empty.log")
# Remove the log file from a previous run if it exists.
if log_file_path.exists():
    log_file_path.unlink()

# Resolve the Hugging Face dataset backing the MTEB STS22 task and load it.
tasks = mteb.get_tasks(tasks=["STS22"])
dataset = load_dataset(**tasks[0].metadata.dataset)


def filter_sample(x):
    """Keep a sample only if both sentences are non-empty; log anything dropped."""
    if len(x["sentence1"]) > 0 and len(x["sentence2"]) > 0:
        return True
    log = f"Filtered: {x['sentence1']} -- {x['sentence2']}"
    with open(log_file_path, "a") as f:
        f.write(log + "\n")
    print(log)
    return False


for split in dataset:
    ds = dataset[split]
    # Filter out samples with empty sentences.
    n_samples = len(ds)
    ds = ds.filter(filter_sample)
    n_left = len(ds)
    log = f"Filtered {n_samples - n_left} samples from {n_samples} in {split}"
    with open(log_file_path, "a") as f:
        f.write(log + "\n")
    print(log)
    dataset[split] = ds

# Write each cleaned split as gzipped JSON Lines into the repo's data/ directory.
save_path = Path(__file__).parent.parent / "data"
for split in dataset:
    # dataset[split].to_parquet(save_path / f"{split}-00000-of-00001.parquet")
    dataset[split].to_json(save_path / f"{split}.jsonl.gz", compression="gzip")

# Sanity check: the dataset can still be loaded from its hub path.
ds = load_dataset(tasks[0].metadata.dataset["path"])
```
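As a quick check that the cleaning worked, the exported files can be reloaded and scanned for empty sentences. A minimal sketch, assuming the script above produced `data/test.jsonl.gz` (the split names and file paths depend on which splits the dataset actually has, so `data_files` must be adjusted to match):

```python
from datasets import load_dataset

# Assumed output of the cleaning script; adjust to the splits it wrote.
clean = load_dataset("json", data_files={"test": "data/test.jsonl.gz"})

for split, ds in clean.items():
    # Collect row indices where either sentence is still empty.
    empties = [
        i
        for i, x in enumerate(ds)
        if len(x["sentence1"]) == 0 or len(x["sentence2"]) == 0
    ]
    assert not empties, f"{split} still has empty sentences at rows {empties}"
    print(f"{split}: {len(ds)} rows, no empty sentences")
```

`load_dataset("json", ...)` reads the gzipped JSON Lines files directly, so this check does not depend on MTEB or the original hub dataset.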