Commit ac93005 by iwonachristop (verified) · 1 parent: c691966

Update README.md

Files changed (1): README.md (+108, -1)

README.md CHANGED
@@ -210,7 +210,114 @@ The inclusion of a dataset in the collection was determined by the following cri
 To evaluate your model according to the methodology used in our paper, you can use the following code.
 
 ```python
-
+import os
+import string
+
+from Levenshtein import ratio
+from datasets import load_dataset, Dataset, concatenate_datasets
+from sklearn.metrics import classification_report, f1_score, accuracy_score
+
+# 🔧 Change this path to where your JSONL prediction files are stored
+outputs_path = "./"
+
+_DATASETS = [
+    "cafe", "crema_d", "emns", "emozionalmente", "enterface",
+    "jl_Corpus", "mesd", "nemo", "oreau", "pavoque",
+    "ravdess", "resd", "subesco",
+]
+
+THRESHOLD = 0.57
+
+
+def get_expected(split: str) -> tuple[set, str, dict]:
+    """Load expected emotion labels and language metadata from CAMEO dataset."""
+    ds = load_dataset("amu-cai/CAMEO", split=split)
+    return set(ds["emotion"]), ds["language"][0], dict(zip(ds["file_id"], ds["emotion"]))
+
+
+def process_outputs(dataset_name: str) -> tuple[Dataset, set, str]:
+    """Clean and correct predictions, returning a Dataset with fixed predictions."""
+    outputs = Dataset.from_json(os.path.join(outputs_path, f"{dataset_name}.jsonl"))
+    options, language, expected = get_expected(dataset_name)
+
+    def preprocess(x):
+        return {
+            "predicted": x["predicted"].translate(str.maketrans('', '', string.punctuation)).lower().strip(),
+            "expected": expected.get(x["file_id"]),
+        }
+
+    outputs = outputs.map(preprocess)
+
+    def fix_prediction(x):
+        if x["predicted"] in options:
+            x["fixed_prediction"] = x["predicted"]
+        else:
+            predicted_words = x["predicted"].split()
+            label_scores = {
+                label: sum(r for r in (ratio(label, word) for word in predicted_words) if r > THRESHOLD)
+                for label in options
+            }
+            x["fixed_prediction"] = max(label_scores, key=label_scores.get)
+        return x
+
+    outputs = outputs.map(fix_prediction)
+    return outputs, options, language
+
+
+def calculate_metrics(outputs: Dataset, labels: set) -> dict:
+    """Compute classification metrics."""
+    y_true = outputs["expected"]
+    y_pred = outputs["fixed_prediction"]
+
+    return {
+        "f1_macro": f1_score(y_true, y_pred, average="macro"),
+        "weighted_f1": f1_score(y_true, y_pred, average="weighted"),
+        "accuracy": accuracy_score(y_true, y_pred),
+        "metrics_per_label": classification_report(
+            y_true, y_pred, target_names=sorted(labels), output_dict=True
+        ),
+    }
+
+
+# 🧮 Main Evaluation Loop
+results = []
+outputs_per_language = {}
+full_outputs, full_labels = None, set()
+
+for dataset in _DATASETS:
+    jsonl_path = os.path.join(outputs_path, f"{dataset}.jsonl")
+
+    if not os.path.isfile(jsonl_path):
+        print(f"Jsonl file for {dataset} not found.")
+        continue
+
+    outputs, labels, language = process_outputs(dataset)
+    metrics = calculate_metrics(outputs, labels)
+    results.append({"language": language, "dataset": dataset, **metrics})
+
+    if language not in outputs_per_language:
+        outputs_per_language[language] = {"labels": labels, "outputs": outputs}
+    else:
+        outputs_per_language[language]["labels"] |= labels
+        outputs_per_language[language]["outputs"] = concatenate_datasets([
+            outputs_per_language[language]["outputs"], outputs
+        ])
+
+    full_outputs = outputs if full_outputs is None else concatenate_datasets([full_outputs, outputs])
+    full_labels |= labels
+
+# 🔤 Per-language evaluation
+for language, data in outputs_per_language.items():
+    metrics = calculate_metrics(data["outputs"], data["labels"])
+    results.append({"language": language, "dataset": "all", **metrics})
+
+# 🌍 Global evaluation
+if full_outputs is not None:
+    metrics = calculate_metrics(full_outputs, full_labels)
+    results.append({"language": "all", "dataset": "all", **metrics})
+
+# 💾 Save results
+Dataset.from_list(results).to_json(os.path.join(outputs_path, "results.jsonl"))
 ```
 
 ## Additional Information
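
The evaluation script added above reads one JSONL prediction file per dataset from `outputs_path`, named `<dataset>.jsonl`; each record needs a `file_id` matching a CAMEO sample and a free-text `predicted` emotion. Predictions are lowercased, stripped of punctuation, and, when they are not an exact label, fuzzy-matched to the closest emotion label via Levenshtein ratios, so the model does not have to emit an exact label string. A minimal sketch of producing such a file is shown below; `predict_emotion` is a hypothetical placeholder for your own model, and the `audio` column name is an assumption about the split's features.

```python
import json

from datasets import load_dataset

def predict_emotion(audio) -> str:
    # Hypothetical placeholder: replace with your model's inference call.
    return "neutral"

split = "ravdess"  # any entry from _DATASETS
ds = load_dataset("amu-cai/CAMEO", split=split)

# Write one prediction per line; free-text answers are allowed because the
# evaluation script fuzzy-matches them to the label set.
with open(f"{split}.jsonl", "w", encoding="utf-8") as f:
    for sample in ds:
        prediction = predict_emotion(sample["audio"])
        f.write(json.dumps({"file_id": sample["file_id"], "predicted": prediction}) + "\n")
```

Repeating this for every entry in `_DATASETS` produces the files that the evaluation loop aggregates into per-dataset, per-language, and global scores.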