
Write results to a ./data/logs/results.tsv file as well

Steffo 2023-05-08 04:16:32 +02:00
parent b4cc6f8707
commit 8d831afbe3
Signed by: steffo
GPG key ID: 2A24051445686895
2 changed files with 93 additions and 78 deletions


@@ -24,6 +24,9 @@ def main():
    log.debug("Ensuring there are no leftover caches...")
    Caches.ensure_clean()
    with open("./data/logs/results.tsv", "w") as file:
        file.write("function\tanalyzer\ttokenizer\trun no\tmean absolute error\tmean squared error\tperfects\trecall 1\trecall 2\trecall 3\trecall 4\trecall 5\tprecision 1\tprecision 2\tprecision 3\tprecision 4\tprecision 5\n")
        with mongo_client_from_config() as db:
            try:
                db.admin.command("ping")
@@ -105,6 +108,9 @@ def main():
    except TrainingFailedError:
        slog.error("Training failed, trying again with a different dataset...")
        file.write(f"{sample_func.__name__}\t{SentimentAnalyzer.__name__}\t{Tokenizer.__name__}\t{runs}\t\t\t\t\t\t\t\t\t\t\t\t\t\n")
        file.flush()
        continue
    else:
@@ -112,6 +118,11 @@ def main():
        slog.info("Evaluating sentiment analyzer: %s", sa)
        evaluation_results = sa.evaluate(evaluation_dataset_func=datasets.evaluation)
        slog.info("Evaluation results: %s", evaluation_results)
        file.write(f"{sample_func.__name__}\t{SentimentAnalyzer.__name__}\t{Tokenizer.__name__}\t{runs}\t{evaluation_results.mean_absolute_error()}\t{evaluation_results.mean_squared_error()}\t{evaluation_results.perfect_count()}\t{evaluation_results.recall(1.0)}\t{evaluation_results.recall(2.0)}\t{evaluation_results.recall(3.0)}\t{evaluation_results.recall(4.0)}\t{evaluation_results.recall(5.0)}\t{evaluation_results.precision(1.0)}\t{evaluation_results.precision(2.0)}\t{evaluation_results.precision(3.0)}\t{evaluation_results.precision(4.0)}\t{evaluation_results.precision(5.0)}\n")
        file.flush()
        successful_runs += 1
        cumulative_evaluation_results += evaluation_results
        break
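
The rows written in the hunks above end up in a plain tab-separated file, so they can be read back with the standard csv module. A minimal sketch, assuming only the path and the column names from the header written in the first hunk; skipping rows with blank metrics (the failed-run rows) is purely illustrative:

import csv

# Load the results written by main(); rows written in the TrainingFailedError
# branch have empty metric columns and are skipped here.
with open("./data/logs/results.tsv", newline="") as file:
    reader = csv.DictReader(file, delimiter="\t")
    for row in reader:
        if not row["mean absolute error"]:
            continue  # training failed for this run, no metrics were recorded
        mae = float(row["mean absolute error"])
        mse = float(row["mean squared error"])
        print(f'{row["function"]} / {row["analyzer"]} / {row["tokenizer"]} run {row["run no"]}: MAE={mae}, MSE={mse}')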


@@ -147,6 +147,8 @@ class EvaluationResults:
        """
        try:
            return self.confusion_matrix[rating][rating] / self.recall_count(rating)
        except KeyError:
            return float("NaN")
        except ZeroDivisionError:
            return float("inf")
@@ -156,6 +158,8 @@ class EvaluationResults:
        """
        try:
            return self.confusion_matrix[rating][rating] / self.precision_count(rating)
        except KeyError:
            return float("NaN")
        except ZeroDivisionError:
            return float("inf")
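
The new except KeyError branches handle ratings that never appear in the confusion matrix at all, while the pre-existing ZeroDivisionError branches handle ratings that are present but have an empty row or column; presumably this is what lets the recall/precision calls in the results.tsv row above complete even when a rating class is missing from the evaluation set. A standalone sketch of the same behaviour, assuming a nested mapping keyed by rating (the shape implied by confusion_matrix[rating][rating]) and using a row sum in place of recall_count:

import collections

# Toy confusion matrix: expected rating -> Counter of predicted ratings.
confusion_matrix: dict[float, collections.Counter] = {
    1.0: collections.Counter({1.0: 3, 2.0: 1}),
    2.0: collections.Counter(),
}

def recall(rating: float) -> float:
    try:
        # Same division as EvaluationResults.recall, with the row sum
        # standing in for recall_count.
        return confusion_matrix[rating][rating] / sum(confusion_matrix[rating].values())
    except KeyError:
        return float("NaN")  # rating never appeared among the expected labels
    except ZeroDivisionError:
        return float("inf")  # rating is present but its row is empty

print(recall(1.0))  # 0.75
print(recall(2.0))  # inf
print(recall(5.0))  # nan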