bda-6-steffo/unimore_bda_6/analysis/base.py

from __future__ import annotations
import abc
import logging
import dataclasses

from ..database import Text, Category, CachedDatasetFunc
from ..tokenizer import BaseTokenizer


log = logging.getLogger(__name__)


class BaseSentimentAnalyzer(metaclass=abc.ABCMeta):
    """
    Abstract base class for sentiment analyzers implemented in this project.
    """

    # noinspection PyUnusedLocal
    def __init__(self, *, tokenizer: BaseTokenizer):
        # The base class does not use the tokenizer itself; concrete subclasses decide how to apply it.
        pass

    def __repr__(self):
        return f"<{self.__class__.__qualname__}>"

    @abc.abstractmethod
    def train(self, training_dataset_func: CachedDatasetFunc, validation_dataset_func: CachedDatasetFunc) -> None:
        """
        Train the analyzer with the given training and validation datasets.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def use(self, text: Text) -> Category:
        """
        Run the model on the given input.
        """
        raise NotImplementedError()
    def evaluate(self, evaluation_dataset_func: CachedDatasetFunc) -> EvaluationResults:
        """
        Evaluate the model by repeatedly calling `.use` on every text of the evaluation dataset and comparing the resulting category with the expected one.

        Returns an `EvaluationResults` object with the number of evaluated texts, the number of correct predictions, and the cumulative score.
        """
        evaluated: int = 0
        correct: int = 0
        score: float = 0.0

        for review in evaluation_dataset_func():
            resulting_category = self.use(review.text)
            evaluated += 1
            # A prediction counts as correct if it rounds to the same star rating as the expected category.
            correct += 1 if round(resulting_category) == round(review.category) else 0
            # Partial credit: 1 for an exact match, decreasing linearly with the distance between the
            # predicted and expected categories (dividing by 4 assumes 1-to-5 star ratings, where 4 is
            # the largest possible distance).
            score += 1 - (abs(resulting_category - review.category) / 4)

        return EvaluationResults(correct=correct, evaluated=evaluated, score=score)
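# The following is an illustrative sketch, not part of the project: a minimal concrete subclass
# of `BaseSentimentAnalyzer`, assuming that `Category` behaves like a float star rating in the
# 1-to-5 range, as the arithmetic in `evaluate` suggests.
class _ExampleSentimentAnalyzer(BaseSentimentAnalyzer):
    """
    Toy analyzer that ignores its input and always predicts the middle category.
    """

    def train(self, training_dataset_func: CachedDatasetFunc, validation_dataset_func: CachedDatasetFunc) -> None:
        # Nothing to learn: the prediction is constant.
        pass

    def use(self, text: Text) -> Category:
        # Hypothetical value: assumes `Category` is float-compatible.
        return 3.0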
@dataclasses.dataclass
class EvaluationResults:
    """
    Container for the results of a dataset evaluation.
    """

    correct: int
    evaluated: int
    score: float

    def __repr__(self):
        return f"<EvaluationResults: {self!s}>"

    def __str__(self):
        return f"{self.evaluated} evaluated, {self.correct} correct, {self.correct / self.evaluated:.2%} accuracy, {self.score:.2f} score, {self.score / self.evaluated:.2%} score accuracy"
class AlreadyTrainedError(Exception):
    """
    This model has already been trained and cannot be trained again.
    """


class NotTrainedError(Exception):
    """
    This model has not been trained yet.
    """


class TrainingFailedError(Exception):
    """
    The model wasn't able to complete the training and should not be used anymore.
    """
__all__ = (
    "BaseSentimentAnalyzer",
    "AlreadyTrainedError",
    "NotTrainedError",
    "TrainingFailedError",
)