# Reconstructed from a whitespace-mangled unified diff (post-patch state of the
# src/learning/data.py hunk: ConfusionMatrix metrics). The diff's app.py and
# ml.py hunks show only incomplete fragments and are not reconstructed here.
# Members marked NOTE(review) are inferred from how the visible code uses them
# and must be confirmed against the original file.
import numpy as np


class ConfusionMatrix:
    """Per-class and support-weighted aggregate metrics computed from a
    multi-class confusion matrix (rows = true class, columns = predicted).

    Per-class metrics are one-vs-rest; aggregate metrics are averages of the
    per-class values weighted by class support (row sums).
    """

    def __init__(self, conf_matrix: np.ndarray, classes, dataset_y: np.ndarray) -> None:
        # NOTE(review): signature reconstructed from the visible assignments —
        # confirm parameter names/order against the original __init__.
        self.matrix = conf_matrix
        self.classes = classes
        self.total = dataset_y.shape[0]
        # Per-class support (row sums); weights every aggregate metric below.
        self.weights = np.sum(conf_matrix, axis=1)
        self.tp = np.diagonal(conf_matrix)
        self.fp = np.sum(conf_matrix, axis=0) - self.tp
        self.fn = np.sum(conf_matrix, axis=1) - self.tp
        # NOTE(review): tn is read below but its assignment is outside the
        # visible hunk; standard one-vs-rest complement assumed — confirm.
        self.tn = self.total - self.tp - self.fp - self.fn

    def divide_ignore_zero(self, num: np.ndarray, den: np.ndarray) -> np.ndarray:
        # NOTE(review): helper not visible in the hunk; reconstructed as
        # elementwise num/den yielding 0 wherever den == 0 — confirm.
        return np.divide(num, den, out=np.zeros_like(num, dtype=float), where=den != 0)

    def accuracy_per_class(self) -> np.ndarray:
        # NOTE(review): body not in the visible hunk; one-vs-rest accuracy
        # (tp + tn) / total assumed — confirm against the original.
        return (self.tp + self.tn) / self.total

    def precision_per_class(self) -> np.ndarray:
        # NOTE(review): body not in the visible hunk; mirrors recall_per_class.
        return self.divide_ignore_zero(self.tp, self.tp + self.fp)

    def recall_per_class(self) -> np.ndarray:
        return self.divide_ignore_zero(self.tp, self.tp + self.fn)

    def specificity_per_class(self) -> np.ndarray:
        return self.divide_ignore_zero(self.tn, self.tn + self.fp)

    def cohen_kappa_per_class(self) -> np.ndarray:
        # Chance agreement for the positive / negative one-vs-rest outcomes.
        p_pl = (self.tp + self.fn) * (self.tp + self.fp) / (self.total ** 2)
        p_ne = (self.tn + self.fp) * (self.tn + self.fn) / (self.total ** 2)
        p = p_pl + p_ne
        # FIX: the diff divided by (1 - p) directly, which emits nan/inf (with
        # a runtime warning) when chance agreement is exactly 1 — e.g. all
        # samples in one class. Route through divide_ignore_zero, consistent
        # with every other per-class metric in this class.
        return self.divide_ignore_zero(self.accuracy_per_class() - p, 1 - p)

    def f1_score_per_class(self) -> np.ndarray:
        prec = self.precision_per_class()
        rec = self.recall_per_class()
        # Harmonic mean of precision and recall; 0 where both are 0.
        return self.divide_ignore_zero(2 * prec * rec, prec + rec)

    def accuracy(self) -> float:
        # Overall accuracy: correctly classified samples over all samples.
        return self.tp.sum() / self.total

    def precision(self) -> float:
        return np.average(self.precision_per_class(), weights=self.weights)

    def recall(self) -> float:
        return np.average(self.recall_per_class(), weights=self.weights)

    def specificity(self) -> float:
        return np.average(self.specificity_per_class(), weights=self.weights)

    def f1_score(self) -> float:
        return np.average(self.f1_score_per_class(), weights=self.weights)

    def cohen_kappa(self) -> float:
        return np.average(self.cohen_kappa_per_class(), weights=self.weights)