hyperimpute.utils.tester module

class Eval(metric: str = 'aucroc')

Bases: object

Helper class for evaluating model performance.

Parameters:

metric – str, default="aucroc". The metric to use for evaluation. Possible values: ["aucprc", "aucroc"].

average_precision_score(y_test: ndarray, y_pred_proba: ndarray) → float

    Compute the average precision (AUCPRC) of the predicted probabilities against the true labels.

get_metric() → str

    Return the name of the configured metric.

roc_auc_score(y_test: ndarray, y_pred_proba: ndarray) → float

    Compute the area under the ROC curve of the predicted probabilities against the true labels.

score_proba(y_test: ndarray, y_pred_proba: ndarray) → float

    Score the predicted probabilities using the configured metric.
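
A minimal usage sketch of Eval with synthetic data. The exact shape expected for y_pred_proba (1-D positive-class scores vs. a full probability matrix) is an assumption here:

    import numpy as np

    from hyperimpute.utils.tester import Eval

    # Synthetic binary labels and predicted positive-class probabilities.
    y_test = np.array([0, 1, 1, 0, 1])
    y_pred_proba = np.array([0.1, 0.8, 0.65, 0.3, 0.9])  # assumed 1-D; the expected shape may differ

    evaluator = Eval(metric="aucroc")

    print(evaluator.get_metric())                         # "aucroc"
    print(evaluator.roc_auc_score(y_test, y_pred_proba))  # area under the ROC curve
    print(evaluator.score_proba(y_test, y_pred_proba))    # score under the configured metric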
evaluate_estimator(estimator: Any, X: DataFrame, Y: Series, n_folds: int = 3, metric: str = 'aucroc', seed: int = 0, pretrained: bool = False, *args: Any, **kwargs: Any) → Dict

    Evaluate a classification estimator on (X, Y) with n_folds cross-validation and return a dict of scores for the selected metric.
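
A sketch of cross-validated evaluation with evaluate_estimator. The scikit-learn classifier and dataset are illustrative, and the exact keys of the returned Dict are an assumption (they depend on the implementation):

    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import LogisticRegression

    from hyperimpute.utils.tester import evaluate_estimator

    # X is a DataFrame and y a Series, matching the documented signature.
    X, y = load_breast_cancer(return_X_y=True, as_frame=True)

    # Assumption: any estimator with a scikit-learn-style fit/predict_proba interface works.
    model = LogisticRegression(max_iter=1000)

    results = evaluate_estimator(model, X, y, n_folds=3, metric="aucroc", seed=0)
    print(results)  # Dict of cross-validation scores; exact keys depend on the implementation.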
evaluate_regression(estimator: Any, X: DataFrame, Y: Series, n_folds: int = 3, seed: int = 0, *args: Any, **kwargs: Any) → Dict

    Evaluate a regression estimator on (X, Y) with n_folds cross-validation and return a dict of scores.
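
The regression counterpart follows the same pattern; again, the estimator and dataset are illustrative and the returned Dict keys are an assumption:

    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import LinearRegression

    from hyperimpute.utils.tester import evaluate_regression

    X, y = load_diabetes(return_X_y=True, as_frame=True)

    results = evaluate_regression(LinearRegression(), X, y, n_folds=3, seed=0)
    print(results)  # Dict of cross-validation scores; exact keys depend on the implementation.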
score_classification_model(estimator: Any, X_train: DataFrame, X_test: DataFrame, y_train: Series, y_test: Series) → float

    Train the estimator on the train split and return its classification score on the test split.
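
A sketch of scoring a single train/test split with score_classification_model. That the helper fits the estimator itself is inferred from its signature, not guaranteed:

    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    from hyperimpute.utils.tester import score_classification_model

    X, y = load_breast_cancer(return_X_y=True, as_frame=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    score = score_classification_model(
        LogisticRegression(max_iter=1000), X_train, X_test, y_train, y_test
    )
    print(score)  # a single float score on the held-out split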