diff --git a/tests/main.py b/tests/main.py
new file mode 100644
index 0000000..1709913
--- /dev/null
+++ b/tests/main.py
@@ -0,0 +1,8 @@
+from test_metrics import run_test_metrics
+
+def main():
+    """Run the data generation and metrics analysis steps back-to-back."""
+    run_test_metrics()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
new file mode 100644
index 0000000..c3c2b38
--- /dev/null
+++ b/tests/test_metrics.py
@@ -0,0 +1,37 @@
+"""
+This module contains test functions for calibration metrics.
+
+The main test function generates synthetic data and tests the CalibrationMetrics class.
+"""
+
+import numpy as np
+from calzone.metrics import CalibrationMetrics
+
+def run_test_metrics():
+    """
+    Test function for calibration metrics.
+
+    Generates synthetic binary classification data and tests the CalibrationMetrics class.
+    The test includes:
+    - Generating random binary labels from a binomial distribution
+    - Creating probability predictions from a beta distribution
+    - Testing the CalibrationMetrics class with the generated data
+    Note that this is a smoke test: it does not verify the correctness of the metric values.
+    Returns:
+        None
+
+    Raises:
+        AssertionError: If the results from CalibrationMetrics are not returned as a dictionary
+    """
+    # Generate sample data
+    np.random.seed(123)
+    n_samples = 1000
+    y_true = np.random.binomial(1, 0.3, n_samples)
+    y_proba = np.zeros((n_samples, 2))
+    y_proba[:, 1] = np.random.beta(0.5, 0.5, n_samples)
+    y_proba[:, 0] = 1 - y_proba[:, 1]  # Ensure each row sums to 1
+
+    # Test the CalibrationMetrics class
+    metrics = CalibrationMetrics(class_to_calculate=1)
+    results = metrics.calculate_metrics(y_true, y_proba, metrics='all')
+    assert isinstance(results, dict)
\ No newline at end of file
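
Reviewer note: a minimal sketch of how the new smoke test could be exercised and extended by hand. It reuses only the calls that appear in the diff (CalibrationMetrics(class_to_calculate=1) and calculate_metrics(..., metrics='all')); the row-sum check and the printing loop are illustrative additions, not part of calzone's documented contract.

    import numpy as np
    from calzone.metrics import CalibrationMetrics

    np.random.seed(123)
    n_samples = 1000
    y_true = np.random.binomial(1, 0.3, n_samples)

    # Mirror the probability construction in tests/test_metrics.py:
    # column 1 holds P(class 1), column 0 its complement.
    p1 = np.random.beta(0.5, 0.5, n_samples)
    y_proba = np.column_stack([1.0 - p1, p1])
    assert np.allclose(y_proba.sum(axis=1), 1.0)  # rows are valid distributions

    metrics = CalibrationMetrics(class_to_calculate=1)
    results = metrics.calculate_metrics(y_true, y_proba, metrics='all')

    # Beyond the dict-type assertion in the diff, confirm the result is
    # non-empty and print each metric for manual inspection.
    assert isinstance(results, dict) and len(results) > 0
    for name, value in results.items():
        print(f"{name}: {value}")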