
Commit

Remove tests that facilitated refactor + tag patch release (#73)
* Replace evaluation_metrics_row with refactored_evaluation_metrics_row

* Move metrics to correct file

* remove tests

* update docs

* unlint

* fix missing nothing
hannahilea authored May 16, 2022
1 parent ea55ce0 commit 4a6390a
Showing 7 changed files with 446 additions and 596 deletions.
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "Lighthouse"
uuid = "ac2c24cd-07f0-4848-96b2-1b82c3ea0e59"
authors = ["Beacon Biosignals, Inc."]
-version = "0.14.6"
+version = "0.14.7"

[deps]
ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd"
7 changes: 3 additions & 4 deletions docs/src/index.md
@@ -22,9 +22,6 @@ Lighthouse.is_early_stopping_exception
learn!
evaluate!
predict!
-Lighthouse._calculate_ea_kappas
-Lighthouse._calculate_ira_kappas
-Lighthouse._calculate_spearman_correlation
```

## The logging interface
@@ -85,8 +82,10 @@ get_hardened_metrics_multiclass
LabelMetricsRow
get_label_metrics_multirater
get_label_metrics_multirater_multiclass
Lighthouse.refactored_evaluation_metrics_row
Lighthouse._evaluation_row
+Lighthouse._calculate_ea_kappas
+Lighthouse._calculate_ira_kappas
+Lighthouse._calculate_spearman_correlation
```

## Utilities
524 changes: 0 additions & 524 deletions src/learn.jl

Large diffs are not rendered by default.

419 changes: 388 additions & 31 deletions src/metrics.jl

Large diffs are not rendered by default.

52 changes: 52 additions & 0 deletions src/plotting.jl
@@ -493,3 +493,55 @@ plot_roc_curves(args...; kw...) = axisplot(plot_roc_curves!, args; kw...)
annotation_text_size=20)
"""
plot_kappas(args...; kw...) = axisplot(plot_kappas!, args; kw...)

+#####
+##### Deprecation support
+#####
+
+"""
+    evaluation_metrics_plot(predicted_hard_labels::AbstractVector,
+                            predicted_soft_labels::AbstractMatrix,
+                            elected_hard_labels::AbstractVector,
+                            classes,
+                            thresholds=0.0:0.01:1.0;
+                            votes::Union{Nothing,AbstractMatrix}=nothing,
+                            strata::Union{Nothing,AbstractVector{Set{T}} where T}=nothing,
+                            optimal_threshold_class::Union{Nothing,Integer}=nothing)
+
+Return a plot and dictionary containing a battery of classifier performance
+metrics that each compare `predicted_soft_labels` and/or `predicted_hard_labels`
+against `elected_hard_labels`.
+
+See [`evaluation_metrics`](@ref) for a description of the arguments.
+
+This method is deprecated in favor of calling `evaluation_metrics`
+and [`evaluation_metrics_plot`](@ref) separately.
+"""
+function evaluation_metrics_plot(predicted_hard_labels::AbstractVector,
+                                 predicted_soft_labels::AbstractMatrix,
+                                 elected_hard_labels::AbstractVector, classes, thresholds;
+                                 votes::Union{Nothing,AbstractMatrix}=nothing,
+                                 strata::Union{Nothing,AbstractVector{Set{T}} where T}=nothing,
+                                 optimal_threshold_class::Union{Nothing,Integer}=nothing)
+    Base.depwarn("""
+                 ```
+                 evaluation_metrics_plot(predicted_hard_labels::AbstractVector,
+                                         predicted_soft_labels::AbstractMatrix,
+                                         elected_hard_labels::AbstractVector, classes, thresholds;
+                                         votes::Union{Nothing,AbstractMatrix}=nothing,
+                                         strata::Union{Nothing,AbstractVector{Set{T}} where T}=nothing,
+                                         optimal_threshold_class::Union{Nothing,Integer}=nothing)
+                 ```
+                 has been deprecated in favor of
+                 ```
+                 plot_dict = evaluation_metrics(predicted_hard_labels, predicted_soft_labels,
+                                                elected_hard_labels, classes, thresholds;
+                                                votes, strata, optimal_threshold_class)
+                 (evaluation_metrics_plot(plot_dict), plot_dict)
+                 ```
+                 """, :evaluation_metrics_plot)
+    plot_dict = evaluation_metrics(predicted_hard_labels, predicted_soft_labels,
+                                   elected_hard_labels, classes, thresholds; votes, strata,
+                                   optimal_threshold_class)
+    return evaluation_metrics_plot(plot_dict), plot_dict
+end
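
For callers hitting this deprecation path, the replacement is the two-step form named in the `depwarn` message above. A minimal migration sketch; the input variables are placeholders, not names from this diff:

```julia
# Compute the metrics dictionary first, then build the plot from it.
# `predicted_hard`, `predicted_soft`, `elected_hard`, and `classes` are
# placeholder inputs; see `evaluation_metrics` for the argument contract.
plot_dict = evaluation_metrics(predicted_hard, predicted_soft, elected_hard,
                               classes, 0.0:0.01:1.0;
                               votes=nothing, strata=nothing,
                               optimal_threshold_class=nothing)
plot = evaluation_metrics_plot(plot_dict)
# The deprecated method returned both at once, i.e. `(plot, plot_dict)`.
```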
2 changes: 2 additions & 0 deletions src/utilities.jl
@@ -2,6 +2,8 @@
##### miscellaneous
#####

+has_value(x) = !isnothing(x) && !ismissing(x)
+
function increment_at!(array, index_lists)
for index_list in index_lists
array[index_list...] += 1
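
Both helpers in this hunk are small enough to sanity-check at the REPL. A quick sketch, assuming the tail of `increment_at!` (below the fold of this hunk) simply closes the loop and returns:

```julia
using Lighthouse: has_value, increment_at!  # internal helpers, not exported

has_value(nothing)  # false
has_value(missing)  # false
has_value(0)        # true

# Each index list addresses one array cell; repeated lists accumulate.
counts = zeros(Int, 2, 2)
increment_at!(counts, [(1, 2), (1, 2), (2, 1)])
counts  # [0 2; 1 0]
```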
36 changes: 0 additions & 36 deletions test/learn.jl
@@ -22,28 +22,6 @@ function Lighthouse.loss_and_prediction(c::TestClassifier, dummy_input_batch)
    return c.dummy_loss, dummy_soft_label_batch
end

-function evaluation_refactor_test(predicted_hard_labels, predicted_soft_labels,
-                                  elected_hard_labels, classes,
-                                  thresholds=0.0:0.01:1.0; votes=nothing,
-                                  strata=nothing, optimal_threshold_class=missing)
-    orig_row = Lighthouse.evaluation_metrics_row(predicted_hard_labels,
-                                                 predicted_soft_labels,
-                                                 elected_hard_labels, classes, thresholds;
-                                                 votes, strata, optimal_threshold_class)
-    new_row = Lighthouse.refactored_evaluation_metrics_row(predicted_hard_labels,
-                                                           predicted_soft_labels,
-                                                           elected_hard_labels, classes,
-                                                           thresholds;
-                                                           votes, strata,
-                                                           optimal_threshold_class)
-    orig_nt = NamedTuple(orig_row)
-    new_nt = NamedTuple(new_row)
-    for k in keys(orig_nt)
-        @test isequal(orig_nt[k], new_nt[k])
-    end
-    return nothing
-end

@testset "Multi-class learn!(::TestModel, ...)" begin
mktempdir() do tmpdir
model = TestClassifier(1000000.0, ["class_$i" for i in 1:5])
@@ -118,8 +96,6 @@ end
eachrow(votes))
evaluate!(predicted_hard, predicted_soft, elected_hard, model.classes, logger;
logger_prefix="wheeeeeee", logger_suffix="_for_all_time", votes)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes; votes)
@test length(logger.logged["wheeeeeee/time_in_seconds_for_all_time"]) == 1
@test length(logger.logged["wheeeeeee/metrics_for_all_time"]) == 1

@@ -132,9 +108,6 @@ end
for i in 1:size(votes, 1)]
plot_data = evaluation_metrics(predicted_hard, predicted_soft, elected_hard,
model.classes, 0.0:0.01:1.0; votes, strata)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes,
-                                 0.0:0.01:1.0; votes, strata)
@test haskey(plot_data, "stratified_kappas")
plot = evaluation_metrics_plot(plot_data)

@@ -271,18 +244,13 @@ end

evaluate!(predicted_hard, predicted_soft, elected_hard, model.classes, logger;
logger_prefix="wheeeeeee", logger_suffix="_for_all_time", votes=nothing)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes;
-                                 votes=nothing)
plot_data = last(logger.logged["wheeeeeee/metrics_for_all_time"])
@test !haskey(plot_data, "per_class_IRA_kappas")
@test !haskey(plot_data, "multiclass_IRA_kappas")
test_evaluation_metrics_roundtrip(plot_data)

evaluate!(predicted_hard, predicted_soft, elected_hard, model.classes, logger;
logger_prefix="wheeeeeee", logger_suffix="_for_all_time", votes=votes)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes; votes)
plot_data = last(logger.logged["wheeeeeee/metrics_for_all_time"])
@test haskey(plot_data, "per_class_IRA_kappas")
@test haskey(plot_data, "multiclass_IRA_kappas")
@@ -292,14 +260,10 @@ end
evaluate!(predicted_hard, predicted_soft, elected_hard, model.classes, logger;
logger_prefix="wheeeeeee", logger_suffix="_for_all_time", votes=votes,
optimal_threshold_class=1)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes; votes, optimal_threshold_class=1)
plot_data_1 = last(logger.logged["wheeeeeee/metrics_for_all_time"])
evaluate!(predicted_hard, predicted_soft, elected_hard, model.classes, logger;
logger_prefix="wheeeeeee", logger_suffix="_for_all_time", votes=votes,
optimal_threshold_class=2)
-        evaluation_refactor_test(predicted_hard, predicted_soft, elected_hard,
-                                 model.classes; votes, optimal_threshold_class=2)
plot_data_2 = last(logger.logged["wheeeeeee/metrics_for_all_time"])
test_evaluation_metrics_roundtrip(plot_data_2)


2 comments on commit 4a6390a

@hannahilea (Contributor, Author)

@JuliaRegistrator

Registration pull request created: JuliaRegistries/General/60349

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v0.14.7 -m "<description of version>" 4a6390a7093fc8efca54115d2de91bf07ec1e383
git push origin v0.14.7
