From 1b05527bc8ce8ed3c1e96cb98a6c6054d438c470 Mon Sep 17 00:00:00 2001
From: jasonfan1997
Date: Tue, 17 Dec 2024 10:24:52 -0500
Subject: [PATCH] Fixing bib

---
 paper/paper.bib | 88 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 87 insertions(+), 1 deletion(-)

diff --git a/paper/paper.bib b/paper/paper.bib
index 3a7661c..59284c1 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -280,4 +280,90 @@ @article{gu_likelihod_ratio
   eprint = {https://academic.oup.com/biostatistics/article-pdf/12/1/87/17739283/kxq045.pdf}
 }
 
-@InProceedings{guo_calibration,
\ No newline at end of file
+@InProceedings{guo_calibration,
+  title = {On Calibration of Modern Neural Networks},
+  author = {Chuan Guo and Geoff Pleiss and Yu Sun and Kilian Q. Weinberger},
+  booktitle = {Proceedings of the 34th International Conference on Machine Learning},
+  pages = {1321--1330},
+  year = {2017},
+  editor = {Precup, Doina and Teh, Yee Whye},
+  volume = {70},
+  series = {Proceedings of Machine Learning Research},
+  month = {06--11 Aug},
+  publisher = {PMLR},
+  pdf = {http://proceedings.mlr.press/v70/guo17a/guo17a.pdf},
+  url = {https://proceedings.mlr.press/v70/guo17a.html},
+  abstract = {Confidence calibration – the problem of predicting probability estimates representative of the true correctness likelihood – is important for classification models in many applications. We discover that modern neural networks, unlike those from a decade ago, are poorly calibrated. Through extensive experiments, we observe that depth, width, weight decay, and Batch Normalization are important factors influencing calibration. We evaluate the performance of various post-processing calibration methods on state-of-the-art architectures with image and document classification datasets. Our analysis and experiments not only offer insights into neural network learning, but also provide a simple and straightforward recipe for practical settings: on most datasets, temperature scaling – a single-parameter variant of Platt Scaling – is surprisingly effective at calibrating predictions.}
+}
+
+@article{hl_test,
+  author = {David W. Hosmer and Stanley Lemesbow},
+  title = {Goodness of fit tests for the multiple logistic regression model},
+  journal = {Communications in Statistics - Theory and Methods},
+  volume = {9},
+  number = {10},
+  pages = {1043--1069},
+  year = {1980},
+  publisher = {Taylor \& Francis},
+  doi = {10.1080/03610928008827941},
+  url = {https://www.tandfonline.com/doi/abs/10.1080/03610928008827941},
+  eprint = {https://www.tandfonline.com/doi/pdf/10.1080/03610928008827941}
+}
+
+@article{huang_tutorial,
+  author = {Huang, Yingxiang and Li, Wentao and Macheret, Fima and Gabriel, Rodney A and Ohno-Machado, Lucila},
+  title = {A tutorial on calibration measurements and calibration models for clinical prediction models},
+  journal = {Journal of the American Medical Informatics Association},
+  volume = {27},
+  number = {4},
+  pages = {621--633},
+  year = {2020},
+  month = {02},
+  abstract = {Our primary objective is to provide the clinical informatics community with an introductory tutorial on calibration measurements and calibration models for predictive models using existing R packages and custom implemented code in R on real and simulated data. Clinical predictive model performance is commonly published based on discrimination measures, but use of models for individualized predictions requires adequate model calibration. This tutorial is intended for clinical researchers who want to evaluate predictive models in terms of their applicability to a particular population. It is also for informaticians and for software engineers who want to understand the role that calibration plays in the evaluation of a clinical predictive model, and to provide them with a solid starting point to consider incorporating calibration evaluation and calibration models in their work. Covered topics include (1) an introduction to the importance of calibration in the clinical setting, (2) an illustration of the distinct roles that discrimination and calibration play in the assessment of clinical predictive models, (3) a tutorial and demonstration of selected calibration measurements, (4) a tutorial and demonstration of selected calibration models, and (5) a brief discussion of limitations of these methods and practical suggestions on how to use them in practice.},
+  issn = {1527-974X},
+  doi = {10.1093/jamia/ocz228},
+  url = {https://doi.org/10.1093/jamia/ocz228},
+  eprint = {https://academic.oup.com/jamia/article-pdf/27/4/621/34153143/ocz228.pdf}
+}
+
+@inproceedings{nixon_ace,
+  title = {Measuring Calibration in Deep Learning},
+  author = {Nixon, Jeremy and Dusenberry, Michael W and Zhang, Linchuan and Jerfel, Ghassen and Tran, Dustin},
+  booktitle = {CVPR Workshops},
+  volume = {2},
+  number = {7},
+  year = {2019}
+}
+
+@article{spiegelhalter_z,
+  title = {Probabilistic prediction in patient management and clinical trials},
+  author = {Spiegelhalter, David J},
+  journal = {Statistics in Medicine},
+  volume = {5},
+  number = {5},
+  pages = {421--433},
+  year = {1986},
+  publisher = {Wiley}
+}
+
+@inproceedings{prevalence_shift,
+  author = {Tian, Junjiao and Liu, Yen-Cheng and Glaser, Nathaniel and Hsu, Yen-Chang and Kira, Zsolt},
+  booktitle = {Advances in Neural Information Processing Systems},
+  editor = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. Lin},
+  pages = {8101--8113},
+  publisher = {Curran Associates, Inc.},
+  title = {Posterior Re-calibration for Imbalanced Datasets},
+  url = {https://proceedings.neurips.cc/paper_files/paper/2020/file/5ca359ab1e9e3b9c478459944a2d9ca5-Paper.pdf},
+  volume = {33},
+  year = {2020}
+}
+
+@article{Walsh_overview,
+  title = {Beyond discrimination: a comparison of calibration methods and clinical usefulness of predictive models of readmission risk},
+  author = {Walsh, Colin G and Sharman, Kavya and Hripcsak, George},
+  journal = {Journal of Biomedical Informatics},
+  volume = {76},
+  pages = {9--18},
+  year = {2017},
+  publisher = {Elsevier}
+}
\ No newline at end of file