@inproceedings{raginsky2016information,
title = "Information-theoretic analysis of stability and bias of learning algorithms",
abstract = "Machine learning algorithms can be viewed as stochastic transformations that map training data to hypotheses. Following Bousquet and Elisseeff, we say that such an algorithm is stable if its output does not depend too much on any individual training example. Since stability is closely connected to generalization capabilities of learning algorithms, it is of theoretical and practical interest to obtain sharp quantitative estimates on the generalization bias of machine learning algorithms in terms of their stability properties. We propose several information-theoretic measures of algorithmic stability and use them to upper-bound the generalization bias of learning algorithms. Our framework is complementary to the information-theoretic methodology developed recently by Russo and Zou.",
author = "Maxim Raginsky and Alexander Rakhlin and Tsao Matthew and Yihong Wu and Xu Aolin",
note = "Publisher Copyright: {\textcopyright} 2016 IEEE.; 2016 IEEE Information Theory Workshop, ITW 2016 ; Conference date: 11-09-2016 Through 14-09-2016",
year = "2016",
month = oct,
day = "21",
doi = "10.1109/ITW.2016.7606789",
language = "English (US)",
series = "2016 IEEE Information Theory Workshop, ITW 2016",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "26--30",
booktitle = "2016 IEEE Information Theory Workshop, ITW 2016",
address = "United States",
}
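
A minimal LaTeX sketch of the flavor of bound the abstract describes, assuming an i.i.d. training sample S = (Z_1, ..., Z_n), an output hypothesis W, and a loss that is sigma-sub-Gaussian under the data distribution for every fixed hypothesis. This is the canonical mutual-information bound from the closely related Russo--Zou framework mentioned in the abstract, not necessarily the exact stability-based statement proved in the paper:

\[
  \bigl|\, \mathbb{E}\bigl[\mathrm{gen}(S, W)\bigr] \,\bigr|
  \;\le\;
  \sqrt{\frac{2\sigma^{2}}{n}\, I(S; W)},
\]

where $\mathrm{gen}(S, W)$ denotes the gap between the population risk and the empirical risk of $W$ on $S$, and $I(S; W)$ is the mutual information between the training data and the algorithm's output. Intuitively, the less the output hypothesis reveals about any individual training example (a stability property), the smaller $I(S; W)$ and hence the generalization bias.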