diff --git a/ma.bib b/ma.bib
index aadc30d..973d1f6 100644
--- a/ma.bib
+++ b/ma.bib
@@ -46,7 +46,6 @@ general idea: robot drives in environment, autoencoder says it's novel, robot sw
  journal = {arXiv e-prints},
  year = {2015},
  date = {2015-05-21},
- eid = {arXiv:1505.05424v2},
  eprint = {1505.05424v2},
  eprintclass = {stat.ML},
  eprinttype = {arXiv},
@@ -86,7 +85,6 @@ uncertainty in weights - all weights represented by probability distributions ov
  series = {Proceedings of Machine Learning Research},
  publisher = {PMLR},
  pages = {1050--1059},
- url = {http://proceedings.mlr.press/v48/gal16.html},
  abstract = {Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs – extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout’s uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout’s uncertainty in deep reinforcement learning. dropout as bayesian approximation},
@@ -197,7 +195,6 @@ and 17.0% which is considerably better than the previous state-of-the-art. The n
  title = {Bias-Reduced Uncertainty Estimation for Deep Neural Classifiers},
  journal = {arXiv e-prints},
  date = {2018-09-30},
- eid = {arXiv:1805.08206v3},
  eprint = {1805.08206v3},
  eprintclass = {cs.LG},
  eprinttype = {arXiv},
@@ -214,7 +211,6 @@ and 17.0% which is considerably better than the previous state-of-the-art. The n
  title = {Evaluating Bayesian Deep Learning Methods for Semantic Segmentation},
  journal = {arXiv e-prints},
  date = {2018-11-30},
- eid = {arXiv:1811.12709v1},
  eprint = {1811.12709v1},
  eprintclass = {cs.CV},
  eprinttype = {arXiv},
@@ -230,7 +226,6 @@ and 17.0% which is considerably better than the previous state-of-the-art. The n
  title = {Closed Form Variational Objectives For Bayesian Neural Networks with a Single Hidden Layer},
  journal = {arXiv e-prints},
  date = {2018-12-02},
- eid = {arXiv:1811.00686v2},
  eprint = {1811.00686v2},
  eprintclass = {stat.ML},
  eprinttype = {arXiv},