From f4ae75bb66a9cf1eec78034178be4e9f78563e7b Mon Sep 17 00:00:00 2001
From: Jim Martens
Date: Mon, 3 Jun 2019 12:32:28 +0200
Subject: [PATCH] Added missing literature to bibtex file

Signed-off-by: Jim Martens
---
 ma.bib | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/ma.bib b/ma.bib
index 7a5d784..31befe2 100644
--- a/ma.bib
+++ b/ma.bib
@@ -652,4 +652,49 @@ to construct explicit models for non-normal classes. Application includes infere
   timestamp = {2019.03.07},
 }
+
+@Online{Miller2018a,
+  author      = {Dimity Miller and Feras Dayoub and Michael Milford and Niko Sünderhauf},
+  title       = {Evaluating Merging Strategies for Sampling-based Uncertainty Techniques in Object Detection},
+  date        = {2018-09-17},
+  url         = {https://arxiv.org/abs/1809.06006v3},
+  eprint      = {1809.06006v3},
+  eprintclass = {cs.CV},
+  eprinttype  = {arXiv},
+  abstract    = {There has been a recent emergence of sampling-based techniques for estimating epistemic uncertainty in deep neural networks. While these methods can be applied to classification or semantic segmentation tasks by simply averaging samples, this is not the case for object detection, where detection sample bounding boxes must be accurately associated and merged. A weak merging strategy can significantly degrade the performance of the detector and yield an unreliable uncertainty measure. This paper provides the first in-depth investigation of the effect of different association and merging strategies. We compare different combinations of three spatial and two semantic affinity measures with four clustering methods for MC Dropout with a Single Shot Multi-Box Detector. Our results show that the correct choice of affinity-clustering combination can greatly improve the effectiveness of the classification and spatial uncertainty estimation and the resulting object detection performance. We base our evaluation on a new mix of datasets that emulate near open-set conditions (semantically similar unknown classes), distant open-set conditions (semantically dissimilar unknown classes) and the common closed-set conditions (only known classes).},
+  file        = {:/home/jim/Documents/Studium/MA/Literatur/42_evaluating-merging-strategies-sampling-based-uncertainty-techniques-object-detection.pdf:PDF},
+  keywords    = {cs.CV},
+  owner       = {jim},
+  timestamp   = {2019.06.03},
+}
+
+@InProceedings{Chen2017,
+  author    = {Jinghui Chen and Saket Sathe and Charu Aggarwal and Deepak Turaga},
+  title     = {Outlier Detection with Autoencoder Ensembles},
+  booktitle = {Proceedings of the 2017 {SIAM} International Conference on Data Mining},
+  year      = {2017},
+  publisher = {Society for Industrial and Applied Mathematics},
+  pages     = {90--98},
+  doi       = {10.1137/1.9781611974973.11},
+  abstract  = {In this paper, we introduce autoencoder ensembles for unsupervised outlier detection. One problem with neural networks is that they are sensitive to noise and often require large data sets to work robustly, while increasing data size makes them slow. As a result, there are only a few existing works in the literature on the use of neural networks in outlier detection. This paper shows that neural networks can be a very competitive technique to other existing methods. The basic idea is to randomly vary on the connectivity architecture of the autoencoder to obtain significantly better performance. Furthermore, we combine this technique with an adaptive sampling method to make our approach more efficient and effective. Experimental results comparing the proposed approach with state-of-the-art detectors are presented on several benchmark data sets showing the accuracy of our approach.},
+  file      = {:/home/jim/Documents/Studium/MA/Literatur/44_Autoencoder-ensembles.pdf:PDF},
+  owner     = {jim},
+  timestamp = {2019.06.03},
+}
+
+@Article{Kazantsev2015,
+  author    = {Pavel Aleksandrovich Kazantsev and Pavel Vyacheslavovich Skribtsov},
+  title     = {Pedestrian Detection in {RGB}-D Data Using Deep Autoencoders},
+  journal   = {American Journal of Applied Sciences},
+  year      = {2015},
+  volume    = {12},
+  number    = {11},
+  pages     = {847--856},
+  doi       = {10.3844/ajassp.2015.847.856},
+  abstract  = {Recent popularity of RGB-D sensors mostly comes from the fact that RGB-images and depth maps supplement each other in machine vision tasks, such as object detection and recognition. This article addresses a problem of RGB and depth data fusion for pedestrian detection. We propose pedestrian detection algorithm that involves fusion of outputs of 2D- and 3D-detectors based on deep autoencoders. Outputs are fused with neural network classifier trained using a dataset which entries are represented by pairs of reconstruction errors of 2D- and 3D-autoencoders. Experimental results show that fusing outputs almost totally eliminate false accepts (precision is 99.8\%) and brings recall to 93.2\% when tested on the combined dataset that includes a lot of samples with significantly distorted human silhouette. Though we use walking pedestrians as objects of interest, there are few pedestrian-specific processing blocks in this algorithm, so, in general, it can be applied to any type of objects.},
+  file      = {:/home/jim/Documents/Studium/MA/Literatur/45_pedestrian-detection-in-rgbd-using-autoencoders.pdf:PDF},
+  owner     = {jim},
+  publisher = {Science Publications},
+  timestamp = {2019.06.03},
+}
 
 @Comment{jabref-meta: databaseType:biblatex;}