% masterthesis-latex/glossary.tex

% acronyms
\newacronym{MC}{MC}{Monte Carlo}
\newacronym{MCDO}{MCDO}{Monte Carlo Dropout}
\newacronym{MCBN}{MCBN}{Monte Carlo Batch Normalisation}
\newacronym{MLP}{MLP}{multilayer perceptron}
\newacronym{NMS}{NMS}{non-maximum suppression}
\newacronym{OSE}{OSE}{open set error}
\newacronym{SSD}{SSD}{Single Shot MultiBox Detector}
\newacronym{pdf}{pdf}{probability density function}
% terms
\newglossaryentry{BGR}{
name={BGR},
description={
stands for the three colour channels blue, green, and red in this order
}
}
\newglossaryentry{Caffe}{
name={Caffe},
description={
is a deep learning framework written in C++
}
}
\newglossaryentry{CCTV}{
name={CCTV},
description={
stands for closed-circuit television or video surveillance
}
}
\newglossaryentry{Dirichlet distribution}{
name={Dirichlet distribution},
description={
is a family of continuous multivariate probability distributions,
named after the mathematician Peter Gustav Lejeune Dirichlet
}
}
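% For reference, a common parameterisation (a standard form, not tied to
% this thesis's notation): with concentration parameters
% $\alpha_1, \dots, \alpha_K > 0$, the Dirichlet density on the
% probability simplex is
%   $f(x_1, \dots, x_K; \alpha) = \frac{1}{B(\alpha)} \prod_{k=1}^{K} x_k^{\alpha_k - 1}$,
% where $B(\alpha)$ is the multivariate beta function.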
\newglossaryentry{entropy}{
name={entropy},
description={
quantifies the average amount of information conveyed by a random
outcome. Likely events carry less information than rare events. For
classification probabilities, a uniform prediction has a higher entropy
than a prediction with a clear ``winner''
}
}
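% For concreteness (a standard form of this definition): the Shannon
% entropy of a categorical prediction $p = (p_1, \dots, p_K)$ is
%   $H(p) = -\sum_{k=1}^{K} p_k \log p_k$,
% which is maximal for the uniform prediction $p_k = 1/K$ and zero for a
% one-hot prediction.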
\newglossaryentry{Hopfield network}{
name={Hopfield network},
description={
is a form of recurrent neural network used as an associative memory
with binary threshold units. It is guaranteed to converge to a local
minimum of its energy function, which is not necessarily the correct
memory
}
}
\newglossaryentry{posterior}{
name={posterior},
description={
the class probability distribution output by a neural network for a
given input
}
}
\newglossaryentry{RGB}{
name={RGB},
description={
stands for the three colour channels red, green, and blue in this order
}
}
\newglossaryentry{vanilla}{
name={vanilla},
description={
describes the plain, unmodified version of something
}
}
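% Usage sketch, assuming the standard glossaries package workflow (the
% actual thesis preamble may differ): in the main document,
%   \usepackage[acronym]{glossaries}
%   \makeglossaries
%   \loadglsentries{glossary}
% then reference entries in the text, e.g.
%   ... a \gls{MLP} trained with \gls{MCDO} ...
% and print the lists with
%   \printglossaries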