Second pass over thesis
- added more glossary terms - added crucial information - improved language Signed-off-by: Jim Martens <github@2martens.de>
This commit is contained in:
25
glossary.tex
25
glossary.tex
@ -1,4 +1,8 @@
|
||||
% acronyms
|
||||
% Acronym format (glossaries package): \newacronym{label}{short form}{long form}
\newacronym{MC}{MC}{Monte Carlo}
|
||||
\newacronym{MCDO}{MCDO}{Monte Carlo Dropout}
|
||||
\newacronym{MCBN}{MCBN}{Monte Carlo Batch Normalisation}
|
||||
\newacronym{MLP}{MLP}{multilayer perceptron}
|
||||
\newacronym{NMS}{NMS}{non-maximum suppression}
|
||||
\newacronym{OSE}{OSE}{open set error}
|
||||
\newacronym{SSD}{SSD}{Single Shot MultiBox Detector}
|
||||
@ -17,6 +21,19 @@
|
||||
is a deep learning framework written in C++
|
||||
}
|
||||
}
|
||||
% Glossary entry: CCTV (description continues the entry name in running text,
% matching the lowercase style of the surrounding entries)
\newglossaryentry{CCTV}{
|
||||
name={CCTV},
|
||||
description={
|
||||
stands for closed-circuit television or video surveillance
|
||||
}
|
||||
}
|
||||
% Glossary entry: Dirichlet distribution.
% Fix: original description read "is named after Peter Dirichlet and a family
% of probability distributions", which parses as "named after X and a family";
% reordered so the entry says what the distribution IS first.
\newglossaryentry{Dirichlet distribution}{
|
||||
name={Dirichlet distribution},
|
||||
description={
|
||||
is a family of probability distributions named after
|
||||
||||
Peter Dirichlet
|
||||
}
|
||||
}
|
||||
\newglossaryentry{entropy}{
|
||||
name={entropy},
|
||||
description={
|
||||
@ -24,6 +41,14 @@
|
||||
events have a lower entropy than rare events. In the case of classification probabilities, uniform predictions contain more information than predictions with a clear ``winner''
|
||||
}
|
||||
}
|
||||
% Glossary entry: Hopfield network.
% Fixes: straight double quotes "associative" replaced with LaTeX quotes
% ``associative'' (straight " renders as a wrong glyph under pdfLaTeX);
% "memory systems" -> "memory system" (singular subject); added missing
% article before "local minimum" and smoothed the final clause.
\newglossaryentry{Hopfield network}{
|
||||
name={Hopfield network},
|
||||
description={
|
||||
is a recurrent neural network. Used as an ``associative'' memory
|
||||
||||
system with binary thresholds. Guaranteed to converge to a local
|
||||
||||
minimum, which can, however, be the wrong one
|
||||
}
|
||||
}
|
||||
\newglossaryentry{posterior}{
|
||||
name={posterior},
|
||||
description={
|
||||
|
||||
Reference in New Issue
Block a user