diff --git a/neural-networks/bib.bib b/neural-networks/bib.bib
index fa1cddc..2c12e9f 100644
--- a/neural-networks/bib.bib
+++ b/neural-networks/bib.bib
@@ -1,14 +1,84 @@
+% This file was created with JabRef 2.10.
+% Encoding: UTF-8
+
+
+@Article{French1999,
+  Title        = {Catastrophic forgetting in connectionist networks},
+  Author       = {French, Robert M},
+  Journaltitle = {Trends in cognitive sciences},
+  Year         = {1999},
+  Number       = {4},
+  Pages        = {128--135},
+  Volume       = {3},
+
+  Owner        = {jim},
+  Timestamp    = {2018.04.20}
+}
+
+@Article{Kirkpatrick2017,
+  Title        = {Overcoming catastrophic forgetting in neural networks},
+  Author       = {Kirkpatrick, James and Pascanu, Razvan and Rabinowitz, Neil and Veness, Joel and Desjardins, Guillaume and Rusu, Andrei A and Milan, Kieran and Quan, John and Ramalho, Tiago and Grabska-Barwinska, Agnieszka and others},
+  Journaltitle = {Proceedings of the National Academy of Sciences},
+  Year         = {2017},
+  Number       = {13},
+  Pages        = {3521--3526},
+  Volume       = {114},
+
+  Owner        = {jim},
+  Timestamp    = {2018.04.20}
+}
+
 @Book{Leunen:Scholars:92,
-  author = {M.-C. van Leunen},
-  title = {A Handbook for Scholars},
-  publisher = {Oxford University Press},
-  year = 1992
+  Title     = {A Handbook for Scholars},
+  Author    = {M.-C. van Leunen},
+  Year      = {1992},
+  Publisher = {Oxford University Press}
+}
+
+@Inproceedings{Shmelkov2017,
+  Title     = {Incremental Learning of Object Detectors without Catastrophic Forgetting},
+  Author    = {Shmelkov, Konstantin and Schmid, Cordelia and Alahari, Karteek},
+  Booktitle = {ICCV},
+  Date      = {2017-10-22/2017-10-29},
+  Year      = {2017},
+  Publisher = {IEEE},
+
+  Note      = {arXiv preprint arXiv:1708.06977},
+  Owner     = {jim},
+  Timestamp = {2018.04.20}
 }
 
 @Misc{Taylor:SIGuide:95,
-  author = {B. N. Taylor},
-  title = {Guide for the Use of the International System of Units (SI)},
-  howpublished = {NIST Special Publication 811},
-  year = 1995,
-  note = {\url{http://physics.nist.gov/Document/sp811.pdf}}
+  Title        = {Guide for the Use of the International System of Units (SI)},
+  Author       = {B. N. Taylor},
+  Year         = {1995},
+  HowPublished = {NIST Special Publication 811},
+  Note         = {\url{http://physics.nist.gov/Document/sp811.pdf}}
 }
+
+@Inbook{Toutounji2016,
+  Title     = {Autonomous Learning Needs a Second Environmental Feedback Loop},
+  Author    = {Toutounji, Hazem and Pasemann, Frank},
+  Booktitle = {Computational Intelligence},
+  Year      = {2016},
+  Pages     = {455--472},
+  Publisher = {Springer},
+
+  Owner     = {jim},
+  Timestamp = {2018.04.20}
+}
+
+@Article{Velez2017,
+  Title        = {Diffusion-based neuromodulation can eliminate catastrophic forgetting in simple neural networks},
+  Author       = {Velez, Roby and Clune, Jeff},
+  Journaltitle = {PLoS ONE},
+  Year         = {2017},
+  Editor       = {Irene Sendiña-Nadal},
+  Number       = {11},
+  Pages        = {e0187736},
+  Volume       = {12},
+
+  Owner        = {jim},
+  Timestamp    = {2018.04.20}
+}
+
diff --git a/neural-networks/outline.tex b/neural-networks/outline.tex
index 2d5f67c..0cf36cb 100644
--- a/neural-networks/outline.tex
+++ b/neural-networks/outline.tex
@@ -21,7 +21,7 @@
 % \selectlanguage{german}
 
 % If the thesis is written in English:
-\usepackage[english]{babel}
+\usepackage[spanish,english]{babel}
 \selectlanguage{english}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -43,6 +43,7 @@
 \usepackage{fancybox} % Frame equations
 %\usepackage{fancyhdr} % Package for nicer headers
 \usepackage[automark]{scrlayer-scrpage}
+\usepackage[hidelinks]{hyperref}\urlstyle{rm}
 %\usepackage{fancyheadings} % Nicer numbering of headlines
 %\usepackage[outer=3.35cm]{geometry} % Type area (size, margins...) !!!Release version
 
@@ -67,7 +68,9 @@
 \usepackage[
 backend=biber,
 bibstyle=ieee,
-citestyle=ieee
+citestyle=ieee,
+minnames=1,
+maxnames=2
 ]{biblatex}
 \addbibresource{bib.bib}
 
@@ -201,15 +204,67 @@
 \section{Introduction}
 \label{sec:introduction}
 
-Your text here...
+Autonomous robots need to adapt to new situations, which means they have to
+keep learning throughout their entire lifetime. To do so, they need a second
+environmental feedback loop that tells them when to learn~\cite{Toutounji2016}.
 
-\cite{Leunen:Scholars:92}
-\cite{Taylor:SIGuide:95}
+Learning itself poses a further problem: when a network is trained on something
+new, the previously learned weights are usually largely overwritten, which is
+known as catastrophic forgetting~\cite{French1999}.
+
+Since catastrophic forgetting is a key problem for autonomous learning, it is
+crucial to overcome it. In this paper I present several approaches for learning
+in an autonomous setup and analyse which of them, if any, can overcome
+catastrophic forgetting. Attempts to overcome it were made by Kirkpatrick et
+al.~\cite{Kirkpatrick2017}, Velez and Clune~\cite{Velez2017}, and Shmelkov et
+al.~\cite{Shmelkov2017}.
+
+\section{Neuromodulation}
+\label{sec:neuromodulation}
+
+Neuromodulation is one way to implement the second environmental feedback loop.
+A Modulated Neural Network (MNN) contains neuromodulator cells (NMCs), which
+are attached to the carrier neurons via a modulatory subnetwork (MSN).
+
+Neuromodulation can also be realized through diffusion.
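+
+As a rough illustration (the notation here is my own simplification and is not
+taken verbatim from the cited papers), the effect of a neuromodulator cell can
+be sketched as a Hebbian-style weight update that is gated by the modulatory
+signal $m(t)$ emitted by the NMC:
+\[
+  \Delta w_{ij} = m(t)\,\eta\,o_i\,o_j ,
+\]
+where $\eta$ is a learning rate and $o_i$, $o_j$ are the activations of the
+connected carrier neurons. For $m(t)=0$ the weights stay fixed, so learning
+only takes place when the second feedback loop switches the modulator on.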
+
+\section{Plasticity}
+\label{sec:plasticity}
+
+Plasticity can be realized in various ways. Three approaches are presented
+here: Modulated Random Search, Modulated Gaussian Walk, and diffusion-based
+neuromodulation.
+
+\subsection{Modulated Random Search}
+\label{subsec:mrs}
+
+Does things.
+
+\subsection{Modulated Gaussian Walk}
+\label{subsec:mgw}
+
+Does things more efficiently.
+
+\subsection{Localized learning}
+\label{subsec:diffusion}
+
+Velez and Clune~\cite{Velez2017} describe another approach that exploits
+modularity for learning. Essentially, this results in task-specific localized
+learning and in functional modules for the individual subtasks.
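+
+A minimal sketch of this idea (again in my own simplified notation; the actual
+model of Velez and Clune differs in its details) is to let each task emit a
+modulatory signal from a point source and to scale learning by the local
+concentration of that signal:
+\[
+  m_j = \exp\left(-\frac{\| p_j - c_t \|^2}{2\sigma^2}\right), \qquad
+  \Delta w_{ij} = m_j\,\eta\,o_i\,o_j ,
+\]
+where $p_j$ is the position of neuron $j$, $c_t$ is the source associated with
+the current task, and $\sigma$ controls how far the modulation diffuses.
+Neurons far away from the source barely change, which is what keeps previously
+learned modules intact.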
+
+\section{Comparison regarding catastrophic forgetting}
+\label{sec:comparison}
+
+Modulated Random Search is of no use at all for overcoming catastrophic
+forgetting. Modulated Gaussian Walk improves on this. Localized learning
+overcomes catastrophic forgetting for small networks.
 
 \section{Conclusion}
 \label{sec:concl}
 
-Your text here...
+A second environmental feedback loop is important for telling autonomous
+systems when to learn. The learning method itself, however, is just as
+important if the system is to be of any use in a practical environment. The
+comparison has shown that localized learning can overcome catastrophic
+forgetting for small networks.
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 % here, at the end of the text, the bibliographic references are