% This file was created with JabRef 2.10b2.
% Encoding: UTF8
@Inproceedings{Alshamsi2005,
Title = {A {T}echnical {C}omparison of {IPSec} and {SSL}},
Author = {Alshamsi, AbdelNasir and Saito, Takamichi},
Booktitle = {19th International Conference on Advanced Information Networking and Applications},
Date = {2005-03-28/2005-03-30},
Organization = {IEEE},
Pages = {395--398},
Volume = {2},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Techreport{RFC1825,
Title = {Security {Architecture} for the {Internet} {Protocol}},
Author = {Atkinson, R.},
Institution = {IETF},
Year = {1995},
Type = {RFC},
Number = {1825},
Organization = {Naval Research Laboratory},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Techreport{RFC2401,
Title = {Security {Architecture} for the {Internet} {Protocol}},
Author = {Kent, S. and Atkinson, R.},
Institution = {IETF},
Year = {1998},
Type = {RFC},
Number = {2401},
Organization = {BBN Technologies},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Inproceedings{Berger2006,
Title = {Analysis of {C}urrent {VPN} {T}echnologies},
Author = {Berger, Thomas},
Booktitle = {Proceedings of the First International Conference on Availability, Reliability and Security},
Year = {2006},
Month = {Apr},
Publisher = {IEEE},
Abstract = {This paper deals with current Virtual Private Network (VPN) technologies, such as Internet Protocol Security (IPSec), Layer Two Tunneling Protocol (L2TP), and Point to Point Tunneling Protocol (PPTP). Furthermore, the VPN solution of the Austrian company phion Information Technologies is presented. After a short introduction to the basics of each protocol, the strengths and weaknesses of each technology are outlined, as far as interoperability, manageability, and practical problems is concerned. This is done by means of a practical analysis and comparison of the results. The analysis includes performance measurement, link quality and stability analysis, feature comparison, interaction with TCP/IP protocols, and some basic security attacks. In order to provide comparable results, all technologies were tested in the same manner. However, this paper does not provide explicit recommendations which technology is to be preferred.},
Owner = {jim},
Review = {This conference contribution is interesting, since it shows the problems of IPSec and the differences between some VPN protocols.},
Timestamp = {2014.10.27}
}
@Book{Boehmer2005,
Title = {{VPN} - {V}irtual {P}rivate {N}etworks},
Author = {Böhmer, Wolfgang},
Year = {2005},
Edition = {2},
Publisher = {Hanser},
Titleaddon = {Kommunikationssicherheit in VPN- und IP-Netzen über GPRS und WLAN},
Abstract = {SSL was originally developed by Netscape. A corresponding RFC was later marked as obsolete by the IETF. SSL 3.0 was then adopted, with some adjustments, as the Internet standard TLS 1.0. TLS sits in the application layer of the OSI reference model; applications must be designed to use TLS.
There are SSL connections and SSL sessions. Every connection is bound to a session, but a session may contain several connections. Whenever data is sent from the application layer to a server, it passes through the SSL layer. There the record protocol fragments it into several blocks, compresses them if required, and attaches a MAC. SHA-1 can be used as the MAC (MD5 as well, although it is definitely broken). The data is then sent protected by symmetric encryption; the parameters required for the encryption are exchanged beforehand via the handshake protocol.
The handshake protocol is divided into several phases. Over the course of the protocol, several messages are exchanged between client and server. Each message contains a type field of one byte, a field giving the length of the message (3 bytes), and a content field of at least one byte that carries the parameters of the type field.},
Owner = {jim},
Timestamp = {2014.10.27}
}
@Inproceedings{Brin1998,
Title = {The Anatomy of a Large-Scale Hypertextual Web Search Engine},
Author = {Brin, Sergey and Page, Lawrence},
Booktitle = {Seventh World Wide Web Conference},
Year = {1998},
Keywords = {World Wide Web, Search Engines, Information Retrieval, PageRank, Google},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Book{Buss1992,
Title = {Der {K}ombattantenstatus},
Author = {Buß, Regina},
Year = {1992},
Chapter = {Die Haager Friedenskonferenzen},
Pages = {168--180},
Publisher = {Brockmeyer},
Series = {Bochumer Schriften zur Friedenssicherung und zum Humanitären Völkerrecht},
Abstract = {Oxford Manual: On the way to the Hague Peace Conferences, after the failed Brussels Declaration of 1874, the so-called Oxford Manual appeared. It was drafted by an institute of independent scholars and largely corresponded to the views of the great powers. It was oriented towards the practice of European warfare and reshaped the provisions of the Brussels Declaration accordingly. The controversial topic of the levée en masse was resolved almost entirely in favour of the great powers: only in territories not yet occupied were unorganized crowds permitted to rise up and act against the attackers. According to the Oxford Manual, the inhabitants of occupied territories had a duty of obedience, which necessarily excludes a right to armed resistance. Preparations for the First Hague Peace Conference: On behalf of Tsar Nicholas II, the Russian foreign minister Count Mouravieff invited, in addition to the nations diplomatically represented in St. Petersburg, also Luxembourg, Montenegro and Siam. All countries accepted the invitation. With 26 participants, the circle of attendees was twice as large as at the previous conference in Brussels. Participants: - Germany, England, France, Italy, Austria-Hungary, Russia - Belgium, Bulgaria, Denmark, Greece, Luxembourg, Montenegro, the Netherlands, Portugal, Romania, Sweden-Norway, Switzerland, Serbia, Spain and Turkey - China, Japan, Persia, Siam - the USA and Mexico. Combatants in the first Hague Regulations on Land Warfare: The main goal of the conference was to develop instruments for the peaceful settlement of disputes and the prevention of wars. Arms limitation was therefore also an aim ("disarmament conference"). The conference began on 18 May 1899 in The Hague. Levée en masse: This contentious point had been the main reason for the failure of the Brussels Conference, and the Hague Peace Conference also threatened to fail over it. The issue was dealt with in the 2nd Commission, and there in the 2nd Subcommission. On 20 June 1899 the German delegate, von Schwarzhoff, gave a speech that has since become famous. His core points were that Articles 9 and 10 (which deal with the levée en masse) by no means exclude patriotism and the repulsion of attacking troops; it merely had to be ensured that those rising up clearly identify themselves as fighters, have a commander, carry their arms openly, observe the laws of war and bear a common emblem. Schwarzhoff further explained that soldiers are human beings too and may therefore expect humanity: when soldiers, exhausted from a long march, rest in a village, they must be able to trust that the peaceful passers-by will not unexpectedly turn into attackers. The Russian and Dutch delegates endorsed this statement. Martens Clause: In his opening address to the session mentioned above, Martens read out his declaration, which was later incorporated into the preamble of the Hague Regulations: "[...]Until a perfectly complete code of the laws of war is issued, the Conference thinks it right to declare that in cases not included in the present arrangement, populations and belligerents remain under the protection and empire of the principles of international law, as they result from the usages established between civilized nations, from the laws of humanity, and the requirements of the public conscience."
Articles 9 and 10: Both articles were adopted without changes. Article 11: The status of non-combatants within an army was acknowledged by this article; they enjoy the same protection as combatants. Preparation of the second conference: The impulse for the second conference came from the USA, although the formal invitation was again issued by the Russian Tsar. This time there were 44 participating states. In addition to the signatory states of the first conference, the following states also took part: Argentina, Bolivia, Brazil, Chile, the Dominican Republic, Ecuador, Guatemala, Haiti, Colombia, Nicaragua, Panama, Paraguay, Peru, El Salvador, Uruguay, Venezuela and Norway (this time as an independent state). Levée en masse: Germany wanted groups of the population that rise up to announce their emblem to the enemy in advance. France replied that in the turmoil of war it is often impossible to convey this information to the enemy without compromising one's own position; instead, it is the responsibility of every army to maintain good reconnaissance in order to know where its own troops are and where those of the enemy are located. Germany's proposal was rejected by a majority. Germany's second amendment concerned the obligation of those rising up to carry their arms openly. This proposal was adopted by a majority, since it was merely a clarification: those rising up already had to observe the customs of land warfare, which includes open combat. Evaluation of the negotiations: With regard to combatant status, the Hague Peace Conferences can be regarded as a successful codification.},
Bookauthor = {Buß, Regina},
Booktitle = {Der {K}ombattantenstatus},
Owner = {jim},
Timestamp = {2014.10.26}
}
@Article{Chou2002,
Title = {Inside {SSL}: {The} {Secure} {Sockets} {Layer} {Protocol}},
Author = {Chou, Wesley},
Journaltitle = {IT Professional},
Year = {2002},
Doi = {10.1109/MITP.2002.1046644},
Month = {Jul/Aug},
Number = {4},
Pages = {47--52},
Volume = {4},
Journal = {IT Professional},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Conference{Clark2004,
Title = {Parsing the {WSJ} using {CCG} and Log-Linear Models},
Author = {Clark, Stephen and Curran, James R.},
Booktitle = {Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics},
Year = {2004},
Pages = {104--111},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Report{Dierks2008,
Title = {The Transport Layer Security (TLS) Protocol Version 1.2},
Author = {Dierks, T. and Rescorla, E.},
Institution = {IETF},
Type = {RFC},
Number = {5246},
Year = {2008},
Language = {English},
Organization = {RTFM, Inc.},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Inproceedings{Duffield1999,
Title = {A Flexible Model for Resource Management in Virtual Private Networks},
Author = {Duffield, N. G. and Goyal, Pawan and Greenberg, Albert and Mishra, Partho and Ramakrishnan, K. K. and van der Merwe, Jacobus E.},
Booktitle = {Proceedings of the Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication},
Year = {1999},
Doi = {10.1145/316188.316209},
ISBN = {1-58113-135-6},
Location = {Cambridge, Massachusetts, USA},
Pages = {95--108},
Publisher = {ACM},
Series = {SIGCOMM '99},
Url = {http://doi.acm.org/10.1145/316188.316209},
Acmid = {316209},
Address = {New York, NY, USA},
Numpages = {14},
Owner = {jim},
Timestamp = {2014.10.18}
}
@Techreport{RFC5996,
Title = {Internet {K}ey {E}xchange {P}rotocol {V}ersion 2 ({IKE}v2)},
Author = {Kaufman, C. and Hoffman, P. and Nir, Y. and Eronen, P.},
Institution = {IETF},
Year = {2010},
Type = {RFC},
Number = {5996},
Organization = {Microsoft and VPN Consortium and Check Point and Independent},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Techreport{Ferguson2000,
Title = {A {C}ryptographic {E}valuation of {IP}sec},
Author = {Ferguson, Niels and Schneier, Bruce},
Institution = {Counterpane Internet Security, Inc.},
Year = {2000},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Article{Fraenkel1968,
Title = {Idee und {R}ealität des {V}ölkerbundes im deutschen politischen {D}enken},
Author = {Fraenkel, Ernst},
Journaltitle = {Vierteljahrshefte für Zeitgeschichte},
Year = {1968},
Month = {Jan},
Number = {1},
Pages = {1--14},
Volume = {16},
Journal = {Vierteljahrshefte für Zeitgeschichte},
Owner = {jim},
Timestamp = {2014.10.26}
}
@Report{Freier2011,
Title = {The Secure Sockets Layer (SSL) Protocol Version 3.0},
Author = {Freier, A. and Karlton, P. and Kocher, P.},
Institution = {IETF},
Type = {RFC},
Number = {6101},
Year = {2011},
Language = {English},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Incollection{Gasser1991,
Title = {Das humanitäre {V}ölkerrecht},
Author = {Gasser, Hans-Peter},
Booktitle = {Menschlichkeit für alle},
Year = {1991},
Pages = {1--13},
Publisher = {Hans-Haug},
Abstract = {Gasser outlines the development of international humanitarian law. Humanitarian law seeks to minimize the suffering and horrors of war, but by no means condones war. The law has several origins, and no single "inventor" can be named. With the increasing codification of law in the 19th century, international humanitarian law also began to be established. Essential for this were the Hague Conventions on the Laws and Customs of War on Land (which contain the Hague Regulations on Land Warfare) of 1899 and 1907, as was the first Geneva Convention of 1864. The two bodies of law developed along separate legal paths until they were brought together in 1949, after the devastating Second World War. The central notion of the law is that the only permissible purpose of war is to render the opposing forces incapable of fighting. All persons who do not actively take part in hostilities are to be protected. Only military objectives may be attacked. The neutrality of field hospitals and their personnel must be ensured unconditionally. Anyone may help the wounded, regardless of which side they are on. ... In part, the law also derives from Rousseau's social contract.},
Owner = {jim},
Timestamp = {2014.10.26}
}
@Conference{Gnjatovic2012,
Title = {A {Cognitively-Inspired} {Method} for {Meaning Representation} in {Dialogue Systems}},
Author = {Gnjatović, Milan and Delić, Vlado},
Booktitle = {3rd IEEE International Conference on Cognitive Infocommunications},
Year = {2012},
Month = {December},
Pages = {383--388},
Owner = {jim},
Timestamp = {2014.01.18}
}
@Article{Gong2013,
Title = {Novel Quantum Virtual Private Network Scheme for PON via Quantum Secure Direct Communication},
Author = {Gong, Li-Hua and Liu, Ye and Zhou, Nan-Run},
Journaltitle = {International Journal of Theoretical Physics},
Year = {2013},
Number = {9},
Pages = {3260--3268},
Volume = {52},
Abstract = {Two quantum secure direct communication (QSDC) protocols with quantum identification (QI) based on passive optical network (PON) architecture are proposed. One QSDC protocol can be implemented between two different optical network units just with simple configurations of PON by optical line terminal when they are in the same virtual private network after optical line terminal performing QI to the optical network units in the given PON architecture. The other QSDC protocol is also implemented between any two legitimated users in the virtual private network but with considerable reduction of workload of the optical line terminal. The security analysis shows that the proposed QSDC schemes with quantum identification are unconditionally secure and allow the legitimate users to exchange their secret information efficiently and to realize a quantum virtual private network in the PON networks ultimately.},
Journal = {International Journal of Theoretical Physics},
Owner = {jim},
Publisher = {Springer},
Timestamp = {2014.10.18}
}
@Article{Heffter1951,
Title = {Vom {P}rimat der {A}ussenpolitik},
Author = {Heffter, Heinrich},
Journaltitle = {Historische Zeitschrift},
Year = {1951},
Number = {1},
Pages = {1--20},
Volume = {171},
Journal = {Historische Zeitschrift},
Owner = {jim},
Timestamp = {2014.10.26}
}
@Thesis{Heinzel2003,
Title = {Virtual Private Networks},
Author = {Heinzel, Marcus and Michaelsen, Nils and Scheibe, Alexander},
Institution = {Fachbereich Informatik, Universität Hamburg},
Type = {Seminar paper},
Year = {2003},
Month = {Jan},
Owner = {jim},
School = {Fachbereich Informatik, Universität Hamburg},
Timestamp = {2014.10.18}
}
@Techreport{Hosner2004,
Title = {{IPsec} and the {SSL} {VPN} {Revolution}},
Author = {Hosner, Charlie},
Institution = {SANS Institute},
Year = {2004},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Www{ivpn.net,
Title = {{PPTP} vs {L2TP/IPSec} vs {OpenVPN}},
Author = {ivpn.net},
Date = {2014-11-29},
Url = {https://www.ivpn.net/pptp-vs-l2tp-vs-openvpn},
Owner = {jim},
Timestamp = {2014.11.29}
}
@Inbook{Jurafsky2009,
Title = {Speech and Language Processing},
Author = {Jurafsky, Daniel and Martin, James H.},
Booktitle = {Speech and Language Processing},
Year = {2009},
Chapter = {18},
Edition = {Second},
Pages = {617--644},
Publisher = {Pearson},
Series = {Prentice-Hall series in artificial intelligence},
Abstract = {Sentences get their meanings from the words they contain and the syntactic order of the words. Therefore the meaning of a sentence is partially based on its words and its syntactic structure. The composition of meaning representations is guided by the syntactic components and relations provided by grammars such as CFGs. A meaning representation is generated by first sending the input through a parser, which results in the syntactic analysis, and second passing this analysis as input to a semantic analyzer. In syntax-driven semantic analysis it is assumed that syntactic, lexical and anaphoric ambiguities are not a problem. The semantic meanings are attached to the grammar rules and lexical entries from which trees are generated in the first place; this is called the rule-to-rule hypothesis. The semantic attachments are written in braces after the syntactic rules themselves. After the syntactic analysis has been created, every word receives a FOL predicate and/or term. The semantic analyzer then works its way up the tree until the complete FOL term has been created. On the way, lambda reduction is used to replace predicates and terms with their proper meanings, received from other parts of the tree.},
Owner = {jim},
Quality = {1},
Timestamp = {2013.11.16}
}
@Inbook{Jurafsky2009a,
Title = {Speech and Language Processing},
Author = {Jurafsky, Daniel and Martin, James H.},
Booktitle = {Speech and Language Processing},
Year = {2009},
Chapter = {17},
Edition = {2},
Pages = {579--616},
Publisher = {Pearson},
Series = {Prentice-Hall series in artificial intelligence},
Abstract = {Lambda notation is used to bind variables dynamically to later appearing contents. lambda x P(x)(y) results in P(y) after a lambda reduction as x has been bound to y. lambda P P(x)(lambda x Restaurant(x)) results in lambda x Restaurant(x)(x) which results in Restaurant(x)},
Owner = {jim},
Quality = {1},
Timestamp = {2013.11.16}
}
@Inbook{Jurafsky2009b,
Title = {Speech and Language Processing},
Author = {Jurafsky, Daniel and Martin, James H.},
Booktitle = {Speech and Language Processing},
Year = {2009},
Chapter = {13},
Edition = {2},
Pages = {461--492},
Publisher = {Pearson},
Series = {Prentice-Hall series in artificial intelligence},
Owner = {jim},
Quality = {1},
Timestamp = {2013.11.17}
}
@Techreport{RFC4302,
Title = {{IP} {A}uthentication {H}eader},
Author = {Kent, S.},
Institution = {IETF},
Year = {2005},
Type = {RFC},
Number = {4302},
Organization = {BBN Technologies},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Techreport{RFC4303,
Title = {{IP} {E}ncapsulating {S}ecurity {P}ayload ({ESP})},
Author = {Kent, S.},
Institution = {IETF},
Year = {2005},
Type = {RFC},
Number = {4303},
Organization = {BBN Technologies},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Techreport{RFC4301,
Title = {Security {Architecture} for the {Internet} {Protocol}},
Author = {Kent, S. and Seo, K.},
Institution = {IETF},
Year = {2005},
Type = {RFC},
Number = {4301},
Organization = {BBN Technologies},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Conference{Kessler1997,
Title = {Automatic Detection of Text Genre},
Author = {Kessler, Brett and Nunberg, Geoffrey and Schuetze, Hinrich},
Booktitle = {Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics},
Year = {1997},
Pages = {32--38},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Conference{Klein2003,
Title = {Named Entity Recognition with Character-Level Models},
Author = {Klein, Dan and Smarr, Joseph and Nguyen, Huy and Manning, Christopher D.},
Booktitle = {Conference on Natural Language Learning (CoNLL)},
Year = {2003},
Pages = {180--183},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Inproceedings{Kotuliak2011,
Title = {Performance comparison of {IPsec} and {TLS} based {VPN} technologies},
Author = {Kotuliak, I. and Rybar, P. and Truchly, P.},
Booktitle = {9th International Conference on Emerging eLearning Technologies and Applications (ICETA)},
Date = {2011-10-27/2011-10-28},
Location = {Stara Lesna},
Organization = {IEEE},
Pages = {217--221},
Abstract = {IPsec is faster than OpenVPN with the same setup and cipher. Details: IPsec AES > OpenVPN AES; IPsec Blowfish > OpenVPN Blowfish; IPsec 3DES < OpenVPN 3DES. 3DES is a cipher of the past and should not be used anymore. AES and Blowfish have similar results under IPsec and OpenVPN. AES is standardized and has more support than Blowfish. IPsec, however, is far more complex and difficult to set up, while setting up OpenVPN is child's play.},
Owner = {jim},
Timestamp = {2014.11.28}
}
@Techreport{Li,
Title = {A Comparison of {CYK} and {Earley} {Parsing} {Algorithms}},
Author = {Li, Te and Alagappan, Devi},
Institution = {Arizona State University},
Owner = {jim},
Timestamp = {2014.01.07}
}
@Article{Lingen2014,
Title = {Mit dem {K}rieg seinen {F}rieden machen},
Author = {von Lingen, Kerstin},
Journaltitle = {Ruperto Carola},
Year = {2014},
Number = {4},
Pages = {59--65},
Abstract = {To atone for the atrocities of the Second World War, several thousand war crimes trials took place in Europe and Asia after 1945. Not infrequently, however, the proceedings owed more to political interests than to the pursuit of justice. This is particularly evident in the East Asian war crimes trials in the context of decolonization and the Cold War. Historians in Heidelberg are investigating the interactions between Asia and Europe in the course of these trials and analysing the far-reaching consequences of the ambivalent judgments.},
Journal = {Ruperto Carola},
Owner = {jim},
Timestamp = {2014.10.26}
}
@Techreport{RFC7321,
Title = {Cryptographic {A}lgorithm {I}mplementation {R}equirements and {U}sage {G}uidance for {E}ncapsulating {S}ecurity {P}ayload ({ESP}) and {A}uthentication {H}eader ({AH})},
Author = {McGrew, D. and Hoffman, P.},
Institution = {IETF},
Year = {2014},
Type = {RFC},
Number = {7321},
Organization = {Cisco Systems and VPN Consortium},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Techreport{Paskin2001,
Title = {Cubic-time Parsing and Learning Algorithms for Grammatical Bigram Models},
Author = {Paskin, Mark A.},
Institution = {University of California, Berkeley},
Year = {2001},
Month = {June},
Number = {UCB/CSD-01-1148},
Abstract = {In Dependency Grammar there are head words and dependents. Each phrase has only one head word. The head word determines how all of its dependents may be syntactically combined with other words to form a sentence. A head word and all of its dependents form a constituent. In every sentence there may be one or more dependency relationships with one head word each. Dependents that precede their head are called predependents and dependents that follow their head are called postdependents. A dependency parse consists of a set of dependency relationships that satisfies three constraints: 1. Every word except one (the root) is dependent on exactly one head. 2. The dependency relationships are acyclic; no word is, through a sequence of dependency relationships, dependent on itself. 3. When drawn as a graph above the sentence, no two dependency relations cross - a property known as projectivity or planarity. The Grammatical Bigram Probability Model assumes that all the dependents of a head word are independent of one another and of their relative order. This is a strong approximation, as in full English there are argument structure constraints that rely on the order of dependents. This simplification allows for a reduced computational complexity for parsing and learning. The grammar model falls into the class of "Bilexical grammars". A dependency parse consists of multiple spans. A span has at least two and up to n words. Spans have one property: no word in the span has a parent outside the span. Spans can be joined and closed. To join two spans, one of them has to be connected (both its endwords are connected with an edge) and both spans have to share one endword. The new span will be connected if both subspans were connected. If that is not the case, it can be closed by adding an edge between the endwords of the new span. Every dependency parse has a unique span decomposition. For joining, the left subspan has to be simple. That means it has to have an edge between its endwords or consist of two words only. Relying on this ensures that each span is derived only once. Every span has a signature. This signature states the indexes of its endwords, whether it is simple, and whether the left or right endword has a parent within the span. Spans where both the left and the right endword have their parent within the string are called top-level signatures, as such signatures characterize valid parses. Parser operations take signatures as input rather than spans. They produce signatures as well. SEED creates an unconnected and simple span with two adjacent words. CLOSE-LEFT adds an edge between the endwords and makes the left endword the parent of the right one. CLOSE-RIGHT does the opposite and makes the right endword the parent of the left one. These operators require that neither the left nor the right endword have a parent within the span. JOIN takes two input spans and joins them. It requires that the spans share an endword (1.), the shared endword has one parent (2.) and the left input is simple (3.). The JOIN rule applies only if the left span doesn't start the sentence. These operators constitute an algebra over span signatures called the span signature algebra. A derivation D is an expression in this algebra. Like operations, it evaluates to span signatures. These expressions can be represented as trees where the nodes are operations. There is an isomorphism between dependency parses and their corresponding derivations. An optimal derivation must consist of an operation over the results of optimal sub-derivations. 
Therefore it is enough to record the parse operation with the most likely derivation of a given signature in order to reconstruct the most likely derivation of the entire sentence. The chart-parse algorithm returns the optimal parse. It uses a subprocedure called EXTRACT-OPT-PARSE that constructs the optimal parse by finding the top-level signature (sigma) with maximum optimal probability (pi*). It backtracks then recursively through the optimal derivation defined by (omega*). If CLOSE operations are encountered edges are recorded in the parse. The algorithm requires O(n³) time and O(n²) space.},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Article{Paterson2006,
Title = {A {Cryptographic} {Tour} of the {IPsec} {Standards}},
Author = {Paterson, Kenneth G.},
Journaltitle = {Information Security Technical Report},
Year = {2006},
Publisher = {Elsevier},
Journal = {Information Security Technical Report},
Keywords = {IP, IPsec, network security, cryptography, key management},
Owner = {jim},
Timestamp = {2014.10.24}
}
@Misc{Portalarium2013,
Title = {Shroud of the {Avatar} {Six Month Progress Video}},
Author = {Portalarium},
Year = {2013},
HowPublished = {\url{https://www.youtube.com/watch?v=yGa6hR4a87U}},
Month = {November},
Note = {Accessed on 18.01.2014 12:07},
Owner = {jim},
Timestamp = {2014.01.12}
}
@Book{Reich2010,
Title = {Die {H}aager {L}andkriegsordnung},
Author = {Deutsches Reich},
Year = {2010},
Publisher = {Europ. Hochsch.-Verl.},
Abstract = {Part one: combatant status. Part two: hostilities. Part three: rules governing occupation.},
Owner = {jim},
Timestamp = {2014.10.30}
}
@Inbook{Russel2010,
Title = {Artificial Intelligence: A Modern Approach},
Author = {Russell, Stuart J. and Norvig, Peter},
Booktitle = {Artificial Intelligence: A Modern Approach},
Date = {2009-12-11},
Year = {2009},
Bookauthor = {Russell, Stuart J. and Norvig, Peter},
Chapter = {23},
Edition = {3},
Pages = {888--927},
Publisher = {Pearson},
Series = {Prentice-Hall series in artificial intelligence},
Abstract = {The first method for understanding natural language is syntactic analysis or parsing. The goal is to find the phrase structure of a sequence of words according to the rules of the applied grammar. A strict top-to-bottom or bottom-to-top parsing can be inefficient. Given two sentences with the same first 10 words and a difference only from the 11th word on, parsing from left to right would force the parser to make a guess about the nature of the sentence. But it doesn't know if it's right until the 11th word, and from there it has to backtrack and reanalyze the sentence. To prevent that, dynamic programming is used: every analyzed substring gets stored for later. Once it is discovered that, for example, "the students in section 2 of Computer Science 101" is a noun phrase, this information can be stored in a structure known as a chart. Algorithms that do such storing are called chart parsers. One of these chart parsers is a bottom-up version called the CYK algorithm, after its inventors John Cocke, Daniel Younger and Tadao Kasami. This algorithm requires a grammar in Chomsky Normal Form. The algorithm takes O(n²m) space for the P table, with n being the number of words in the sentence and m the number of nonterminal symbols in the grammar. It takes O(n³m) time, whereas m is constant for a particular grammar; that's why it is commonly described as O(n³). There is no faster algorithm for general context-free grammars. The CYK algorithm only computes the probability of the most probable tree; the subtrees are all represented in the P table. PCFGs (probabilistic context-free grammars) have many rules with a probability for each one of them. Learning the grammar from data is better than a knowledge engineering approach. Learning is easiest if we are given a corpus of correctly parsed sentences, commonly known as a treebank. The best known treebank is the Penn Treebank, as it consists of 3 million words which have been annotated with part of speech and parse-tree structure. Given a set of trees, a PCFG can be created just by counting and smoothing. If no treebank is given, it is still possible to learn the grammar but it is more difficult. In such a case there are actually two problems: first learning the structure of the grammar rules and second learning the probabilities associated with them. PCFGs have the problem that they are context-free; combining a PCFG and a Markov model will get the best of both. This leads ultimately to lexicalized PCFGs. Another problem of PCFGs is their preference for short sentences. Lexicalized PCFGs introduce so-called head words. Such words are the most important words in a phrase, and the probabilities are calculated between the head words. Example: in "eat a banana", "eat" is the head of the verb phrase "eat a banana", whereas "banana" is the head of the noun phrase "a banana". Probability P1 now depends on "eat" and "banana" and the result would be very high. If the head of the noun phrase were "bandanna", the result would be significantly lower. The next step is definite clause grammars. They can be used to parse by way of logical inference and make it possible to reason about languages and strings in many different ways. Furthermore, augmentations allow for distinctions in a single subphrase. For example, the noun phrase (NP) depends on the subject case and the person and number of persons. A real-world example would be "to smell". It is "I smell", "you smell", "we smell", "you smell" and "they smell" but "he/she/it smells". 
It depends on the person which version is used. Semantic interpretation is used to give sentences a meaning. This is achieved through logical sentences. The semantics can be added to an already augmented grammar (created during the previous step), resulting in multiple augmentations at the same time. Chill is an inductive logic programming program that can learn to achieve 70% to 85% accuracy on various database query tasks. But there are several complications, as English is endlessly complex. First there is the time at which things happened (present, past, future). Second, there is the so-called speech act, which is the speaker's action that has to be deciphered by the hearer. The hearer has to find out what type of action it is (a statement, a question, an order, a warning, a promise and so on). Then there are so-called long-distance dependencies and ambiguity. The ambiguity can range from lexical ambiguity, where a word has multiple usages, over syntactic ambiguity, where a sentence has multiple parses, up to semantic ambiguity, where the meaning of the same sentence can be different. Last, there is ambiguity between literal and figurative meanings. Finally, there are four models that need to be combined to do disambiguation properly: the world model, the mental model, the language model and the acoustic model. -- not so much an abstract of the specific content of that section as an abstract about speech recognition in general -- The second method is speech recognition. It has the added difficulty that the words are not clearly separated and every speaker can pronounce the same sentence with the same meaning differently. An example is "The train is approaching". Another written form would be "The train's approaching". Both convey the same meaning in the written language. But if a BBC, a CNN and a German news anchor speaks this sentence, it will sound dramatically different. Speech recognition has to deal with that problem to get the written text associated with the spoken words. From the text, the first method can then be used to analyze the words and find a meaning. Finally, this meaning can be used to create some kind of action in a dialogue system. -- Some problems of speech recognition are segmentation, coarticulation and homophones. Two models used are the acoustic model and the language model. Another major model is the noisy channel model, named after Claude Shannon (1948). He showed that the original message can always be recovered in a noisy channel if the original message is encoded in a redundant enough way. The acoustic model in particular is used to get to the really interesting parts. It is not interesting how words were spoken but rather what words were spoken. That means that not all available information needs to be stored and a relatively low sample rate is enough: 80 samples at 8 kHz with a frame length of about 10 milliseconds suffice for that matter. To distinguish words, so-called phones are used. There are 49 phones used in English. A phoneme is the smallest unit of sound that has a distinct meaning to speakers of a particular language. Back to the frames: every frame is summarized by a vector of features. Features are important aspects of a speech signal. It can be compared to listening to an orchestra and saying "here the French horns are playing loudly and the violins are playing softly". Yet another difficulty is dialect variation. The language model should be learned from a corpus of transcripts of spoken language. 
But such a thing is more difficult than building an n-gram model of text, because it requires a hidden Markov model. All in all, speech recognition is most effective when used for a specific task against a restricted set of options. A general-purpose system can only work accurately if it creates one model for every speaker. Prominent examples like Apple's Siri are therefore not very accurate.},
Owner = {jim},
Timestamp = {2013.10.24}
}
@Techreport{RFC4307,
Title = {Cryptographic algorithms for use in the {I}nternet {K}ey {E}xchange version 2 ({IKE}v2)},
Author = {Schiller, J.},
Institution = {IETF},
Year = {2005},
Type = {RFC},
Number = {4307},
Organization = {Massachusetts Institute of Technology},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Inproceedings{Scott1921,
Title = {The Conference of 1907},
Author = {Scott, James Brown},
Booktitle = {The Proceedings of the Hague Peace Conferences: Translation of the Official Texts},
Year = {1921},
Publisher = {Oxford University Press},
Volume = {3},
Owner = {jim},
Timestamp = {2014.11.22}
}
@Inproceedings{Scott1920,
Title = {The Conference of 1899},
Author = {Scott, James Brown},
Booktitle = {The Proceedings of the Hague Peace Conferences: Translation of the Official Texts},
Year = {1920},
Publisher = {Oxford University Press},
Owner = {jim},
Timestamp = {2014.11.22}
}
@Inproceedings{Sleator1993,
Title = {Parsing English with a Link Grammar},
Author = {Sleator, Daniel D. K. and Temperley, Davy},
Booktitle = {Third Annual Workshop on Parsing technologies},
Year = {1993},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Conference{Smith2008,
Title = {Dependency Parsing by Belief Propagation},
Author = {Smith, David A. and Eisner, Jason},
Booktitle = {Conference on Empirical Methods in Natural Language Processing},
Date = {2008-10-25/2008-10-27},
Year = {2008},
Pages = {145--156},
Owner = {jim},
Quality = {1},
Timestamp = {2013.10.29}
}
@Inproceedings{Sun2011,
Title = {The advantages and the implementation of {SSL} {VPN}},
Author = {Sun, Su Hua},
Booktitle = {2nd {I}nternational {C}onference on Software {E}ngineering and {S}ervice {S}cience (ICSESS)},
Date = {2011-07-15/2011-07-17},
Location = {Beijing},
Organization = {IEEE},
Pages = {548--551},
Abstract = {SSL/TLS VPN is the better choice for remote access to a private network, while IPSec is better for the connection between two fixed endpoints. With IPSec the data is transported unencrypted between the application and the VPN start point and from the VPN end point to the application. In a remote access scenario, all the data would therefore be readable from inside the target network. Furthermore, an attacker only needs access to the client computer and can then use the connection without being authenticated. With SSL VPN the encryption is application to application, and authentication and authorization happen for each connection. Access to a connection is considerably more difficult than access to a computer.},
Owner = {jim},
Timestamp = {2014.12.13}
}
@Article{Venkateswaran2001,
Title = {Virtual Private Networks},
Author = {Venkateswaran, R.},
Journaltitle = {IEEE Potentials},
Year = {2001},
Month = {Feb/Mar},
Number = {1},
Pages = {11--15},
Volume = {20},
Abstract = {A virtual private network (VPN) can help resolve many of the issues associated with today's private networks. A VPN facilitates an agile IT infrastructure. Global VPNs enable connectivity to all locations anywhere in the world at a fraction of the cost of dedicated links. VPN services enable remote access to the intranet at significantly lower cost, thus enabling support for a mobile workforce. Additionally, the VPN architecture supports a reliable authentication mechanism to provide easy access to the intranet from anywhere using any available access media, including analog modems, ISDN, cable modems, DSL and wireless. There are primarily three types of VPN services: (1) local area network (LAN) interconnect VPN services, (2) dial-up VPN services, and (3) Ethernet VPN services.},
Journal = {IEEE Potentials},
Owner = {jim},
Timestamp = {2014.10.18}
}
@Thesis{Weber,
Title = {IPSec Hochverfügbarkeit},
Author = {Weber, Ulrich},
Type = {Diploma thesis},
Abstract = {This diploma thesis deals with the integration of IPSec high availability using Openswan and Linux kernel 2.6.},
Owner = {jim},
Timestamp = {2014.10.18}
}