master
Anne Lorenz 2018-10-18 10:48:07 +02:00
parent 446df63d84
commit bc94fa014e
65 changed files with 2909 additions and 141 deletions

View File

@ -7,14 +7,14 @@ Bag Of Words
BagOfWords counts word stems in an article
and adds new words to the global vocabulary.
Note:
note:
The multinomial Naive Bayes classifier is suitable
for classification with discrete features (e.g.,
word counts for text classification).
The multinomial distribution normally requires
integer feature counts. However, in practice,
fractional counts such as tf-idf may also work.
=> accounted for via the 'relative_word_frequencies' parameter
=> taken into account via the 'relative_word_frequencies' parameter
'''
import re
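As a quick illustration of the note above, scikit-learn's MultinomialNB does accept fractional features such as relative word frequencies. A minimal sketch (not part of this commit) with a made-up toy matrix:

import numpy as np
from sklearn.naive_bayes import MultinomialNB

# toy document-term matrix: each row holds relative word frequencies
X = np.array([[0.50, 0.50, 0.00],
              [0.00, 0.25, 0.75],
              [0.40, 0.20, 0.40]])
y = np.array([1, 0, 1])

clf = MultinomialNB()
clf.fit(X, y)            # fractional counts are accepted
print(clf.predict([[0.6, 0.4, 0.0]]))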
@ -43,7 +43,7 @@ class BagOfWords:
# list of all words to return
words_cleaned = []
for word in words:
# remove numbers
# leave out numbers
if word.isalpha():
# reduce word to stem
word = BagOfWords.reduce_word_to_stem(word)
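For reference, the cleaning step above can be reproduced standalone. A sketch assuming NLTK's PorterStemmer; the extract_words helper here is illustrative, not the project's actual class method:

import re
from nltk.stem.porter import PorterStemmer

def extract_words(text):
    stemmer = PorterStemmer()
    words_cleaned = []
    for word in re.split(r'\W+', text.lower()):
        # leave out numbers and other non-alphabetic tokens
        if word.isalpha():
            # reduce word to stem
            words_cleaned.append(stemmer.stem(word))
    return words_cleaned

print(extract_words('Microsoft announced the deal in June 2018.'))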
@ -105,7 +105,9 @@ class BagOfWords:
print('# BOW: making vocabulary of data set')
print('# ...')
vocab = set()
# for every article's text
for text in series:
# add single article's text to total vocabulary
vocab |= set(BagOfWords.extract_words(text))
# transform to list
vocab = list(vocab)
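The vocabulary build above is a set union over the Series. A tiny self-contained sketch, with whitespace splitting standing in for BagOfWords.extract_words:

import pandas as pd

series = pd.Series(['EU approves deal', 'Microsoft buys GitHub'])
vocab = set()
for text in series:
    # add single article's words to the total vocabulary
    vocab |= set(text.lower().split())
# transform to a sorted list
vocab = sorted(vocab)
print(vocab)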
@ -147,10 +149,8 @@ class BagOfWords:
'you\'re', 'you\'ve', 'your', 'yours', 'yourself',
'yourselves']
##=> does this make sense?:
#add specific words
#stop_words.extend(['reuters', 'also', 'monday', 'tuesday',
# 'wednesday', 'thursday', 'friday'])
#add unwanted terms
stop_words.extend(['reuters', 'bloomberg', 'cnn', 'economist'])
#remove the word 'not' from stop words
#stop_words.remove('not')
@ -165,3 +165,28 @@ class BagOfWords:
stop_words = set(stop_words)
return stop_words
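The stop-word handling boils down to list surgery before the set conversion. A minimal sketch with a shortened list (the entries are made up for illustration):

stop_words = ['a', 'about', 'not', 'the', 'was']
# add unwanted news-source terms
stop_words.extend(['reuters', 'bloomberg', 'cnn', 'economist'])
# optionally keep 'not', since negation can carry meaning
stop_words.remove('not')
stop_words = set(stop_words)
print(stop_words)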
if __name__ == '__main__':
test_article = '''Exclusive: Microsoft's $7.5 billion GitHub deal set for
EU approval - sources. BRUSSELS (Reuters) - U.S. software
giant Microsoft (MSFT.O) is set to win unconditional EU
antitrust approval for its $7.5 billion purchase of
privately held coding website GitHub, two people familiar
with the matter said on Monday. Microsoft announced the
deal in June, its largest acquisition since it bought
LinkedIn for $26 billion in 2016. The GitHub deal is
expected to boost the U.S. software giant's cloud
computing business and challenge market leader Amazon
(AMZN.O). GitHub, the world's largest code host, has
more than 28 million developers using its platform. It
will become a part of Microsoft's Intelligent Cloud unit
once the acquisition is completed. Microsoft Chief
Executive Satya Nadella has tried to assuage users'
worries that GitHub might favor Microsoft products
over competitors after the deal, saying GitHub would
continue to be an open platform that works with all
public clouds. The European Commission, which is set to
decide on the deal by Oct. 19, did not respond to a
request for immediate comment. Microsoft declined to
comment. Reporting by Foo Yun Chee; editing by Jason
Neely'''
print(BagOfWords.extract_words(test_article))

View File

@ -10,20 +10,14 @@ import csv
import numpy as np
import pandas as pd
# todo: check how large ("DataFrame maximum size")
# import sys
# print(sys.getsizeof(OBJECT_NAME_HERE))
# when selecting (833 per month), check the headlines for duplicates!!!
class CsvHandler:
def read_csv(csv_file):
def read_csv(csv_file, usecols=None):
df = pd.read_csv(csv_file,
sep='|',
header=0,
engine='python',
usecols=[1,2,4], #use only 'Title', 'Text' and 'Label'
usecols=usecols,
decimal='.',
quotechar='\'',
#nrows = 200,
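A hypothetical call of the generalized reader; the module import is assumed from this repo's file layout, and the column indices follow the removed default above:

from CsvHandler import CsvHandler

# read only 'Title', 'Text' and 'Label' (column indices 1, 2, 4)
df = CsvHandler.read_csv('classification_labelled_corrected.csv',
                         usecols=[1, 2, 4])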
@ -42,11 +36,11 @@ class CsvHandler:
returns new DataFrame with only selected items
'''
# new empty DataFrame
df_samples = pd.DataFrame(columns=['rands','title','text','label'])
# df_samples = pd.DataFrame(columns=['rands','title','text','label'])
# initialize random => reproducible sequence
np.random.seed(5)
# pseudorandom float -1.0 <= x <= 1.0 for every sample
pd.Series()
# pd.Series()
# add new column 'Random'
df['Random'] = pd.Series(np.random.randn(len(df)), index=df.index)
# sort DataFrame by random numbers
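The reproducible shuffle can be tried in isolation. A self-contained sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame({'title': ['a', 'b', 'c', 'd']})
# initialize random => reproducible sequence
np.random.seed(5)
# add new column 'Random' with pseudorandom floats
df['Random'] = pd.Series(np.random.randn(len(df)), index=df.index)
# sort DataFrame by the random numbers
print(df.sort_values(by='Random'))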

NER.py
View File

@ -18,21 +18,25 @@ class NER:
# toDo: complete lists:
# some legal entity types
company_abbrevs = ['Inc', 'Corp', 'Co', 'Ltd', 'AG', 'LP',
'Plc', 'LLC', 'LBO', 'IPO', 'HQ',
'CIO', 'NGO', 'AB']
company_abbrevs = ['Inc', 'Corp', 'Co', 'Ltd', 'AG', 'LP', 'Plc', 'LLC',
'LBO', 'IPO', 'HQ', 'CIO', 'NGO', 'AB']
# some entities that are not companies
misc = ['Reuters', 'Financial Times', 'Bloomberg', 'The Economist',
'Cnn', 'European Commission', 'EU', 'Staff', 'Min', 'Read',
misc = ['Reuters', 'Financial Times', 'Bloomberg', 'The Economist', 'Cnn',
'European Commission', 'EU', 'Staff', 'Min', 'Read',
'Thomson Reuters Trust Principles', 'New York Stock Exchange',
'NYSE']
def tag_words(text):
stanford_classifier = 'C:\\Users\\anne.lorenz\\Bachelorarbeit\\StanfordNER\\stanford-ner-2018-02-27\\classifiers\\english.all.3class.distsim.crf.ser.gz'
stanford_ner_path = 'C:\\Users\\anne.lorenz\\Bachelorarbeit\\StanfordNER\\stanford-ner-2018-02-27\\stanford-ner.jar'
# path to Stanford NER
stanford_classifier = 'stanford-ner-2018-02-27'\
'\\classifiers'\
'\\english.all.3class.distsim.crf.ser.gz'
stanford_ner_path = 'stanford-ner-2018-02-27'\
'\\stanford-ner.jar'
# create tagger object
st = StanfordNERTagger(stanford_classifier, stanford_ner_path, encoding='utf-8')
st = StanfordNERTagger(stanford_classifier, stanford_ner_path,
encoding='utf-8')
tokenized_text = word_tokenize(text)
# list of tuples (word, tag)
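For context, a hypothetical end-to-end tagging call with the relative paths introduced above; it assumes NLTK is installed, a local Java runtime is available, and the Stanford NER release directory sits next to the script:

from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize

st = StanfordNERTagger(
    'stanford-ner-2018-02-27/classifiers/english.all.3class.distsim.crf.ser.gz',
    'stanford-ner-2018-02-27/stanford-ner.jar',
    encoding='utf-8')
tokens = word_tokenize('Microsoft is set to win EU approval.')
# list of (word, tag) tuples, e.g. ('Microsoft', 'ORGANIZATION')
print(st.tag(tokens))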
@ -117,7 +121,7 @@ if __name__ == '__main__':
filepath = 'classification_labelled_corrected.csv'
df = CsvHandler.read_csv(filepath)
# articles with label==1
# only articles with label==1
df_hits = df[df['Label'] == 1]
texts = df_hits['Title'] + ' ' + df_hits['Text']

View File

@ -6,7 +6,10 @@ retrieves JSON files from webhose.io
saves articles' relevant information in csv file
'''
#toDo: insert personal webhose key
# toDo: add Uuid, URL, Site and change order to:
# Title, Text, Site, SiteSection, Url, Timestamp
# toDo: insert personal webhose key
import re
from datetime import datetime
@ -72,12 +75,12 @@ class Requester:
continue
else:
article = []
article.append(output['posts'][i]['published'])
article.append(output['posts'][i]['title'].replace('|', ' '))
article.append(output['posts'][i]['published']) # Timestamp
article.append(output['posts'][i]['title'].replace('|', ' ')) # Title
# remove white spaces and separators
text = output['posts'][i]['text'].replace('\n', ' ')\
.replace('\r', ' ').replace('|', ' ')
section = output['posts'][i]['thread']['site_section']
.replace('\r', ' ').replace('|', ' ') # Text
section = output['posts'][i]['thread']['site_section'] # SiteSection
article.append(text)
# remove '\r' at end of some urls
section = section.replace('\r', '')
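The row assembly above can be sketched standalone; the post dict below mimics webhose's JSON fields and is made up for illustration:

post = {'published': '2018-10-01T11:00:00.000+03:00',
        'title': 'Microsoft | GitHub deal',
        'text': 'First line.\r\nSecond | line.',
        'thread': {'site_section': 'http://example.com/business\r'}}

article = []
article.append(post['published'])                 # Timestamp
article.append(post['title'].replace('|', ' '))   # Title
# remove white spaces and separators
text = post['text'].replace('\n', ' ').replace('\r', ' ').replace('|', ' ')
article.append(text)                              # Text
# remove '\r' at end of some urls
section = post['thread']['site_section'].replace('\r', '')
article.append(section)                           # SiteSection
print(article)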

View File

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View File

@ -0,0 +1,171 @@
import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ie.crf.*;
import edu.stanford.nlp.io.IOUtils;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.sequences.DocumentReaderAndWriter;
import edu.stanford.nlp.util.Triple;
import java.util.List;
/** This is a demo of calling CRFClassifier programmatically.
* <p>
* Usage: {@code java -mx400m -cp "*" NERDemo [serializedClassifier [fileName]] }
* <p>
* If arguments aren't specified, they default to
* classifiers/english.all.3class.distsim.crf.ser.gz and some hardcoded sample text.
* If run with arguments, it shows some of the ways to get k-best labelings and
* probabilities out with CRFClassifier. If run without arguments, it shows some of
* the alternative output formats that you can get.
* <p>
* To use CRFClassifier from the command line:
* </p><blockquote>
* {@code java -mx400m edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier [classifier] -textFile [file] }
* </blockquote><p>
* Or if the file is already tokenized and one word per line, perhaps in
* a tab-separated value format with extra columns for part-of-speech tag,
* etc., use the version below (note the 's' instead of the 'x'):
* </p><blockquote>
* {@code java -mx400m edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier [classifier] -testFile [file] }
* </blockquote>
*
* @author Jenny Finkel
* @author Christopher Manning
*/
public class NERDemo {
public static void main(String[] args) throws Exception {
String serializedClassifier = "classifiers/english.all.3class.distsim.crf.ser.gz";
if (args.length > 0) {
serializedClassifier = args[0];
}
AbstractSequenceClassifier<CoreLabel> classifier = CRFClassifier.getClassifier(serializedClassifier);
/* For either a file to annotate or for the hardcoded text example, this
demo file shows several ways to process the input, for teaching purposes.
*/
if (args.length > 1) {
/* For the file, it shows (1) how to run NER on a String, (2) how
to get the entities in the String with character offsets, and
(3) how to run NER on a whole file (without loading it into a String).
*/
String fileContents = IOUtils.slurpFile(args[1]);
List<List<CoreLabel>> out = classifier.classify(fileContents);
for (List<CoreLabel> sentence : out) {
for (CoreLabel word : sentence) {
System.out.print(word.word() + '/' + word.get(CoreAnnotations.AnswerAnnotation.class) + ' ');
}
System.out.println();
}
System.out.println("---");
out = classifier.classifyFile(args[1]);
for (List<CoreLabel> sentence : out) {
for (CoreLabel word : sentence) {
System.out.print(word.word() + '/' + word.get(CoreAnnotations.AnswerAnnotation.class) + ' ');
}
System.out.println();
}
System.out.println("---");
List<Triple<String, Integer, Integer>> list = classifier.classifyToCharacterOffsets(fileContents);
for (Triple<String, Integer, Integer> item : list) {
System.out.println(item.first() + ": " + fileContents.substring(item.second(), item.third()));
}
System.out.println("---");
System.out.println("Ten best entity labelings");
DocumentReaderAndWriter<CoreLabel> readerAndWriter = classifier.makePlainTextReaderAndWriter();
classifier.classifyAndWriteAnswersKBest(args[1], 10, readerAndWriter);
System.out.println("---");
System.out.println("Per-token marginalized probabilities");
classifier.printProbs(args[1], readerAndWriter);
// -- This code prints out the first order (token pair) clique probabilities.
// -- But that output is a bit overwhelming, so we leave it commented out by default.
// System.out.println("---");
// System.out.println("First Order Clique Probabilities");
// ((CRFClassifier) classifier).printFirstOrderProbs(args[1], readerAndWriter);
} else {
/* For the hard-coded String, it shows how to run it on a single
sentence, and how to do this and produce several formats, including
slash tags and an inline XML output format. It also shows the full
contents of the {@code CoreLabel}s that are constructed by the
classifier. And it shows getting out the probabilities of different
assignments and an n-best list of classifications with probabilities.
*/
String[] example = {"Good afternoon Rajat Raina, how are you today?",
"I go to school at Stanford University, which is located in California." };
for (String str : example) {
System.out.println(classifier.classifyToString(str));
}
System.out.println("---");
for (String str : example) {
// This one puts in spaces and newlines between tokens, so just print not println.
System.out.print(classifier.classifyToString(str, "slashTags", false));
}
System.out.println("---");
for (String str : example) {
// This one is best for dealing with the output as a TSV (tab-separated column) file.
// The first column gives entities, the second their classes, and the third the remaining text in a document
System.out.print(classifier.classifyToString(str, "tabbedEntities", false));
}
System.out.println("---");
for (String str : example) {
System.out.println(classifier.classifyWithInlineXML(str));
}
System.out.println("---");
for (String str : example) {
System.out.println(classifier.classifyToString(str, "xml", true));
}
System.out.println("---");
for (String str : example) {
System.out.print(classifier.classifyToString(str, "tsv", false));
}
System.out.println("---");
// This gets out entities with character offsets
int j = 0;
for (String str : example) {
j++;
List<Triple<String,Integer,Integer>> triples = classifier.classifyToCharacterOffsets(str);
for (Triple<String,Integer,Integer> trip : triples) {
System.out.printf("%s over character offsets [%d, %d) in sentence %d.%n",
trip.first(), trip.second(), trip.third(), j);
}
}
System.out.println("---");
// This prints out all the details of what is stored for each token
int i=0;
for (String str : example) {
for (List<CoreLabel> lcl : classifier.classify(str)) {
for (CoreLabel cl : lcl) {
System.out.print(i++ + ": ");
System.out.println(cl.toShorterString());
}
}
}
System.out.println("---");
}
}
}

View File

@ -0,0 +1,289 @@
Stanford NER - v3.9.1 - 2018-02-27
----------------------------------------------
This package provides a high-performance machine learning based named
entity recognition system, including facilities to train models from
supervised training data and pre-trained models for English.
(c) 2002-2015. The Board of Trustees of The Leland
Stanford Junior University. All Rights Reserved.
Original CRF code by Jenny Finkel.
Additional modules, features, internationalization, compaction, and
support code by Christopher Manning, Dan Klein, Christopher Cox, Huy Nguyen
Shipra Dingare, Anna Rafferty, and John Bauer.
This release prepared by Jason Bolton.
LICENSE
The software is licensed under the full GPL v2+. Please see the file LICENCE.txt
For more information, bug reports, and fixes, contact:
Christopher Manning
Dept of Computer Science, Gates 2A
Stanford CA 94305-9020
USA
java-nlp-support@lists.stanford.edu
https://nlp.stanford.edu/software/CRF-NER.html
CONTACT
For questions about this distribution, please contact Stanford's JavaNLP group
at java-nlp-user@lists.stanford.edu. We provide assistance on a best-effort
basis.
TUTORIAL
Quickstart guidelines, primarily for end users who wish to use the included NER
models, are below. For further instructions on training your own NER model,
go to https://nlp.stanford.edu/software/crf-faq.html.
INCLUDED SERIALIZED MODELS / TRAINING DATA
The basic included serialized model is a 3 class NER tagger that can
label: PERSON, ORGANIZATION, and LOCATION entities. It is included as
english.all.3class.distsim.crf.ser.gz. It is trained on data from
CoNLL, MUC6, MUC7, ACE, OntoNotes, and Wikipedia.
Because this model is trained on both US
and UK newswire, it is fairly robust across the two domains.
We have also included a 4 class NER tagger trained on the CoNLL 2003
Shared Task training data that labels for PERSON, ORGANIZATION,
LOCATION, and MISC. It is named
english.conll.4class.distsim.crf.ser.gz .
A third model is trained only on data from MUC and
distinguishes between 7 different classes:
english.muc.7class.distsim.crf.ser.gz.
All of the serialized classifiers come in two versions, one trained to
expect standard written English capitalization, and the other
to ignore capitalization information. The case-insensitive versions
of the three models are available on the Stanford NER webpage.
These models use a distributional similarity lexicon to improve performance
(by between 1.5%-3% F-measure). The distributional similarity features
make the models perform substantially better, but they require rather
more memory. The distsim models are included in the release package.
The nodistsim versions of the same models may be available on the
Stanford NER webpage.
Finally, we have models for other languages, including two German models,
a Chinese model, and a Spanish model. The files for these models can be
found at:
http://nlp.stanford.edu/software/CRF-NER.html
QUICKSTART INSTRUCTIONS
This NER system requires Java 1.8 or later.
Providing java is on your PATH, you should be able to run an NER GUI
demonstration by just clicking. It might work to double-click on the
stanford-ner.jar archive but this may well fail as the operating system
does not give Java enough memory for our NER system, so it is safer to
instead double click on the ner-gui.bat icon (Windows) or ner-gui.sh
(Linux/Unix/MacOSX). Then, using the top option from the Classifier
menu, load a CRF classifier from the classifiers directory of the
distribution. You can then either load a text file or web page from
the File menu, or decide to use the default text in the window. Finally,
you can now named entity tag the text by pressing the Run NER button.
From a command line, you need to have java on your PATH and the
stanford-ner.jar file and the lib directory in your CLASSPATH. (The way of doing this depends on
your OS/shell.) The supplied ner.bat and ner.sh should work to allow
you to tag a single file. For example, for Windows:
ner file
Or on Unix/Linux you should be able to parse the test file in the distribution
directory with the command:
java -mx600m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -textFile sample.txt
Here's an output option that will print out entities and their class to
the first two columns of a tab-separated columns output file:
java -mx600m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -outputFormat tabbedEntities -textFile sample.txt > sample.tsv
When run from a jar file, you also have the option of using a serialized
classifier contained in the jar file.
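A possible way to consume that sample.tsv from Python, assuming the tabbedEntities layout described above, where a non-empty second column carries an entity class:

import csv

with open('sample.tsv', newline='', encoding='utf-8') as f:
    for row in csv.reader(f, delimiter='\t'):
        # rows whose first two columns are filled name an entity and its class
        if len(row) >= 2 and row[0] and row[1]:
            print(row[1], '->', row[0])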
USING FULL STANFORD CORENLP NER FUNCTIONALITY
This standalone distribution also allows access to the full NER
capabilities of the Stanford CoreNLP pipeline. These capabilities
can be accessed via the NERClassifierCombiner class.
NERClassifierCombiner allows for multiple CRFs to be used together,
and has options for recognizing numeric sequence patterns and time
patterns with the rule-based NER of SUTime.
Suppose one combines three CRFs, CRF-1, CRF-2, and CRF-3, with the
NERClassifierCombiner. When the NERClassifierCombiner runs, it will
first apply the NER tags of CRF-1 to the text, then it will apply
CRF-2's NER tags to any tokens not tagged by CRF-1 and so on. If
the option ner.combinationMode is set to NORMAL (default), any label
applied by CRF-1 cannot be applied by subsequent CRF's. For instance
if CRF-1 applies the LOCATION tag, no other CRF's LOCATION tag will be
used. If ner.combinationMode is set to HIGH_RECALL, this limitation
will be deactivated.
To use NERClassifierCombiner at the command-line, the jars in lib
and stanford-ner.jar must be in the CLASSPATH. Here is an example command:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz \
-ner.useSUTime false -textFile sample-w-time.txt
Let's break this down a bit. The flag "-ner.model" should be followed by a
list of CRF's to be combined by the NERClassifierCombiner. Some serialized
CRF's are provided in the classifiers directory. In this example the CRF's
trained on the CONLL 4 class data and the MUC 7 class data are being combined.
When the flag "-ner.useSUTime" is followed by "false", SUTime is shut off. You should
note that when the "false" is omitted, the text "4 days ago" suddenly is
tagged with DATE. These are the kinds of phrases SUTime can identify.
NERClassifierCombiner can be run on different types of input as well. Here is
an example which is run on CONLL style input:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz \
-map word=0,answer=1 -testFile sample-conll-file.txt
It is crucial to include the "-map word=0,answer=1", which specifies that
the input test file has the words in the first column and the answer labels
in the second column.
It is also possible to serialize and load an NERClassifierCombiner.
This command loads the three sample crfs with combinationMode=HIGH_RECALL
and SUTime=false, and dumps them to a file named
test.serialized.ncc.ncc.ser.gz
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz,\
classifiers/english.all.3class.distsim.crf.ser.gz -ner.useSUTime false \
-ner.combinationMode HIGH_RECALL -serializeTo test.serialized.ncc.ncc.ser.gz
An example serialized NERClassifierCombiner with these settings is supplied in
the classifiers directory. Here is an example of loading that classifier and
running it on the sample CONLL data:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -loadClassifier \
classifiers/example.serialized.ncc.ncc.ser.gz -map word=0,answer=1 \
-testFile sample-conll-file.txt
For a more exhaustive description of NERClassifierCombiner go to
http://nlp.stanford.edu/software/ncc-faq.html
PROGRAMMATIC USE
The NERDemo file illustrates a couple of ways of calling the system
programmatically. You should get the same results from
java -cp stanford-ner.jar:lib/*:. -mx300m NERDemo classifiers/english.all.3class.distsim.crf.ser.gz sample.txt
as from using CRFClassifier. For more information on API calls, look in
the enclosed javadoc directory: load index.html in a browser and look
first at the edu.stanford.nlp.ie.crf package and CRFClassifier class.
If you wish to train your own NER systems, look also at the
edu.stanford.nlp.ie package NERFeatureFactory class.
SERVER VERSION
The NER code may also be run as a server listening on a socket:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer 1234
You can specify which model to load with flags, either one on disk:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer -loadClassifier classifiers/all.3class.crf.ser.gz 1234
Or if you have put a model inside the jar file, as a resource under, say, models:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer -loadClassifier models/all.3class.crf.ser.gz 1234
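A hypothetical Python client for that server, assuming a plain newline-terminated, line-based protocol on localhost:1234:

import socket

with socket.create_connection(('localhost', 1234)) as s:
    s.sendall('John Kerry will fly to Paris.\n'.encode('utf-8'))
    print(s.recv(4096).decode('utf-8'))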
RUNNING CLASSIFIERS FROM INSIDE A JAR FILE
The software can run any serialized classifier from within a jar file by
following the -loadClassifier flag by some resource available within a
jar file on the CLASSPATH. An end user can make
their own jar files with the desired NER models contained inside.
This allows single jar file deployment.
PERFORMANCE GUIDELINES
Performance depends on many factors. Speed and memory use depend on
hardware, operating system, and JVM. Accuracy depends on the data
tested on. Nevertheless, in the belief that something is better than
nothing, here are some statistics from one machine on one test set, in
semi-realistic conditions (where the test data is somewhat varied).
ner-eng-ie.crf-3-all2006-distsim.ser.gz (older version of ner-eng-ie.crf-3-all2008-distsim.ser.gz)
Memory: 320MB (on a 32 bit machine)
PERSON ORGANIZATION LOCATION
91.88 82.91 88.21
--------------------
CHANGES
--------------------
2018-02-27 3.9.1 KBP ner models for Chinese and Spanish
2017-06-09 3.8.0 Updated for compatibility
2016-10-31 3.7.0 Improved Chinese NER
2015-12-09 3.6.0 Updated for compatibility
2015-04-20 3.5.2 synch standalone and CoreNLP functionality
2015-01-29 3.5.1 Substantial accuracy improvements
2014-10-26 3.5.0 Upgrade to Java 1.8
2014-08-27 3.4.1 Add Spanish models
2014-06-16 3.4 Fix serialization bug
2014-01-04 3.3.1 Bugfix release
2013-11-12 3.3.0 Update for compatibility
2013-06-19 3.2.0 Improve handling of line-by-line input
2013-04-04 1.2.8 nthreads option
2012-11-11 1.2.7 Improved English 3 class model by including
data from Wikipedia, release Chinese model
2012-07-09 1.2.6 Minor bug fixes
2012-05-22 1.2.5 Fix encoding issue
2012-04-07 1.2.4 Caseless version of English models supported
2012-01-06 1.2.3 Minor bug fixes
2011-09-14 1.2.2 Improved thread safety
2011-06-19 1.2.1 Models reduced in size but on average improved
in accuracy (improved distsim clusters)
2011-05-16 1.2 Normal download includes 3, 4, and 7
class models. Updated for compatibility
with other software releases.
2009-01-16 1.1.1 Minor bug and usability fixes, changed API
2008-05-07 1.1 Additional feature flags, various code updates
2006-09-18 1.0 Initial release

View File

@ -0,0 +1,193 @@
<!-- build.xml file for ant for JavaNLP -->
<!-- A "project" describes a set of targets that may be requested
when Ant is executed. The "default" attribute defines the
target which is executed if no specific target is requested,
and the "basedir" attribute defines the current working directory
from which Ant executes the requested task. This is normally
set to the current working directory.
-->
<project name="JavaNLP" default="compile" basedir=".">
<property name="build.home" value="${basedir}/classes"/>
<property name="build.tests" value="${basedir}/classes"/>
<property name="docs.home" value="${basedir}/docs"/>
<property name="src.home" value="${basedir}/src"/>
<property name="javadoc.home" value="${basedir}/javadoc"/>
<!-- ==================== Compilation Control Options ==================== -->
<!--
These properties control option settings on the Javac compiler when it
is invoked using the <javac> task.
compile.debug Should compilation include the debug option?
compile.deprecation Should compilation include the deprecation option?
compile.optimize Should compilation include the optimize option?
compile.source Source version compatibility
compile.target Target class version compatibility
-->
<property name="compile.debug" value="true"/>
<property name="compile.deprecation" value="false"/>
<property name="compile.optimize" value="true"/>
<property name="compile.source" value="1.8" />
<property name="compile.target" value="1.8" />
<!-- ==================== All Target ====================================== -->
<!--
The "all" target is a shortcut for running the "clean" target followed
by the "compile" target, to force a complete recompile.
-->
<target name="all" depends="clean,compile"
description="Clean build and dist directories, then compile"/>
<!-- ==================== Clean Target ==================================== -->
<!--
The "clean" target deletes any previous "build" and "dist" directory,
so that you can be ensured the application can be built from scratch.
-->
<target name="clean" description="Delete old classes">
<delete dir="${build.home}/edu"/>
</target>
<!-- ==================== Classpath Targets ==================================== -->
<!--
Sets the classpath for this project properly. We now always use the
lib dir within javanlp.
-->
<target name="classpath" description="Sets the classpath">
<path id="compile.classpath">
<fileset dir="${basedir}/lib">
<include name="*.jar"/>
<exclude name="javanlp*"/>
</fileset>
</path>
</target>
<!-- ==================== Compile Target ================================== -->
<!--
The "compile" target transforms source files (from your "src" directory)
into object files in the appropriate location in the build directory.
This example assumes that you will be including your classes in an
unpacked directory hierarchy under "/WEB-INF/classes".
-->
<target name="compile" depends="prepare,classpath"
description="Compile Java sources">
<!-- Compile Java classes as necessary -->
<mkdir dir="${build.home}"/>
<javac srcdir="${src.home}"
destdir="${build.home}"
debug="${compile.debug}"
encoding="utf-8"
deprecation="${compile.deprecation}"
includeantruntime="false"
optimize="${compile.optimize}"
source="${compile.source}"
target="${compile.target}">
<classpath refid="compile.classpath"/>
<compilerarg value="-Xmaxerrs"/>
<compilerarg value="20"/>
<!-- <compilerarg value="-Xlint"/> -->
</javac>
<!-- Copy application resources -->
<!--
<copy todir="${build.home}/WEB-INF/classes">
<fileset dir="${src.home}" excludes="**/*.java"/>
</copy>
-->
</target>
<!-- ==================== Javadoc Target ================================== -->
<!--
The "javadoc" target creates Javadoc API documentation for the Java
classes included in your application. Normally, this is only required
when preparing a distribution release, but is available as a separate
target in case the developer wants to create Javadocs independently.
-->
<target name="javadoc" depends="compile"
description="Create Javadoc API documentation">
<mkdir dir="${javadoc.home}"/>
<javadoc sourcepath="${src.home}"
destdir="${javadoc.home}"
maxmemory="768m"
author="true"
source="${compile.source}"
Overview="${src.home}/edu/stanford/nlp/overview.html"
Doctitle="Stanford JavaNLP API Documentation"
Windowtitle="Stanford JavaNLP API"
packagenames="*">
<bottom><![CDATA[<FONT SIZE=2><A HREF=\"http://nlp.stanford.edu\">Stanford NLP Group</A></FONT>]]></bottom>
<link href="https://docs.oracle.com/javase/8/docs/api/"/>
<classpath>
<fileset dir="${basedir}/lib">
<include name="*.jar"/>
</fileset>
</classpath>
</javadoc>
</target>
<!-- ==================== Prepare Target ================================== -->
<!--
The "prepare" target is used to create the "build" destination directory,
and copy the static contents of your web application to it. If you need
to copy static files from external dependencies, you can customize the
contents of this task.
Normally, this task is executed indirectly when needed.
-->
<target name="prepare">
<!-- Create build directories as needed -->
<mkdir dir="${build.home}"/>
</target>
</project>

View File

@ -0,0 +1,58 @@
# trainFileList = /u/nlp/data/ner/column_data/all.3class.train.old2,/u/nlp/data/ner/column_data/english.extra.3class.train
trainFileList = /u/nlp/data/ner/column_data/ace23.3class.train,/u/nlp/data/ner/column_data/muc6.3class.ptb.train,/u/nlp/data/ner/column_data/muc7.3class.ptb.train,/u/nlp/data/ner/column_data/conll.3class.train,/u/nlp/data/ner/column_data/wikiner.3class.train,/u/nlp/data/ner/column_data/ontonotes.3class.train,/u/nlp/data/ner/column_data/english.extra.3class.train
testFile = /u/nlp/data/ner/column_data/all.3class.test
serializeTo = english.all.3class.distsim.crf.ser.gz
type = crf
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
#distSimLexicon = /u/nlp/data/pos_tags_are_useless/englishGigaword.200.pruned
#distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw.bnc.200
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
# right options for egw4-reut.512 (though effect of having or not is small)
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
useDistSim = true
map = word=0,answer=1
saveFeatureIndexToDisk = true
useClassFeature=true
useWord=true
#useWordPairs=true
useNGrams=true
noMidNGrams=true
maxNGramLeng=6
usePrev=true
useNext=true
#useTags=true
#useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
wordShape=chris2useLC
useDisjunctive=true
disjunctionWidth=5
#useDisjunctiveShapeInteraction=true
maxLeft=1
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,63 @@
# This is better than Jenny's either with or without distsim turned on
# And using iob2 is better for optimal CoNLL performance.
# Features titled "chris2009"
trainFile = /u/nlp/data/ner/column_data/conll.4class.train
# testFile = /u/nlp/data/ner/column_data/conll.4class.testa
serializeTo = english.conll.4class.distsim.crf.ser.gz
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
useDistSim = true
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
# right options for egw4-reut.512 (though effect of having or not is small)
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
map = word=0,answer=1
saveFeatureIndexToDisk = true
useTitle = true
useClassFeature=true
useWord=true
# useWordPairs=true
useNGrams=true
noMidNGrams=true
# maxNGramLeng=6 # Having them all helps, which is the default
usePrev=true
useNext=true
# useTags=true
# useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
maxLeft=1
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
# dan2 better than chris2 on CoNLL data...
wordShape=dan2useLC
useDisjunctive=true
# disjunctionWidth 4 is better than 5 on CoNLL data
disjunctionWidth=4
#useDisjunctiveShapeInteraction=true
type=crf
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
sigma = 20
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,54 @@
trainFileList = /u/nlp/data/ner/column_data/muc6.ptb.train,/u/nlp/data/ner/column_data/muc7.ptb.train
# testFile = /u/nlp/data/ner/column_data/muc7.ptb.devtest
serializeTo = english.muc.7class.distsim.crf.ser.gz
type=crf
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
useDistSim = true
map = word=0,answer=1
saveFeatureIndexToDisk = true
useClassFeature=true
useWord=true
#useWordPairs=true
useNGrams=true
noMidNGrams=true
maxNGramLeng=6
usePrev=true
useNext=true
#useTags=true
#useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
wordShape=chris2useLC
useDisjunctive=true
disjunctionWidth=5
#useDisjunctiveShapeInteraction=true
maxLeft=1
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,4 @@
ner.model=classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz,classifiers/english.all.3class.distsim.crf.ser.gz
ner.useSUTime=false
ner.combinationMode=HIGH_RECALL
serializeTo=example.serialized.ncc.ncc.ser.gz

View File

@ -0,0 +1 @@
java -mx1500m -cp "stanford-ner.jar;lib/*" edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1,2 @@
#!/bin/sh
java -mx500m -cp `dirname $0`/stanford-ner.jar:`dirname $0`/lib/* edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1,4 @@
#!/bin/sh
scriptdir=`dirname $0`
java -mx700m -cp "$scriptdir/stanford-ner.jar:lib/*" edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1 @@
java -mx1000m -cp "stanford-ner.jar;lib/*" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers\english.all.3class.distsim.crf.ser.gz -textFile %1

View File

@ -0,0 +1,4 @@
#!/bin/sh
scriptdir=`dirname $0`
java -mx700m -cp "$scriptdir/stanford-ner.jar:$scriptdir/lib/*" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier $scriptdir/classifiers/english.all.3class.distsim.crf.ser.gz -textFile $1

View File

@ -0,0 +1,9 @@
John PERSON
Kerry PERSON
will O
fly O
to O
Paris LOCATION
this O
weekend O
. O

View File

@ -0,0 +1,2 @@
Barack Obama was born on August 4, 1961 in Honolulu, Hawaii which was 4 days
ago.

View File

@ -0,0 +1,2 @@
The/O fate/O of/O Lehman/ORGANIZATION Brothers/ORGANIZATION ,/O the/O beleaguered/O investment/O bank/O ,/O hung/O in/O the/O balance/O on/O Sunday/O as/O Federal/ORGANIZATION Reserve/ORGANIZATION officials/O and/O the/O leaders/O of/O major/O financial/O institutions/O continued/O to/O gather/O in/O emergency/O meetings/O trying/O to/O complete/O a/O plan/O to/O rescue/O the/O stricken/O bank/O ./O
Several/O possible/O plans/O emerged/O from/O the/O talks/O ,/O held/O at/O the/O Federal/ORGANIZATION Reserve/ORGANIZATION Bank/ORGANIZATION of/ORGANIZATION New/ORGANIZATION York/ORGANIZATION and/O led/O by/O Timothy/PERSON R./PERSON Geithner/PERSON ,/O the/O president/O of/O the/O New/ORGANIZATION York/ORGANIZATION Fed/ORGANIZATION ,/O and/O Treasury/ORGANIZATION Secretary/O Henry/PERSON M./PERSON Paulson/PERSON Jr./PERSON ./O

View File

@ -0,0 +1 @@
The fate of Lehman Brothers, the beleaguered investment bank, hung in the balance on Sunday as Federal Reserve officials and the leaders of major financial institutions continued to gather in emergency meetings trying to complete a plan to rescue the stricken bank. Several possible plans emerged from the talks, held at the Federal Reserve Bank of New York and led by Timothy R. Geithner, the president of the New York Fed, and Treasury Secretary Henry M. Paulson Jr.

View File

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View File

@ -0,0 +1,171 @@
import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ie.crf.*;
import edu.stanford.nlp.io.IOUtils;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.sequences.DocumentReaderAndWriter;
import edu.stanford.nlp.util.Triple;
import java.util.List;
/** This is a demo of calling CRFClassifier programmatically.
* <p>
* Usage: {@code java -mx400m -cp "*" NERDemo [serializedClassifier [fileName]] }
* <p>
* If arguments aren't specified, they default to
* classifiers/english.all.3class.distsim.crf.ser.gz and some hardcoded sample text.
* If run with arguments, it shows some of the ways to get k-best labelings and
* probabilities out with CRFClassifier. If run without arguments, it shows some of
* the alternative output formats that you can get.
* <p>
* To use CRFClassifier from the command line:
* </p><blockquote>
* {@code java -mx400m edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier [classifier] -textFile [file] }
* </blockquote><p>
* Or if the file is already tokenized and one word per line, perhaps in
* a tab-separated value format with extra columns for part-of-speech tag,
* etc., use the version below (note the 's' instead of the 'x'):
* </p><blockquote>
* {@code java -mx400m edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier [classifier] -testFile [file] }
* </blockquote>
*
* @author Jenny Finkel
* @author Christopher Manning
*/
public class NERDemo {
public static void main(String[] args) throws Exception {
String serializedClassifier = "classifiers/english.all.3class.distsim.crf.ser.gz";
if (args.length > 0) {
serializedClassifier = args[0];
}
AbstractSequenceClassifier<CoreLabel> classifier = CRFClassifier.getClassifier(serializedClassifier);
/* For either a file to annotate or for the hardcoded text example, this
demo file shows several ways to process the input, for teaching purposes.
*/
if (args.length > 1) {
/* For the file, it shows (1) how to run NER on a String, (2) how
to get the entities in the String with character offsets, and
(3) how to run NER on a whole file (without loading it into a String).
*/
String fileContents = IOUtils.slurpFile(args[1]);
List<List<CoreLabel>> out = classifier.classify(fileContents);
for (List<CoreLabel> sentence : out) {
for (CoreLabel word : sentence) {
System.out.print(word.word() + '/' + word.get(CoreAnnotations.AnswerAnnotation.class) + ' ');
}
System.out.println();
}
System.out.println("---");
out = classifier.classifyFile(args[1]);
for (List<CoreLabel> sentence : out) {
for (CoreLabel word : sentence) {
System.out.print(word.word() + '/' + word.get(CoreAnnotations.AnswerAnnotation.class) + ' ');
}
System.out.println();
}
System.out.println("---");
List<Triple<String, Integer, Integer>> list = classifier.classifyToCharacterOffsets(fileContents);
for (Triple<String, Integer, Integer> item : list) {
System.out.println(item.first() + ": " + fileContents.substring(item.second(), item.third()));
}
System.out.println("---");
System.out.println("Ten best entity labelings");
DocumentReaderAndWriter<CoreLabel> readerAndWriter = classifier.makePlainTextReaderAndWriter();
classifier.classifyAndWriteAnswersKBest(args[1], 10, readerAndWriter);
System.out.println("---");
System.out.println("Per-token marginalized probabilities");
classifier.printProbs(args[1], readerAndWriter);
// -- This code prints out the first order (token pair) clique probabilities.
// -- But that output is a bit overwhelming, so we leave it commented out by default.
// System.out.println("---");
// System.out.println("First Order Clique Probabilities");
// ((CRFClassifier) classifier).printFirstOrderProbs(args[1], readerAndWriter);
} else {
/* For the hard-coded String, it shows how to run it on a single
sentence, and how to do this and produce several formats, including
slash tags and an inline XML output format. It also shows the full
contents of the {@code CoreLabel}s that are constructed by the
classifier. And it shows getting out the probabilities of different
assignments and an n-best list of classifications with probabilities.
*/
String[] example = {"Good afternoon Rajat Raina, how are you today?",
"I go to school at Stanford University, which is located in California." };
for (String str : example) {
System.out.println(classifier.classifyToString(str));
}
System.out.println("---");
for (String str : example) {
// This one puts in spaces and newlines between tokens, so just print not println.
System.out.print(classifier.classifyToString(str, "slashTags", false));
}
System.out.println("---");
for (String str : example) {
// This one is best for dealing with the output as a TSV (tab-separated column) file.
// The first column gives entities, the second their classes, and the third the remaining text in a document
System.out.print(classifier.classifyToString(str, "tabbedEntities", false));
}
System.out.println("---");
for (String str : example) {
System.out.println(classifier.classifyWithInlineXML(str));
}
System.out.println("---");
for (String str : example) {
System.out.println(classifier.classifyToString(str, "xml", true));
}
System.out.println("---");
for (String str : example) {
System.out.print(classifier.classifyToString(str, "tsv", false));
}
System.out.println("---");
// This gets out entities with character offsets
int j = 0;
for (String str : example) {
j++;
List<Triple<String,Integer,Integer>> triples = classifier.classifyToCharacterOffsets(str);
for (Triple<String,Integer,Integer> trip : triples) {
System.out.printf("%s over character offsets [%d, %d) in sentence %d.%n",
trip.first(), trip.second(), trip.third, j);
}
}
System.out.println("---");
// This prints out all the details of what is stored for each token
int i=0;
for (String str : example) {
for (List<CoreLabel> lcl : classifier.classify(str)) {
for (CoreLabel cl : lcl) {
System.out.print(i++ + ": ");
System.out.println(cl.toShorterString());
}
}
}
System.out.println("---");
}
}
}

View File

@ -0,0 +1,289 @@
Stanford NER - v3.9.1 - 2018-02-27
----------------------------------------------
This package provides a high-performance machine learning based named
entity recognition system, including facilities to train models from
supervised training data and pre-trained models for English.
(c) 2002-2015. The Board of Trustees of The Leland
Stanford Junior University. All Rights Reserved.
Original CRF code by Jenny Finkel.
Additional modules, features, internationalization, compaction, and
support code by Christopher Manning, Dan Klein, Christopher Cox, Huy Nguyen,
Shipra Dingare, Anna Rafferty, and John Bauer.
This release prepared by Jason Bolton.
LICENSE
The software is licensed under the full GPL v2+. Please see the file LICENCE.txt
For more information, bug reports, and fixes, contact:
Christopher Manning
Dept of Computer Science, Gates 2A
Stanford CA 94305-9020
USA
java-nlp-support@lists.stanford.edu
https://nlp.stanford.edu/software/CRF-NER.html
CONTACT
For questions about this distribution, please contact Stanford's JavaNLP group
at java-nlp-user@lists.stanford.edu. We provide assistance on a best-effort
basis.
TUTORIAL
Quickstart guidelines, primarily for end users who wish to use the included NER
models, are below. For further instructions on training your own NER model,
go to https://nlp.stanford.edu/software/crf-faq.html.
INCLUDED SERIALIZED MODELS / TRAINING DATA
The basic included serialized model is a 3 class NER tagger that can
label: PERSON, ORGANIZATION, and LOCATION entities. It is included as
english.all.3class.distsim.crf.ser.gz. It is trained on data from
CoNLL, MUC6, MUC7, ACE, OntoNotes, and Wikipedia.
Because this model is trained on both US
and UK newswire, it is fairly robust across the two domains.
We have also included a 4 class NER tagger trained on the CoNLL 2003
Shared Task training data that labels for PERSON, ORGANIZATION,
LOCATION, and MISC. It is named
english.conll.4class.distsim.crf.ser.gz .
A third model is trained only on data from MUC and
distinguishes between 7 different classes:
english.muc.7class.distsim.crf.ser.gz.
All of the serialized classifiers come in two versions, one trained to
basically expect standard written English capitalization, and the other
to ignore capitalization information. The case-insensitive versions
of the three models are available on the Stanford NER webpage.
These models use a distributional similarity lexicon to improve performance
(by between 1.5%-3% F-measure). The distributional similarity features
make the models perform substantially better, but they require rather
more memory. The distsim models are included in the release package.
The nodistsim versions of the same models may be available on the
Stanford NER webpage.
Finally, we have models for other languages, including two German models,
a Chinese model, and a Spanish model. The files for these models can be
found at:
http://nlp.stanford.edu/software/CRF-NER.html
QUICKSTART INSTRUCTIONS
This NER system requires Java 1.8 or later.
Providing java is on your PATH, you should be able to run an NER GUI
demonstration by just clicking. It might work to double-click on the
stanford-ner.jar archive but this may well fail as the operating system
does not give Java enough memory for our NER system, so it is safer to
instead double click on the ner-gui.bat icon (Windows) or ner-gui.sh
(Linux/Unix/MacOSX). Then, using the top option from the Classifier
menu, load a CRF classifier from the classifiers directory of the
distribution. You can then either load a text file or web page from
the File menu, or decide to use the default text in the window. Finally,
you can now named entity tag the text by pressing the Run NER button.
From a command line, you need to have java on your PATH and the
stanford-ner.jar file and the lib directory in your CLASSPATH. (The way of doing this depends on
your OS/shell.) The supplied ner.bat and ner.sh should work to allow
you to tag a single file. For example, for Windows:
ner file
Or on Unix/Linux you should be able to parse the test file in the distribution
directory with the command:
java -mx600m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -textFile sample.txt
Here's an output option that will print out entities and their class to
the first two columns of a tab-separated columns output file:
java -mx600m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -outputFormat tabbedEntities -textFile sample.txt > sample.tsv
When run from a jar file, you also have the option of using a serialized
classifier contained in the jar file.
USING FULL STANFORD CORENLP NER FUNCTIONALITY
This standalone distribution also allows access to the full NER
capabilities of the Stanford CoreNLP pipeline. These capabilities
can be accessed via the NERClassifierCombiner class.
NERClassifierCombiner allows for multiple CRFs to be used together,
and has options for recognizing numeric sequence patterns and time
patterns with the rule-based NER of SUTime.
Suppose one combines three CRF's CRF-1,CRF-2, and CRF-3 with the
NERClassifierCombiner. When the NERClassifierCombiner runs, it will
first apply the NER tags of CRF-1 to the text, then it will apply
CRF-2's NER tags to any tokens not tagged by CRF-1 and so on. If
the option ner.combinationMode is set to NORMAL (default), any label
applied by CRF-1 cannot be applied by subsequent CRF's. For instance
if CRF-1 applies the LOCATION tag, no other CRF's LOCATION tag will be
used. If ner.combinationMode is set to HIGH_RECALL, this limitation
will be deactivated.
To use NERClassifierCombiner at the command-line, the jars in lib
and stanford-ner.jar must be in the CLASSPATH. Here is an example command:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz \
-ner.useSUTime false -textFile sample-w-time.txt
Let's break this down a bit. The flag "-ner.model" should be followed by a
list of CRF's to be combined by the NERClassifierCombiner. Some serialized
CRF's are provided in the classifiers directory. In this example the CRF's
trained on the CONLL 4 class data and the MUC 7 class data are being combined.
When the flag "-ner.useSUTime" is followed by "false", SUTime is shut off. You should
note that when the "false" is omitted, the text "4 days ago" suddenly is
tagged with DATE. These are the kinds of phrases SUTime can identify.
NERClassifierCombiner can be run on different types of input as well. Here is
an example which is run on CONLL style input:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz \
-map word=0,answer=1 -testFile sample-conll-file.txt
It is crucial to include the "-map word=0,answer=1", which specifies that
the input test file has the words in the first column and the answer labels
in the second column.
It is also possible to serialize and load an NERClassifierCombiner.
This command loads the three sample crfs with combinationMode=HIGH_RECALL
and SUTime=false, and dumps them to a file named
test.serialized.ncc.ncc.ser.gz
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -ner.model \
classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz,\
classifiers/english.all.3class.distsim.crf.ser.gz -ner.useSUTime false \
-ner.combinationMode HIGH_RECALL -serializeTo test.serialized.ncc.ncc.ser.gz
An example serialized NERClassifierCombiner with these settings is supplied in
the classifiers directory. Here is an example of loading that classifier and
running it on the sample CONLL data:
java -mx2g edu.stanford.nlp.ie.NERClassifierCombiner -loadClassifier \
classifiers/example.serialized.ncc.ncc.ser.gz -map word=0,answer=1 \
-testFile sample-conll-file.txt
For a more exhaustive description of NERClassifierCombiner go to
http://nlp.stanford.edu/software/ncc-faq.html
PROGRAMMATIC USE
The NERDemo file illustrates a couple of ways of calling the system
programmatically. You should get the same results from
java -cp stanford-ner.jar:lib/*:. -mx300m NERDemo classifiers/english.all.3class.distsim.crf.ser.gz sample.txt
as from using CRFClassifier. For more information on API calls, look in
the enclosed javadoc directory: load index.html in a browser and look
first at the edu.stanford.nlp.ie.crf package and CRFClassifier class.
If you wish to train your own NER systems, look also at the
edu.stanford.nlp.ie package NERFeatureFactory class.
SERVER VERSION
The NER code may also be run as a server listening on a socket:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer 1234
You can specify which model to load with flags, either one on disk:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer -loadClassifier classifiers/all.3class.crf.ser.gz 1234
Or if you have put a model inside the jar file, as a resource under, say, models:
java -mx1000m -cp stanford-ner.jar:lib/* edu.stanford.nlp.ie.NERServer -loadClassifier models/all.3class.crf.ser.gz 1234
RUNNING CLASSIFIERS FROM INSIDE A JAR FILE
The software can run any serialized classifier from within a jar file by
following the -loadClassifier flag by some resource available within a
jar file on the CLASSPATH. An end user can make
their own jar files with the desired NER models contained inside.
This allows single jar file deployment.
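For example, suppose you have packed a model of your own into a jar named
my-ner-models.jar under the resource path models/english.all.3class.distsim.crf.ser.gz
(both names here are placeholders for this sketch, not files shipped with the
release). Then an invocation along these lines should work:
java -mx1000m -cp stanford-ner.jar:my-ner-models.jar:lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier models/english.all.3class.distsim.crf.ser.gz -textFile sample.txt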
PERFORMANCE GUIDELINES
Performance depends on many factors. Speed and memory use depend on
hardware, operating system, and JVM. Accuracy depends on the data
tested on. Nevertheless, in the belief that something is better than
nothing, here are some statistics from one machine on one test set, in
semi-realistic conditions (where the test data is somewhat varied).
ner-eng-ie.crf-3-all2006-distsim.ser.gz (older version of ner-eng-ie.crf-3-all2008-distsim.ser.gz)
Memory: 320MB (on a 32 bit machine)
PERSON ORGANIZATION LOCATION
91.88 82.91 88.21
--------------------
CHANGES
--------------------
2018-02-27 3.9.1 KBP ner models for Chinese and Spanish
2017-06-09 3.8.0 Updated for compatibility
2016-10-31 3.7.0 Improved Chinese NER
2015-12-09 3.6.0 Updated for compatibility
2015-04-20 3.5.2 synch standalone and CoreNLP functionality
2015-01-29 3.5.1 Substantial accuracy improvements
2014-10-26 3.5.0 Upgrade to Java 1.8
2014-08-27 3.4.1 Add Spanish models
2014-06-16 3.4 Fix serialization bug
2014-01-04 3.3.1 Bugfix release
2013-11-12 3.3.0 Update for compatibility
2013-11-12 3.3.0 Update for compatibility
2013-06-19 3.2.0 Improve handling of line-by-line input
2013-04-04 1.2.8 nthreads option
2012-11-11 1.2.7 Improved English 3 class model by including
data from Wikipedia, release Chinese model
2012-07-09 1.2.6 Minor bug fixes
2012-05-22 1.2.5 Fix encoding issue
2012-04-07 1.2.4 Caseless version of English models supported
2012-01-06 1.2.3 Minor bug fixes
2011-09-14 1.2.2 Improved thread safety
2011-06-19 1.2.1 Models reduced in size but on average improved
in accuracy (improved distsim clusters)
2011-05-16 1.2 Normal download includes 3, 4, and 7
class models. Updated for compatibility
with other software releases.
2009-01-16 1.1.1 Minor bug and usability fixes, changed API
2008-05-07 1.1 Additional feature flags, various code updates
2006-09-18 1.0 Initial release

View File

@ -0,0 +1,193 @@
<!-- build.xml file for ant for JavaNLP -->
<!-- A "project" describes a set of targets that may be requested
when Ant is executed. The "default" attribute defines the
target which is executed if no specific target is requested,
and the "basedir" attribute defines the current working directory
from which Ant executes the requested task. This is normally
set to the current working directory.
-->
<project name="JavaNLP" default="compile" basedir=".">
<property name="build.home" value="${basedir}/classes"/>
<property name="build.tests" value="${basedir}/classes"/>
<property name="docs.home" value="${basedir}/docs"/>
<property name="src.home" value="${basedir}/src"/>
<property name="javadoc.home" value="${basedir}/javadoc"/>
<!-- ==================== Compilation Control Options ==================== -->
<!--
These properties control option settings on the Javac compiler when it
is invoked using the <javac> task.
compile.debug Should compilation include the debug option?
compile.deprecation Should compilation include the deprecation option?
compile.optimize Should compilation include the optimize option?
compile.source Source version compatibility
compile.target Target class version compatibility
-->
<property name="compile.debug" value="true"/>
<property name="compile.deprecation" value="false"/>
<property name="compile.optimize" value="true"/>
<property name="compile.source" value="1.8" />
<property name="compile.target" value="1.8" />
<!-- ==================== All Target ====================================== -->
<!--
The "all" target is a shortcut for running the "clean" target followed
by the "compile" target, to force a complete recompile.
-->
<target name="all" depends="clean,compile"
description="Clean build and dist directories, then compile"/>
<!-- ==================== Clean Target ==================================== -->
<!--
The "clean" target deletes any previous "build" and "dist" directory,
so that you can be ensured the application can be built from scratch.
-->
<target name="clean" description="Delete old classes">
<delete dir="${build.home}/edu"/>
</target>
<!-- ==================== Classpath Targets ==================================== -->
<!--
Sets the classpath for this project properly. We now always use the
lib dir within javanlp.
-->
<target name="classpath" description="Sets the classpath">
<path id="compile.classpath">
<fileset dir="${basedir}/lib">
<include name="*.jar"/>
<exclude name="javanlp*"/>
</fileset>
</path>
</target>
<!-- ==================== Compile Target ================================== -->
<!--
The "compile" target transforms source files (from your "src" directory)
into object files in the appropriate location in the build directory.
This example assumes that you will be including your classes in an
unpacked directory hierarchy under "/WEB-INF/classes".
-->
<target name="compile" depends="prepare,classpath"
description="Compile Java sources">
<!-- Compile Java classes as necessary -->
<mkdir dir="${build.home}"/>
<javac srcdir="${src.home}"
destdir="${build.home}"
debug="${compile.debug}"
encoding="utf-8"
deprecation="${compile.deprecation}"
includeantruntime="false"
optimize="${compile.optimize}"
source="${compile.source}"
target="${compile.target}">
<classpath refid="compile.classpath"/>
<compilerarg value="-Xmaxerrs"/>
<compilerarg value="20"/>
<!-- <compilerarg value="-Xlint"/> -->
</javac>
<!-- Copy application resources -->
<!--
<copy todir="${build.home}/WEB-INF/classes">
<fileset dir="${src.home}" excludes="**/*.java"/>
</copy>
-->
</target>
<!-- ==================== Javadoc Target ================================== -->
<!--
The "javadoc" target creates Javadoc API documentation for the Java
classes included in your application. Normally, this is only required
when preparing a distribution release, but is available as a separate
target in case the developer wants to create Javadocs independently.
-->
<target name="javadoc" depends="compile"
description="Create Javadoc API documentation">
<mkdir dir="${javadoc.home}"/>
<javadoc sourcepath="${src.home}"
destdir="${javadoc.home}"
maxmemory="768m"
author="true"
source="${compile.source}"
Overview="${src.home}/edu/stanford/nlp/overview.html"
Doctitle="Stanford JavaNLP API Documentation"
Windowtitle="Stanford JavaNLP API"
packagenames="*">
<bottom><![CDATA[<FONT SIZE=2><A HREF=\"http://nlp.stanford.edu\">Stanford NLP Group</A></FONT>]]></bottom>
<link href="https://docs.oracle.com/javase/8/docs/api/"/>
<classpath>
<fileset dir="${basedir}/lib">
<include name="*.jar"/>
</fileset>
</classpath>
</javadoc>
</target>
<!-- ==================== Prepare Target ================================== -->
<!--
The "prepare" target is used to create the "build" destination directory,
and copy the static contents of your web application to it. If you need
to copy static files from external dependencies, you can customize the
contents of this task.
Normally, this task is executed indirectly when needed.
-->
<target name="prepare">
<!-- Create build directories as needed -->
<mkdir dir="${build.home}"/>
</target>
</project>

View File

@ -0,0 +1,58 @@
# trainFileList = /u/nlp/data/ner/column_data/all.3class.train.old2,/u/nlp/data/ner/column_data/english.extra.3class.train
trainFileList = /u/nlp/data/ner/column_data/ace23.3class.train,/u/nlp/data/ner/column_data/muc6.3class.ptb.train,/u/nlp/data/ner/column_data/muc7.3class.ptb.train,/u/nlp/data/ner/column_data/conll.3class.train,/u/nlp/data/ner/column_data/wikiner.3class.train,/u/nlp/data/ner/column_data/ontonotes.3class.train,/u/nlp/data/ner/column_data/english.extra.3class.train
testFile = /u/nlp/data/ner/column_data/all.3class.test
serializeTo = english.all.3class.distsim.crf.ser.gz
type = crf
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
#distSimLexicon = /u/nlp/data/pos_tags_are_useless/englishGigaword.200.pruned
#distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw.bnc.200
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
# right options for egw4-reut.512 (though effect of having or not is small)
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
useDistSim = true
map = word=0,answer=1
saveFeatureIndexToDisk = true
useClassFeature=true
useWord=true
#useWordPairs=true
useNGrams=true
noMidNGrams=true
maxNGramLeng=6
usePrev=true
useNext=true
#useTags=true
#useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
wordShape=chris2useLC
useDisjunctive=true
disjunctionWidth=5
#useDisjunctiveShapeInteraction=true
maxLeft=1
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,63 @@
# This is better than Jenny's either with or without distsim turned on
# And using iob2 is better for optimal CoNLL performance.
# Features titled "chris2009"
trainFile = /u/nlp/data/ner/column_data/conll.4class.train
# testFile = /u/nlp/data/ner/column_data/conll.4class.testa
serializeTo = english.conll.4class.distsim.crf.ser.gz
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
useDistSim = true
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
# right options for egw4-reut.512 (though effect of having or not is small)
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
map = word=0,answer=1
saveFeatureIndexToDisk = true
useTitle = true
useClassFeature=true
useWord=true
# useWordPairs=true
useNGrams=true
noMidNGrams=true
# maxNGramLeng=6 # Having them all helps, which is the default
usePrev=true
useNext=true
# useTags=true
# useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
maxLeft=1
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
# dan2 better than chris2 on CoNLL data...
wordShape=dan2useLC
useDisjunctive=true
# disjunctionWidth 4 is better than 5 on CoNLL data
disjunctionWidth=4
#useDisjunctiveShapeInteraction=true
type=crf
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
sigma = 20
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,54 @@
trainFileList = /u/nlp/data/ner/column_data/muc6.ptb.train,/u/nlp/data/ner/column_data/muc7.ptb.train
# testFile = /u/nlp/data/ner/column_data/muc7.ptb.devtest
serializeTo = english.muc.7class.distsim.crf.ser.gz
type=crf
wordFunction = edu.stanford.nlp.process.AmericanizeFunction
distSimLexicon = /u/nlp/data/pos_tags_are_useless/egw4-reut.512.clusters
numberEquivalenceDistSim = true
unknownWordDistSimClass = 0
useDistSim = true
map = word=0,answer=1
saveFeatureIndexToDisk = true
useClassFeature=true
useWord=true
#useWordPairs=true
useNGrams=true
noMidNGrams=true
maxNGramLeng=6
usePrev=true
useNext=true
#useTags=true
#useWordTag=true
useLongSequences=true
useSequences=true
usePrevSequences=true
useTypeSeqs=true
useTypeSeqs2=true
useTypeySequences=true
useOccurrencePatterns=true
useLastRealWord=true
useNextRealWord=true
#useReverse=false
normalize=true
# normalizeTimex=true
wordShape=chris2useLC
useDisjunctive=true
disjunctionWidth=5
#useDisjunctiveShapeInteraction=true
maxLeft=1
readerAndWriter=edu.stanford.nlp.sequences.ColumnDocumentReaderAndWriter
useObservedSequencesOnly=true
useQN = true
QNsize = 25
# makes it go faster
featureDiffThresh=0.05

View File

@ -0,0 +1,4 @@
ner.model=classifiers/english.conll.4class.distsim.crf.ser.gz,classifiers/english.muc.7class.distsim.crf.ser.gz,classifiers/english.all.3class.distsim.crf.ser.gz
ner.useSUTime=false
ner.combinationMode=HIGH_RECALL
serializeTo=example.serialized.ncc.ncc.ser.gz

View File

@ -0,0 +1 @@
java -mx1500m -cp "stanford-ner.jar;lib/*" edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1,2 @@
#!/bin/sh
java -mx500m -cp `dirname $0`/stanford-ner.jar:`dirname $0`/lib/* edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1,4 @@
#!/bin/sh
scriptdir=`dirname $0`
java -mx700m -cp "$scriptdir/stanford-ner.jar:lib/*" edu.stanford.nlp.ie.crf.NERGUI

View File

@ -0,0 +1 @@
java -mx1000m -cp stanford-ner.jar;lib/* edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers\english.all.3class.distsim.crf.ser.gz -textFile %1

View File

@ -0,0 +1,4 @@
#!/bin/sh
scriptdir=`dirname $0`
java -mx700m -cp "$scriptdir/stanford-ner.jar:$scriptdir/lib/*" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier $scriptdir/classifiers/english.all.3class.distsim.crf.ser.gz -textFile $1

View File

@ -0,0 +1,9 @@
John PERSON
Kerry PERSON
will O
fly O
to O
Paris LOCATION
this O
weekend O
. O

View File

@ -0,0 +1,2 @@
Barack Obama was born on August 4, 1961 in Honolulu, Hawaii which was 4 days
ago.

Binary file not shown.

View File

@ -2,6 +2,16 @@
AUTHOR="Lillian Pierson",
TITLE="Data Science für Dummies",
PUBLISHER="WILEY-VCH Verlag GmbH \& Co. KGaA",
YEAR=2016,
ADDRESS="Weinheim"
YEAR=2016
}
#stanford NER:
@INPROCEEDINGS{finkel2005,
AUTHOR="Jenny Rose Finkel and Trond Grenager and Christopher Manning",
TITLE="Incorporating Non-local Information into Information Extraction Systems by Gibbs Sampling",
BOOKTITLE="Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics",
PUBLISHER="ACL",
YEAR=2005
}
# pp. 363-370. #http://nlp.stanford.edu/~manning/papers/gibbscrf3.pdf
# webhose.io Dokumentation:
# https://docs.webhose.io/docs/output-reference

View File

@ -3,12 +3,16 @@
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[pdftex]{graphicx}
%\usepackage[ngerman]{babel}
%for lists
\usepackage{listings}
\usepackage{enumitem}
\usepackage{colortbl}
\usepackage{xcolor}
\usepackage{soul}
\usepackage{cleveref}
\usepackage{todonotes}
%for hyperlinks
\usepackage{hyperref}
\AtBeginDocument{\renewcommand{\chaptername}{}}
@ -20,8 +24,6 @@
\definecolor{comments}{cmyk}{1,0,1,0}
\newcommand{\al}[1]{\todo[inline]{\color{comments}{AL: #1}}}
\definecolor{uhhred}{cmyk}{0,100,100,0}
\begin{document}
@ -35,8 +37,10 @@
\begin{center}
{\color{uhhred}\textbf{\so{BACHELORTHESIS}}}
\vspace*{2.0cm}\\
{\LARGE \textbf{Interactive Labeling of Unclassified Data\\Using the Example of Recognition of Company Mergers}}
%or: Incremental labeling of an unknown data set using the example of classification of news articles
{\LARGE \textbf{Prediction of Company Mergers\\Using Interactive Labeling\\and Machine Learning Methods}}
%or: Incremental labeling of an unknown data set using the example of classification of news articles OR
%Recognizing M\&As in News Articles\\Using Interactive Labeling\\and Machine Learning Methods
%Interactive Labeling of Unclassified Data\\Using the Example of Recognition of Company Mergers
\vspace*{2.0cm}\\
vorgelegt von
\vspace*{0.4cm}\\
@ -66,13 +70,13 @@ BLABLA ABSTRACT
\mainmatter
%Chapter Introduction
%Chapter 1 Introduction
%####################
\chapter{Introduction}
\label{chap:introduction}
\textit{
In this chapter...In \cref{sec:motivation} the motivation, then in \cref{sec:goals} the goals, blablabla...
In this chapter...In Section \ref{sec:motivation} the motivation, then in Section \ref{sec:goals} the goals...
}
\section{Motivation}
@ -89,14 +93,14 @@ In this thesis we want to present an alternative data labeling method that allow
We want to compare a conventional method of data labeling with an alternative, incremental method using the following example: The aim is to investigate news articles about recent mergers ('mergers and acquisitions') and to classify them accordingly. With the help of the labeled data set, different classification models will be applied and optimized so that a prediction about future news articles will be possible.
\section{Outline}
about the outline...
% something about the outline goes here...
\bigskip
\paragraph{Summary:}
\textit{\newline In this chapter we discussed ... The following chapter deals with blabla.}
%Chapter State of the Art
%Chapter 2 State of the Art
%##########################
\chapter{State of the Art}
\label{state_of_the_art}
@ -113,29 +117,27 @@ In this thesis we want to present an alternative data labeling method that allow
\textit{\newline In this chapter we have described ... are described in the next chapter. In the next chapter we describe...
}
%Chapter Background
%Chapter 3 Background
%####################
\chapter{Background and Related Work}
\label{chap:background}
\textit{
In this chapter...In \cref{sec:news} news sources are introduced, then blablabla...
In this chapter...In Section \ref{sec:news} news sources are introduced...
}
\section{Business News about Mergers}
\label{sec:news}
\subsection{Company Mergers}
When two companies merge, ... When shares of a company are sold, ... Blabla...
When two companies merge, ... When shares of a company are sold,...
\subsection{Webhose.io as Source for News Articles}
As a source for our initial data set, RSS feeds from established business news agencies such as Reuters or Bloomberg come into consideration. However, when crawling RSS feeds, it is not possible to retrieve news from a longer period in the past. Since we want to analyze news of the last 12 months, we obtain the data set from the provider webhose.io. It offers access to English news articles from the sections 'Financial News', 'Finance' and 'Business', among others. As we are only interested in reliable sources, we limit our request to the websites of Reuters, Bloomberg, Financial Times, The Economist and ...
As a source for our initial data set, RSS feeds from established business news agencies such as \textit{Reuters} or \textit{Bloomberg} come into consideration. However, when crawling RSS feeds, it is not possible to retrieve news from a longer period in the past. Since we want to analyze news from a period of 12 months, we obtain the data set from the provider \textit{webhose.io}\footnote{\url{https://webhose.io/}}. It offers access to English news articles from sections like \textit{Financial News}, \textit{Finance} and \textit{Business} at affordable fees compared to the news agencies' offers. As we are only interested in reliable sources, we limit our request to the websites of the news agencies \textit{Reuters, Bloomberg, Financial Times, CNN, The Economist} and \textit{The Guardian}.
\section{Supervised Machine Learning Problems}
\subsubsection{Structured / Unstructured Data}
\subsubsection{Structured and Unstructured Data}
\subsection{Classification Problems}
\subsubsection{Binary Classification}
@ -185,55 +187,119 @@ In this chapter we ... blabla are described in section bla.
In the next chapter we describe...
}
%Chapter Design
%Chapter 4 Design
%###########################
\chapter{Design}
\label{chap:design}
\textit{
In this chapter... In \cref{sec:overview} we give an overview of all, then in \cref{sec:pipeline} the data processing pipeline, blablabla...
In this chapter... In Section \ref{sec:overview} we give an overview of the approach, then in Section \ref{sec:pipeline} we describe the data processing pipeline, blablabla...
}
\section{Overview}
\label{sec:overview}
\jk{What has to be done overall, which subproblems have to be addressed}
\jk{What has to be done overall, which subproblems have to be addressed. Discuss alternatives, make decisions based on criteria. This is where your own work goes, no related work or methods that already exist. Only relevant if you compare against them.}
\jk{Discuss alternatives, make decisions based on criteria}
First, we need to collect appropriate data, then label a data set manually, then, ....\\
\\
% insert the data processing pipeline as a diagram here:
Data Selection > Labeling > Preprocessing > Model Selection > Recognition of Merger Partners
\jk{Some of this may still be moved to the 'Background' chapter. This is where your own work goes, no related work or methods that already exist. Only relevant if you compare against them.}
\section{Data Selection}
\label{sec:data_selection}
\section{Data Processing Pipeline}
\label{sec:pipeline}
Before we can start with the data processing, we need to identify and select appropriate data. We downloaded news articles covering 12 months (the year 2017) from the website \url{webhose.io}, as described in Chapter \ref{chap:implementation}, Section \ref{sec:data_download}.
As webhose.io is a secondary source that merely crawls the news feeds itself, it may happen that some RSS feeds are not parsed correctly or that an article is tagged with a wrong topic in its \textit{site categories}. The downloaded files also contain blog entries, user comments, videos, graphical content and other spam, which we have to filter out. We also do not need pages that merely quote Reuters and similar agencies. Besides this, we are only interested in English news articles. \\
After we have filtered out all the irrelevant data, we receive a data set of XX.XXX news articles that we store in a csv file.
The CSV file contains the following nine columns:
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
SectionTitle & Title & SiteSection & Text & Uuid & Timestamp & Site & SiteFull & Url \\
\hline
\end{tabular}
\end{center}
The individual columns contain:
\begin{itemize}
\item \textbf{SectionTitle:} The name of the news feed section, e.g. \textit{'Reuters | Financial News'}.
\item \textbf{Title:} The news article's headline, e.g. \textit{'EU antitrust ruling on Microsoft buy of GitHub due by October 19'}.
\item \textbf{SiteSection:} The link to the section of the site where the thread was created, e.g. \textit{'http://feeds.reuters.com/reuters/financialsNews'}.
\item \textbf{Text:} The article's plain text.
\item \textbf{Uuid:} Universally unique identifier, representing the article's thread.
\item \textbf{Timestamp:} The thread's publishing date/time in the format \textit{YYYY-MM-DDThh:mm:ss.sss+hh:mm}, e.g. \textit{'2018-09-17T20:00:00.000+03:00'}.
\item \textbf{Site:} The top level domain of the article's site, e.g. \textit{'reuters.com'}
\item \textbf{SiteFull:} The complete domain of the article's site, e.g. \textit{'reuters.com'}
\item \textbf{Url:} The link to the top of the article's thread, e.g. \textit{'https://www.reuters.com/article/us-github-m-a-microsoft-eu/eu-antitrust-ruling-on-microsoft-buy-of-github-due-by-october-19-idUSKCN1LX114'}
\end{itemize}
The columns \textbf{Title} and \textbf{Text} contain our main data, whereas the remaining attributes are metadata.
\section{Labeling}
From our data set of XX.XXX news articles, we select 10.000 articles\footnote{833 or 834 articles of each month} to proceed with the labeling process.
\subsection{Conventional Method}
\subsubsection{Top-Down / Waterfall}
\begin{enumerate}[label=(\alph*)]
\item \textbf{Data Labeling}
\item \textbf{Data Cleaning}
\item \textbf{Model Building}
\item \textbf{Analysis of wrongly predicted instances}\\
=> optionally back to step (a)\footnote{In practice, this step is rarely done.}
\item \textbf{New Hypotheses}\\
=> back to (c); optionally back to step (b)
\end{enumerate}
\subsection{Interactive Method}
\subsubsection{Visual Analytics}
\subsubsection{Agile Model Development}
\subsubsection{Unbalanced Data Set}
\section{Preprocessing}
In order to use the news articles for machine learning algorithms, we must first prepare and filter the texts appropriately (a code sketch of these steps follows the list below):
\begin{description}
\item \textbf{Removing punctuation marks}\\
We replace all punctuation marks with white spaces.
\item \textbf{Tokenization}\\
Every news article is split into a list of single words.
\item \textbf{Leaving out numbers}\\
We ignore all numbers in the news article.
\item \textbf{Transforming words to lower case}\\
Every word is transformed to lower case.
\item \textbf{Word stemming}\\
We reduce every word to its word stem (e.g. 'approves' to 'approv').
\item \textbf{Ignoring stop words}\\
We filter out extremely common words ('a', 'about', 'above', 'after', 'again', etc.) and other unwanted terms ('reuters', 'bloomberg', etc.).
\end{description}
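The following minimal sketch illustrates these preprocessing steps in Python, assuming NLTK's \texttt{PorterStemmer} and a precomputed set of stop words (the function name and the regular expression are illustrative, not our final implementation):
\begin{lstlisting}[language=Python, breaklines=true]
import re
from nltk.stem.porter import PorterStemmer

def preprocess(text, stop_words):
    '''turns an article text into a list of word stems:
    removes punctuation marks and numbers, transforms
    to lower case, stems and filters out stop words.
    '''
    stemmer = PorterStemmer()
    # replace punctuation marks with white spaces
    text = re.sub(r'[^\w\s]', ' ', text)
    # tokenization: split text into single words
    words = text.split()
    stems = []
    for word in words:
        # leave out numbers
        if word.isalpha():
            # transform to lower case, reduce to word stem
            stem = stemmer.stem(word.lower())
            # ignore stop words
            if stem not in stop_words:
                stems.append(stem)
    return stems

print(preprocess('Microsoft approves the deal.', {'the'}))
# ['microsoft', 'approv', 'deal']
\end{lstlisting}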
\section{Model Selection}
\subsection{Naive Bayes}
We compare the Gaussian and the multinomial variant of the Naive Bayes classifier (\textit{GaussianNB} vs. \textit{MultinomialNB}).
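As a first sketch, a multinomial model on word counts could be set up with \textit{scikit-learn} as follows (toy data only, not our actual pipeline):
\begin{lstlisting}[language=Python, breaklines=true]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

# toy training data with two classes (1: merger, 6: irrelevant)
texts = ['company a acquires company b',
         'quarterly profits of company c rose']
labels = [1, 6]

# bag of words feeding a multinomial Naive Bayes classifier
classifier = Pipeline([('bow', CountVectorizer()),
                       ('nb', MultinomialNB())])
classifier.fit(texts, labels)
print(classifier.predict(['company x wants to acquire company y']))
\end{lstlisting}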
\subsection{SVM}
\subsection{Decision Tree}
\section{Recognition of Merger Partners}
\subsubsection{Named Entity Recognition (NER)}
In this chapter we... In the next chapter...
}
% Chapter 5 Data Exploration
%###########################
\chapter{Data Exploration}
\label{chap:exploration}
\textit{
In this chapter we explore our textual corpus, which consists of the news articles' headlines and plain texts.
}
\section{Text Corpus Exploration}
% Visualizations with pyplot/seaborn go here.
\subsection{Number of Features}
% Most important features?
\subsection{Length of Articles}
The average length of the news articles examined is [X] words.
\subsection{Most Common Words}
% AFTER data preprocessing! (use our own BOW)
% First a chart/word cloud of the whole corpus,
% then only the articles about mergers.
\subsubsection{Word Cloud}
% e.g. word cloud of the Microsoft-GitHub merger article.
\subsection{Distribution of Company Names}
'XY' is the most frequently used company name in the old dataset.
\bigskip
\paragraph{Summary:}
\textit{\newline
In this chapter we... In the next chapter...
}
% Chapter 6 Data Labeling
%###########################
\chapter{Data Labeling}
\label{chap:labeling}
\textit{
This chapter describes and compares two different data labeling processes: a conventional labeling method and an interactive method.
}
\section{Conventional Method}
\subsection{Data Set}
First, we label a slightly smaller data set in a conventional way. The data set consists of 1497 news articles that were downloaded via \textit{webhose.io}. It contains news articles from different \textit{Reuters} RSS feeds from the period of one month\footnote{The timeframe is May 25 -- June 25, 2018, retrieved on June 25, 2018.}. Here, we only select articles that contain at least one of the keywords \textit{'merger', 'acquisition', 'take over', 'deal', 'transaction'} or \textit{'buy'} in the headline.
With the following query we download the desired data from \textit{webhose.io}:\\\\
\texttt{
thread.title:(merger OR merges OR merge OR merged
OR acquisition
OR "take over"
\noindent\hspace*{42mm}%
OR "take-over" OR takeover
OR deal OR transaction OR buy) \\
is\_first:true \\
site\_type:news \\
site:reuters.com \\
language:english}
\subsection{Classification}
The articles were classified in a binary fashion with the two labels:
\begin{description}
\item[0:]{Company A and B merge}
\item[1:]{other}
\end{description}
The process of reading and labeling the 1497 news articles took about 30 hours in total.
\subsection{Difficulties}
Some article texts were difficult to classify even when read carefully.
Here are a few examples of the difficulties that showed up:
\begin{itemize}
\item \textit{'Company A acquires more than 50\% of the shares of company B.'}\\ => How should share sales be handled? Actually, this means a change of ownership, even if it is not a real merger.
\item \textit{'Company X will buy/wants to buy company Y.'} \\=> Will the merger definitely take place? What circumstances does it depend on? The whole article has to be read.
\item \textit{'Last year company X and company Y merged. Now company A wants to invest more in renewable energies.'}\\ => Only an incidental remark deals with a merger that is not taking place right now. The main topic of the article is about something completely different.
\end{itemize}
These difficulties led to the idea of using different labeling classes, which we finally implemented in the interactive labeling method.
\section{Interactive Method}
%Advantage: could yield better results, as it is more general and uses a larger data set
\subsection{Data Set}
For the interactive labeling method, we use the data set of 10.000 articles covering a whole year, as described in Chapter \ref{chap:design}, Section \ref{sec:data_selection}.
\subsection{Classification}
For the multi-class labeling we use the following six classes:
\begin{description}
\item[1:]{Company A and B merge}
\item[2:]{Merger is pending}
\item[3:]{Merger is aborted}
\item[4:]{Share sale}
\item[5:]{Merger as incidental remark}
\item[6:]{Irrelevant news}
\end{description}
\subsection{Selection of Articles}
From each month we select 10 articles at random. As such a sample is likely to be dominated by the classes with many articles, this could be mitigated by stratified sampling: first apply named entity recognition, then randomize fairly over the classes, i.e. select a set of categories and pick one article of each category at random. In total, we label about 1\% of all articles.
\subsection{Procedure}
\begin{enumerate}
\item Build first models, e.g. a Naive Bayes classifier, and apply them to all articles. This yields a vector of class probabilities for every article: $(K_1, K_2, \dots, K_6)$.
\item Adopt the labels of the clear cases, i.e. the articles with $K_x > 80\%$ and all other $K_y < 10\%$ (with $x \in \{1,\dots,6\}$ and $y \neq x$); see the code sketch below. Our claim is that about 10\% of all articles are unambiguous. We check this by randomly sampling 10 articles of each class.
\item Identify the highly ambiguous cases, where more than one class has a similar probability, e.g. $(5\%, 5\%, 5\%, \dots)$ or $(80\%, 80\%, 0\%, 0\%, \dots)$, look at about 100 of them and label them manually.
\item Repeat from step 1 about 3--4 times, until about 95\% of all cases are clear. For the remaining unclear cases, we inspect samples to find out why they fail; if necessary, we improve the models or the preprocessing (e.g. the named entity recognition).
\end{enumerate}
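A minimal sketch of how the clear cases could be picked from the class probabilities, assuming a fitted classifier whose \texttt{predict\_proba()} returns one row per article (thresholds as above):
\begin{lstlisting}[language=Python, breaklines=true]
import numpy as np

def select_clear_cases(probas, upper=0.8, lower=0.1):
    '''returns the indices of the unambiguous articles:
    one class probability above `upper`, all other class
    probabilities below `lower`.
    probas is an (n_articles, n_classes) array.
    '''
    largest = probas.max(axis=1)
    # second largest class probability per article
    second = np.sort(probas, axis=1)[:, -2]
    clear = (largest > upper) & (second < lower)
    return np.where(clear)[0]

probas = np.array([[0.90, 0.05, 0.05],   # clear case
                   [0.50, 0.45, 0.05]])  # ambiguous case
print(select_clear_cases(probas))        # [0]
\end{lstlisting}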
\subsection{Tagging of Named Entities}
Histogram: x-axis: authors/persons and companies; y-axis: number of mentions.
In this chapter...in the next chapter...
}
% Chapter 7 Implementation
%##########################
\chapter{Implementation}
\label{chap:implementation}
}
\section{Data Download}
\label{sec:data_download}
To retrieve our data, we make the following request on the website \url{https://webhose.io}:\\\\
\texttt{
site:(reuters.com OR ft.com OR cnn.com OR economist.com\\
\noindent\hspace*{12mm}%
OR bloomberg.com OR theguardian.com)\\
site\_category:(financial\_news OR finance OR business)\\
\\
timeframe: January 2017 - December 2017} \\
\\
The requested data was downloaded in September 2018 in JSON file format. Every news article is saved in a separate file; in total, 1.478.508 files (4,69 GiB) were downloaded.
Among other things, a single JSON file contains the information shown in the following example:\\
\begin{lstlisting}[breaklines=true]
{
"thread": {
"uuid": "a931e8221a6a55fac4badd5c6992d0a525ca3e83",
"url": "https://www.reuters.com/article/us-github-m-a-microsoft-eu/eu-antitrust-ruling-on-microsoft-buy-of-github-due-by-october-19-idUSKCN1LX114",
"site": "reuters.com",
"site_section": "http://feeds.reuters.com/reuters/financialsNews",
"section_title": "Reuters | Financial News"
"published": "2018-09-17T20:00:00.000+03:00"
"site_type": "news",
"spam_score": 0.0,
},
"title": "EU antitrust ruling on Microsoft buy of GitHub due by October 19",
"text": "BRUSSELS (Reuters)-EU antitrust regulators will decide by Oct. 19 whether to clear U.S. software giant Microsoft's $7.5 billion dollar acquisition of privately held coding website GitHub. Microsoft, which wants to acquire the firm to reinforce its cloud computing business against rival Amazon, requested European Union approval for the deal last Friday, a filing on the European Commission website showed on Monday. The EU competition enforcer can either give the green light with or without demanding concessions, or it can open a full-scale investigation if it has serious concerns. GitHub, the world's largest code host with more than 28 million developers using its platform, is Microsoft's largest takeover since the company bought LinkedIn for $26 billion in 2016. Microsoft Chief Executive Satya Nadella has tried to assuage users' worries that GitHub might favor Microsoft products over competitors after the deal, saying GitHub would continue to be an open platform that works with all the public clouds. Reporting by Foo Yun Chee; Editing by Edmund Blair",
"language": "english",
"crawled": "2018-09-18T01:52:42.035+03:00"
}
\end{lstlisting}
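For further processing, the downloaded files can be aggregated into a single data frame. A sketch under the assumption that all JSON files lie in one directory (path and column selection are illustrative):
\begin{lstlisting}[language=Python, breaklines=true]
import glob
import json
import pandas as pd

rows = []
# hypothetical directory with one JSON file per article
for path in glob.glob('download/*.json'):
    with open(path, encoding='utf-8') as file:
        article = json.load(file)
    rows.append({'SectionTitle': article['thread']['section_title'],
                 'Title': article['title'],
                 'Text': article['text'],
                 'Site': article['thread']['site'],
                 'Timestamp': article['thread']['published']})

df = pd.DataFrame(rows)
df.to_csv('articles.csv', index=False)
\end{lstlisting}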
\section{Python Modules}
\subsection{nltk}
\subsection{pandas}
\subsection{sklearn}
\subsection{webhoseio}
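The official \textit{webhoseio} client wraps the request described in Section \ref{sec:data_download}; a sketch, assuming the \texttt{filterWebContent} endpoint of the client (the API token is a placeholder):
\begin{lstlisting}[language=Python, breaklines=true]
import webhoseio

# placeholder token; webhose.io issues a personal API key
webhoseio.config(token='XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX')

query_params = {
    'q': 'site:(reuters.com OR ft.com OR cnn.com '
         'OR economist.com OR bloomberg.com OR theguardian.com) '
         'site_category:(financial_news OR finance OR business)',
    'ts': '1533634070282',
    'sort': 'crawled'}

output = webhoseio.query('filterWebContent', query_params)
print(output['posts'][0]['title'])
# fetch the next batch of results:
output = webhoseio.get_next()
\end{lstlisting}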
\section{Jupyter Notebook}
We use Jupyter Notebook for interactive coding, labeling, visualization and documentation.
\section{Own Implementation}
\subsection{Examples}
In this chapter, we...In the next chapter...
}
% Chapter 8 Evaluation
%##########################
\chapter{Evaluation}
\label{chap:evaluation}
\textit{
In this chapter we evaluate the different machine learning methods.
}
\section{Model Fitting}
% optimize the algorithms for recall instead of F1, or report both
% remember: vary individual hyperparameters SEPARATELY
% variant: applying the model only to 'Title' gives even better results!
% put all metrics, robustness, over-/underfitting etc. into an overview table!!
% vary: SelectPercentile, BOW/CountVectorizer, preprocessing (stop words, stemming, ...), SelectPercentile (1,5,25,75,100), hyperparameters (alpha, gamma=0.0001, C, ...), with/without text => document everything
\subsection{Naive Bayes Model}
We fit a multinomial Naive Bayes model and tune its hyperparameters via grid search.
\subsection{SVM}
% try 5-fold cross-validation
% best SVM result with the OLD data set:
% best score: 0.876
% best parameters set found on development set:
% C: 0.1, gamma: 0.0001, kernel: linear, percentile: 50
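A sketch of how such a grid search over separate hyperparameters could look, using synthetic stand-in data (the grid is chosen around the values found above):
\begin{lstlisting}[language=Python, breaklines=true]
import numpy as np
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

# synthetic stand-in for the word count matrix and labels
np.random.seed(0)
X = np.random.randint(0, 5, size=(40, 20))
y = np.random.randint(0, 2, size=40)

pipeline = Pipeline([('perc', SelectPercentile(chi2)),
                     ('svm', SVC())])
param_grid = {'perc__percentile': [25, 50, 75],
              'svm__kernel': ['linear'],
              'svm__C': [0.01, 0.1, 1],
              'svm__gamma': [0.0001, 0.001]}

# 5-fold cross-validation, optimizing recall
grid = GridSearchCV(pipeline, param_grid, cv=5, scoring='recall')
grid.fit(X, y)
print(grid.best_score_, grid.best_params_)
\end{lstlisting}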
\subsection{Decision Tree}
% output the 20 most important features!
% simple train_test_split (0.25), applied only to 'Title' in the old data set:
20 most important words in testing set:
['merger', 'buy', 'monsanto', 'warner', 'win', 'walmart', '2', 'billion', 'kkr', 'rival', 'uk', 'watch', 'jv', 'merg', 'get', 'non', 'anz', 'xerox', 'clear', 'deal']
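The most important words can be read off the feature importances of a fitted tree. A minimal sketch with toy headlines (names are illustrative):
\begin{lstlisting}[language=Python, breaklines=true]
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier

titles = ['x to buy y in billion dollar deal',
          'quarterly profits at z rose again']
labels = [0, 1]

vectorizer = CountVectorizer()
X = vectorizer.fit_transform(titles)
tree = DecisionTreeClassifier(random_state=0).fit(X, labels)

# sort the features by importance, most important first
order = np.argsort(tree.feature_importances_)[::-1]
words = np.array(vectorizer.get_feature_names_out())
print(words[order][:20])
\end{lstlisting}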
\section{Recognition of Merger Partners}
% the Stanford variant achieves quite good results.
\section{Performance}
In this chapter we have described ... In the last chapter we describe...
}
\chapter{Discussion (?)}
\al{Is this chapter needed? The thesis should be critically questioned, e.g. 'was the data set well chosen?' etc.}
% Chapter Summary
%#############################
\chapter{Summary}
\label{chap:summary}
\section{Conclusions}
\section{Future Work}
The task of this work could also be solved by using an artificial neural network (ANN). % a more detailed explanation is still missing
This may lead to even better results.
\bigskip
\paragraph{Summary:}
\begin{center}\textbf{Eidesstattliche Erklärung}\end{center}
Hiermit versichere ich an Eides statt, dass ich die vorliegende Arbeit im Bachelorstudiengang Wirtschaftsinformatik selbstständig verfasst und keine anderen als die angegebenen Hilfsmittel insbesondere keine im Quellenverzeichnis nicht benannten Internet-Quellen benutzt habe. Alle Stellen, die wörtlich oder sinngemäß aus Veröffentlichungen entnommen wurden, sind als solche kenntlich gemacht. Ich versichere weiterhin, dass ich die Arbeit vorher nicht in einem anderen Prüfungsverfahren eingereicht habe und die eingereichte schriftliche Fassung der auf dem elektronischen Speichermedium entspricht.
\vspace*{1cm}\\
Hamburg, den 01.03.2019
\hspace*{\fill}\begin{tabular}{@{}l@{}}\hline
\makebox[5cm]{Anne Lorenz}
\end{tabular}
\begin{center}\textbf{Veröffentlichung}\end{center}
Ich stimme der Einstellung der Arbeit in die Bibliothek des Fachbereichs Informatik zu.
\vspace*{1cm}\\
Hamburg, den 01.03.2019
\hspace*{\fill}\begin{tabular}{@{}l@{}}\hline
\makebox[5cm]{Anne Lorenz}
\end{tabular}