Initial commit
17 NLP/.project Normal file
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>NLP</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.python.pydev.PyDevBuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
	</buildSpec>
	<natures>
		<nature>org.python.pydev.pythonNature</nature>
	</natures>
</projectDescription>
8 NLP/.pydevproject Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/${PROJECT_DIR_NAME}/src</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 3.0</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Python 3.5</pydev_property>
</pydev_project>
1 NLP/data/text1.txt Normal file
@@ -0,0 +1 @@
Maria are mere. Ea mai are șapte pere. Acestea sunt foarte delicioase.
6 NLP/data/text1_processed.xml Normal file
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8" ?>
<segs>
<seg lang="ro"><s id="id_temp_aiurea.1"><w lemma="Maria" ana="Np" chunk="Np#1">Maria</w><w lemma="avea" ana="Vmip3s" chunk="Vp#1">are</w><w lemma="măr" ana="Ncfp-n" chunk="Np#2">mere</w><c>.</c></s></seg>
<seg lang="ro"><s id="id_temp_aiurea.2"><w lemma="el" ana="Pp3fsr--------s">Ea</w><w lemma="mai" ana="Rp" chunk="Ap#1">mai</w><w lemma="avea" ana="Vmip3s" chunk="Vp#1">are</w><w lemma="șapte" ana="Mc-p-l" chunk="Np#1">șapte</w><w lemma="pară" ana="Ncfp-n" chunk="Np#1">pere</w><c>.</c></s></seg>
<seg lang="ro"><s id="id_temp_aiurea.3"><w lemma="acesta" ana="Pd3fpr">Acestea</w><w lemma="fi" ana="Vmip3p" chunk="Vp#1">sunt</w><w lemma="foarte" ana="Rp" chunk="Ap#1,Vp#1">foarte</w><w lemma="delicios" ana="Afpfp-n" chunk="Ap#1">delicioase</w><c>.</c></s></seg>
</segs>
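Each token above is a <w> element whose attributes carry the lemma, the positional morpho-syntactic tag ("ana") and the chunk label; the text reads "Maria has apples. She also has seven pears. These are very delicious." A minimal, self-contained sketch of reading one such element with Python's stdlib minidom, the same approach NLP/src/fileparser.py takes below for whole files:

from xml.dom import minidom

# Sample markup copied verbatim from text1_processed.xml above.
sample = '<w lemma="măr" ana="Ncfp-n" chunk="Np#2">mere</w>'
w = minidom.parseString(sample).documentElement

print(w.firstChild.data)        # mere   (surface form)
print(w.getAttribute("lemma"))  # măr    (dictionary form)
print(w.getAttribute("ana"))    # Ncfp-n (positional morpho-syntactic tag)
print(w.getAttribute("chunk"))  # Np#2   (noun-phrase chunk label)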
1 NLP/data/text2.txt Normal file
@@ -0,0 +1 @@
Sabeer Bhatia a ajuns la Aeroportul Internațional din Los Angeles la ora 18 în data de 23 septembrie 1998. Zborul său din Bangalore a durat 22 ore, și el era înfometat.
5 NLP/data/text2_processed.xml Normal file
@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<segs>
<seg lang="ro"><s id="id_temp_aiurea.1"><w lemma="Sabeer" ana="Np" chunk="Np#1">Sabeer</w><w lemma="Bhatia" ana="Np" chunk="Np#1">Bhatia</w><w lemma="avea" ana="Va--3s" chunk="Vp#1">a</w><w lemma="ajunge" ana="Vmp--sm" chunk="Vp#1">ajuns</w><w lemma="la" ana="Spsa" chunk="Pp#1">la</w><w lemma="aeroport" ana="Ncmsry" chunk="Pp#1,Np#2">Aeroportul</w><w lemma="internațional" ana="Afpms-n" chunk="Pp#1,Np#2,Ap#1">Internațional</w><w lemma="din" ana="Spsa" chunk="Pp#2">din</w><w lemma="Los" ana="Np" chunk="Pp#2,Np#3">Los</w><w lemma="Angeles" ana="Np" chunk="Pp#2,Np#3">Angeles</w><w lemma="la" ana="Spsa" chunk="Pp#3">la</w><w lemma="oră" ana="Ncfsry" chunk="Pp#3,Np#4">ora</w><w lemma="18" ana="Mc" chunk="Pp#3,Np#4">18</w><w lemma="în" ana="Spsa" chunk="Pp#4">în</w><w lemma="dată" ana="Ncfsry" chunk="Pp#4,Np#5">data</w><w lemma="de" ana="Spsa" chunk="Pp#5">de</w><w lemma="23" ana="Mc" chunk="Pp#5,Np#6">23</w><w lemma="septembrie" ana="Ncms-n" chunk="Pp#5,Np#6">septembrie</w><w lemma="1998" ana="Mc" chunk="Pp#5,Np#6">1998</w><c>.</c></s></seg>
<seg lang="ro"><s id="id_temp_aiurea.2"><w lemma="zbor" ana="Ncmsry" chunk="Np#1">Zborul</w><w lemma="său" ana="Ds3ms-s" chunk="Np#1">său</w><w lemma="din" ana="Spsa" chunk="Pp#1">din</w><w lemma="Bangalore" ana="Np" chunk="Pp#1,Np#2">Bangalore</w><w lemma="avea" ana="Va--3s" chunk="Vp#1">a</w><w lemma="dura" ana="Vmp--sm" chunk="Vp#1">durat</w><w lemma="22" ana="Mc" chunk="Np#3">22</w><w lemma="oră" ana="Ncfp-n" chunk="Np#3">ore</w><c>,</c><w lemma="și" ana="Crssp">și</w><w lemma="el" ana="Pp3msr--------s" chunk="Vp#2">el</w><w lemma="fi" ana="Vaii3s" chunk="Vp#2">era</w><w lemma="înfometa" ana="Vmp--sm" chunk="Vp#2">înfometat</w><c>.</c></s></seg>
</segs>
1 NLP/data/text3.txt Normal file
@@ -0,0 +1 @@
Sophia Loren spune că ea va fi întotdeauna mulțumitoare față de Bono. Actrița a dezvăluit că cântărețul trupei U2 a ajutat-o să se liniștească atunci când ea s-a speriat de o furtună în timp ce zburau cu avionul.
5 NLP/data/text3_processed.xml Normal file
@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="utf-8" ?>
<segs>
<seg lang="ro"><s id="id_temp_aiurea.1"><w lemma="Sophia" ana="Np" chunk="Np#1">Sophia</w><w lemma="Loren" ana="Np" chunk="Np#1">Loren</w><w lemma="spune" ana="Vmnp" chunk="Vp#1">spune</w><w lemma="că" ana="Csssp">că</w><w lemma="el" ana="Pp3fsr--------s" chunk="Vp#2">ea</w><w lemma="vrea" ana="Va--3s" chunk="Vp#2">va</w><w lemma="fi" ana="Vmnp" chunk="Vp#2">fi</w><w lemma="întotdeauna" ana="Rgp" chunk="Vp#2,Ap#1">întotdeauna</w><w lemma="mulțumitor" ana="Afpf--n" chunk="Ap#1">mulțumitoare</w><w lemma="față_de" ana="Spca" chunk="Pp#1">față_de</w><w lemma="bonă" ana="Ncfsvy" chunk="Pp#1,Np#2">Bono</w><c>.</c></s></seg>
<seg lang="ro"><s id="id_temp_aiurea.2"><w lemma="actriță" ana="Ncfsry" chunk="Np#1">Actrița</w><w lemma="avea" ana="Va--3s" chunk="Vp#1">a</w><w lemma="dezvălui" ana="Vmp--sm" chunk="Vp#1">dezvăluit</w><w lemma="că" ana="Csssp">că</w><w lemma="cântăreț" ana="Ncmsry" chunk="Np#2">cântărețul</w><w lemma="trupă" ana="Ncfsoy" chunk="Np#2">trupei</w><w lemma="U2" ana="Np" chunk="Np#2">U2</w><w lemma="avea" ana="Va--3s" chunk="Vp#2">a</w><w lemma="ajuta" ana="Vmp--sm" chunk="Vp#2">ajutat</w><w lemma="el" ana="Pp3fsa--y-----w">-o</w><w lemma="să" ana="Qs" chunk="Vp#3">să</w><w lemma="sine" ana="Px3--a--------w" chunk="Vp#3">se</w><w lemma="liniști" ana="Vmsp3" chunk="Vp#3">liniștească</w><w lemma="atunci_când" ana="Rw" chunk="Vp#3,Ap#1">atunci_când</w><w lemma="el" ana="Pp3fsr--------s">ea</w><w lemma="sine" ana="Px3--a--y-----w" chunk="Vp#4">s-</w><w lemma="avea" ana="Va--3s" chunk="Vp#4">a</w><w lemma="speria" ana="Vmp--sm" chunk="Vp#4">speriat</w><w lemma="de" ana="Spsa" chunk="Pp#1">de</w><w lemma="un" ana="Tifsr" chunk="Pp#1,Np#3">o</w><w lemma="furtună" ana="Ncfsrn" chunk="Pp#1,Np#3">furtună</w><w lemma="în_timp_ce" ana="Cscsp">în_timp_ce</w><w lemma="zbura" ana="Vmii3p" chunk="Vp#5">zburau</w><w lemma="cu" ana="Spsa" chunk="Pp#2">cu</w><w lemma="avion" ana="Ncmsry" chunk="Pp#2,Np#4">avionul</w><c>.</c></s></seg>
</segs>
121 NLP/src/anaphora.py Normal file
@@ -0,0 +1,121 @@
'''
Created on May 22, 2016

@author: tibi
'''


def getGender(word):
    # First and second person pronouns carry no grammatical gender,
    # so treat them as neutral ('n').
    if word.isPronoun() and word.pronounGetPerson() in ('1', '2'):
        return 'n'

    return word.getGender()


def genderMatch(word1, word2):
    # 2 = exact match, 1 = one side is neutral/unknown, 0 = mismatch.
    g1 = getGender(word1)
    g2 = getGender(word2)

    if g1 == g2:
        return 2

    if g1 == 'n' or g2 == 'n':
        return 1

    return 0


def isPrepositional(chunk):
    # A chunk counts as prepositional if any of its words is a preposition.
    for word in chunk:
        if word.isPreposition():
            return True

    return False


def countInText(noun, text):
    # Number of occurrences of the noun's surface form in the whole text.
    c = 0
    for word in text:
        if word.text == noun.text:
            c += 1

    return c


def anaphora(text, chunks):

    nounPhrases = []

    for word in text:

        if word.isNoun():
            print("[n]", word)
            nounPhrases.append((word, (word.sentenceIndex, word.chunk)))
        else:
            print(word)

        if word.isPronoun():

            candidates = []

            # Walk the most recently seen noun phrases, newest first.
            for noun, chunkIndex in nounPhrases[:-30:-1]:

                # Gender and number must be compatible.
                if genderMatch(word, noun) > 0 and word.getNumber() == noun.getNumber():

                    npInd = genderMatch(word, noun)

                    # Definiteness: penalize indefinite noun phrases.
                    if not noun.nounIsDefinite():
                        npInd -= 1

                    # Penalize prepositional noun phrases.
                    chunk = chunks[chunkIndex]
                    if isPrepositional(chunk):
                        npInd -= 1

                    # Reward a noun that opens its sentence.
                    if noun.wordIndex == 1:
                        npInd += 1

                    # Indicating verbs
                    # todo...

                    # Lexical reiteration: repeated nouns are more salient.
                    c = countInText(noun, text)
                    if c == 2:
                        npInd += 1
                    if c > 2:
                        npInd += 2

                    # Noun is a representing term
                    # how?

                    # Identical collocation pattern to the pronoun
                    # ???

                    # Immediate reference, resolving 'it'
                    # applicable?

                    # Referential distance: closer sentences score higher.
                    dist = word.sentenceIndex - noun.sentenceIndex
                    if dist == 0:
                        npInd += 2
                    elif dist == 1:
                        npInd += 1

                    candidates.append((noun, npInd))
                    print("...> Candidate: {0} npInd = {1}".format(noun, npInd))

            if len(candidates) > 0:
                # Keep the highest-scoring candidate; the loop variables are
                # named so they do not shadow 'word' (the pronoun itself).
                pickedWord, pickedInd = candidates[0]
                for cand, score in candidates:
                    if score > pickedInd:
                        pickedInd = score
                        pickedWord = cand

                print(".>>> Picked: {0}".format(pickedWord))
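The scorer combines knowledge-poor salience indicators (definiteness, non-prepositional chunk, sentence-initial position, lexical reiteration, referential distance) in the spirit of Mitkov's knowledge-poor pronoun resolution. A hand-trace over text1_processed.xml, using the Word accessors defined in model/Word.py below, at the moment the pronoun "Ea" ("she", sentence 2) is reached:

  "mere" (Ncfp-n): gender matches, but number 'p' != 's'   -> rejected
  "Maria" (Np):    genderMatch = 1 (the bare proper-noun tag
                   carries no gender, so it reads as neutral)
                   proper noun, hence definite              -> no penalty
                   chunk Np#1 is non-prepositional          -> no penalty
                   first word of its sentence               -> +1
                   single occurrence, sentence distance 1   -> +1
                   npInd = 3                                -> picked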
55 NLP/src/fileparser.py Normal file
@@ -0,0 +1,55 @@
'''
Created on May 22, 2016

@author: tibi
'''

from xml.dom import minidom

from model.Word import Word


def parse(filename):

    words = []
    chunks = {}

    sentence_i = 0

    # Get the root "segs" element.
    dom = minidom.parse(filename)
    alltext = dom.getElementsByTagName("segs")

    # Iterate paragraphs.
    for paragraph in alltext[0].getElementsByTagName("seg"):

        # Iterate sentences.
        for sentence in paragraph.getElementsByTagName("s"):

            # Increment the sentence index; word indices restart per sentence.
            sentence_i += 1
            word_i = 0

            # Iterate words.
            for word in sentence.getElementsByTagName("w"):

                word_i += 1

                # Obtain word info.
                wordText = word.firstChild.data
                lemma = word.getAttribute("lemma")
                ana = word.getAttribute("ana")
                chunk = word.getAttribute("chunk")

                # A token may belong to several chunks ("Pp#1,Np#2"):
                # create one Word per chunk, and index each chunk's words
                # by (sentence index, chunk label).
                for c in chunk.split(","):
                    w = Word(wordText, lemma, ana, c, sentence_i, word_i)
                    words.append(w)
                    if chunks.get((sentence_i, c)) is None:
                        chunks[(sentence_i, c)] = [w]
                    else:
                        chunks[(sentence_i, c)].append(w)

    return (words, chunks)
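A sketch of the shapes parse() returns (a hypothetical session; main.py below is the actual driver):

words, chunks = parse("../data/text1_processed.xml")
# words : flat list of Word objects, one per (token, chunk label) pair
# chunks: dict keyed by (sentence index, chunk label), e.g.
#   chunks[(1, "Np#1")]  -> the Word objects of noun phrase 1 of sentence 1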
26 NLP/src/main.py Normal file
@@ -0,0 +1,26 @@
'''
Created on May 22, 2016

@author: tibi
'''
import fileparser
from anaphora import anaphora


if __name__ == '__main__':

    words, chunks = fileparser.parse("../data/text3_processed.xml")

    print("Words:")
    for word in words:
        print("[{0} {1}] {2}".format(word.sentenceIndex, word.wordIndex, word))
    print("")

    print("Chunks:")
    for key, value in chunks.items():
        print(key, ":")
        for word in value:
            print(" - ", word)
    print("")

    print("Anaphora resolution:")
    anaphora(words, chunks)
88 NLP/src/model/Word.py Normal file
@@ -0,0 +1,88 @@
'''
Created on May 22, 2016

@author: tibi
'''


class Word:
    '''
    One token and its annotations: surface form, lemma, positional
    morpho-syntactic tag ("ana"), chunk label, and position in the text.
    '''

    def __init__(self, text, lemma, ana, chunk, sentenceIndex, wordIndex):
        self.text = text
        self.lemma = lemma
        self.ana = ana
        self.chunk = chunk
        self.sentenceIndex = sentenceIndex
        self.wordIndex = wordIndex

    def __str__(self):
        return "{0} (lemma {1}, ana {2}, chunk {3})".format(self.text, self.lemma, self.ana, self.chunk)

    def isNoun(self):
        return self.ana[0] == "N"

    def nounIsCommon(self):
        return self.isNoun() and self.ana[1] == "c"

    def nounIsProper(self):
        return self.isNoun() and self.ana[1] == "p"

    def nounGetCase(self):
        if self.isNoun():
            return self.ana[4]

        return None

    def nounIsDefinite(self):
        # Is the noun definite (articulated)?
        if self.isNoun():
            # Proper nouns are inherently definite.
            if self.nounIsProper():
                return True

            # Position 5 of the tag holds definiteness: 'y' or 'n'.
            if len(self.ana) > 5:
                return self.ana[5] == "y"

            return False

        return None

    def pronounGetPerson(self):
        if self.isPronoun():
            return self.ana[2]

        return None

    def getGender(self):
        if self.isNoun():
            # Short tags such as "Np" carry no gender; treat as neutral.
            if len(self.ana) >= 3:
                return self.ana[2]
            return 'n'

        if self.isPronoun():
            return self.ana[3]

        return None

    def getNumber(self):
        if self.isNoun():
            if self.nounIsProper():
                return 's'
            else:
                return self.ana[3]

        if self.isPronoun():
            return self.ana[4]

        return None

    def isPronoun(self):
        return self.ana[0] == "P"

    def isVerb(self):
        return self.ana[0] == "V"

    def isPreposition(self):
        return self.ana[0] == "S" and self.ana[1] == "p"
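The accessors above index into the positional "ana" tag. A decoding sketch for "Ncfsry" (the token "ora" in text2_processed.xml); the position meanings below are an assumption based on the MULTEXT-East style MSD conventions this corpus appears to follow:

ana = "Ncfsry"   # "ora" in text2_processed.xml
print(ana[0])    # N -> noun         (isNoun)
print(ana[1])    # c -> common       (nounIsCommon)
print(ana[2])    # f -> feminine     (getGender)
print(ana[3])    # s -> singular     (getNumber)
print(ana[4])    # r -> direct case  (nounGetCase)
print(ana[5])    # y -> definite     (nounIsDefinite)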
0 NLP/src/model/__init__.py Normal file