From b90843f6c0f675f1dfa268b2dad1aaf17ac500b6 Mon Sep 17 00:00:00 2001 From: Anery Patel Date: Wed, 22 Jan 2020 16:06:37 +0530 Subject: [PATCH 1/4] Update addIndex.py --- Elastic/addIndex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Elastic/addIndex.py b/Elastic/addIndex.py index b6a10dc..20be014 100644 --- a/Elastic/addIndex.py +++ b/Elastic/addIndex.py @@ -11,7 +11,7 @@ docType = "doc" # by default we connect to localhost:9200 -es = Elasticsearch(['https://5e9acbee.ngrok.io/']) +es = Elasticsearch(['http://node1.research.tib.eu:9200/']) path_to_data = '/app/' # path_to_data = '../' From 540a36d6cd968abecf3c5e3ddebbecb405748ed5 Mon Sep 17 00:00:00 2001 From: Anery Patel Date: Wed, 22 Jan 2020 16:07:38 +0530 Subject: [PATCH 2/4] Update main.py --- main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/main.py b/main.py index cfc76fb..0aecb40 100644 --- a/main.py +++ b/main.py @@ -18,7 +18,6 @@ nlp = spacy.load('en_core_web_sm') -#wikidataSPARQL="https://17d140f2.ngrok.io/sparql" wikidataSPARQL="http://node3.research.tib.eu:4010/sparql" stopWordsList=wiki_stopwords.getStopWords() From 6c74fbc563d5f4bccdb35f337be98e2c3e83eda7 Mon Sep 17 00:00:00 2001 From: Anery Patel Date: Wed, 22 Jan 2020 22:49:39 +0530 Subject: [PATCH 3/4] Update main.py --- main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/main.py b/main.py index 0aecb40..4fd747f 100644 --- a/main.py +++ b/main.py @@ -471,7 +471,7 @@ def split_base_on_s(combinations): def process_word_E_long(question): global count - k=1 + k=5 entities=[] @@ -521,7 +521,7 @@ def process_word_E(question): #print(question) startTime=time.time() global count - k=1 + k=5 entities=[] question=question.replace("?","") @@ -610,7 +610,7 @@ def evaluate(raw): r_entity=0 p_relation=0 r_relation=0 - k=1 + k=5 correct=True questionRelationsNumber=0 entities=[] From 0624840e2d423be05b96983452728d97c7596f9b Mon Sep 17 00:00:00 2001 From: Anery Patel Date: Wed, 22 Jan 2020 23:03:58 +0530 
Subject: [PATCH 4/4] Update main.py --- main.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/main.py b/main.py index 4fd747f..d626a3f 100644 --- a/main.py +++ b/main.py @@ -469,9 +469,8 @@ def split_base_on_s(combinations): result.append(comb) return result -def process_word_E_long(question): +def process_word_E_long(question, k=1): global count - k=5 entities=[] @@ -517,11 +516,10 @@ def process_word_E_long(question): results.append(entity) return [[entity[1],entity[4]] for entity in results] -def process_word_E(question): +def process_word_E(question,k=1): #print(question) startTime=time.time() global count - k=5 entities=[] question=question.replace("?","") @@ -545,7 +543,7 @@ def process_word_E(question): return [[entity[1],entity[2]] for entity in entities] def process_text_E_R(question,k=1): - raw=evaluate([question]) + raw=evaluate([question],k) #time=raw[1] #print(raw) question=question.replace("?","") @@ -591,7 +589,7 @@ def split_bas_on_comparison(combinations): return new_comb,compare_found -def evaluate(raw): +def evaluate(raw,k): evaluation=False startTime=time.time() @@ -610,7 +608,6 @@ def evaluate(raw): r_entity=0 p_relation=0 r_relation=0 - k=5 correct=True questionRelationsNumber=0 entities=[] @@ -829,7 +826,7 @@ def datasets_evaluate(dataset_file): if __name__ == '__main__': #datasets_evaluate() - process_text_E_R('What is the operating income for Qantas?') + process_text_E_R('What is the operating income for Qantas?', 5)