# https://medium.com/analytics-vidhya/building-a-simple-chatbot-in-python-using-nltk-7c8c8215ac6e
import random
import string # to process standard python strings
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import nltk
from nltk.stem import WordNetLemmatizer
warnings.filterwarnings("ignore")
nltk.download('popular', quiet=True)  # download the required NLTK data packages
# The 'popular' collection already includes the data this script needs:
# nltk.download('punkt')    # tokenizers (first-time use only)
# nltk.download('wordnet')  # lemmatizer data (first-time use only)
with open('alfred-knowledge-base.txt', 'r', encoding='utf8', errors='ignore') as fin:
    raw = fin.read().lower()

sent_tokens = nltk.sent_tokenize(raw)  # converts to list of sentences
word_tokens = nltk.word_tokenize(raw)  # converts to list of words

lemmer = WordNetLemmatizer()
def LemTokens(tokens):
    """Lemmatize a list of tokens."""
    return [lemmer.lemmatize(token) for token in tokens]

# Map every punctuation character to None so str.translate() strips it
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)

def LemNormalize(text):
    """Lowercase, strip punctuation, tokenize, and lemmatize a string."""
    return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
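# For illustration (approximate output): LemNormalize("Chatbots are amazing!")
# returns something like ['chatbot', 'are', 'amazing'].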
GREETING_INPUTS = ("hello", "hi", "greetings", "sup", "what's up", "hey")
GREETING_RESPONSES = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"]
# Checking for greetings
def greeting(sentence):
    """If the user's input is a greeting, return a random greeting response."""
    for word in sentence.split():
        if word.lower() in GREETING_INPUTS:
            return random.choice(GREETING_RESPONSES)
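# For example, greeting("hey there") returns one of GREETING_RESPONSES,
# while greeting("how do chatbots work") returns None.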
# Generating a response with TF-IDF retrieval
def response(user_response):
    robo_response = ''
    # Temporarily add the user's input so it is vectorized together with the corpus
    sent_tokens.append(user_response)
    TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
    tfidf = TfidfVec.fit_transform(sent_tokens)
    # Cosine similarity of the user's input (last row) against every sentence
    vals = cosine_similarity(tfidf[-1], tfidf)
    # The highest score is the input matched against itself, so take the second highest
    idx = vals.argsort()[0][-2]
    flat = vals.flatten()
    flat.sort()
    req_tfidf = flat[-2]
    if req_tfidf == 0:
        robo_response = robo_response + "I am sorry! I don't understand you"
        return robo_response
    else:
        robo_response = robo_response + sent_tokens[idx]
        return robo_response
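# A minimal sketch of the retrieval step above, using a hypothetical three-sentence
# corpus (the real corpus comes from alfred-knowledge-base.txt):
#
#   docs = ["chatbots converse with people.", "the sky is blue.", "what is a chatbot"]
#   vecs = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english').fit_transform(docs)
#   sims = cosine_similarity(vecs[-1], vecs)  # last row acts as the user's query
#
# sims[0][-1] is the query compared with itself (score 1.0), so argsort()[0][-2]
# picks "chatbots converse with people." as the best-matching reply.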
flag = True
print("Alfred: My name is Alfred. I will answer your queries about Chatbots. If you want to exit, type Bye!")
while flag:
    user_response = input().lower()
    if user_response != 'bye':
        if user_response in ('thanks', 'thank you'):
            flag = False
            print("Alfred: You are welcome..")
        else:
            greet = greeting(user_response)
            if greet is not None:
                print("Alfred: " + greet)
            else:
                print("Alfred: ", end="")
                print(response(user_response))
                # response() appended the input to the corpus; remove it again
                sent_tokens.remove(user_response)
    else:
        flag = False
        print("Alfred: Bye! take care..")