Commit: Merge branch 'Recode-Hive:main' into TheNaiveSamosa
Showing 77 changed files with 261,391 additions and 1,181 deletions.
README.md
@@ -0,0 +1,4 @@
# AI_CHATBOT
An AI chatbot built with the Natural Language Toolkit (NLTK) and PyTorch.
I developed this bot for Snapitize, a photographer's website (which I also built).
Feel free to update intents.json to widen the chatbot's domain (one way to do that is sketched just below).
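As a minimal sketch (not part of this commit), one way to widen the domain is to append a new entry to intents.json and retrain; the "booking" tag, patterns, and responses below are invented for illustration:

import json

# Load the existing intents (assumes intents.json is in the working directory).
with open('intents.json', 'r') as f:
    intents = json.load(f)

# Hypothetical new intent -- tag, patterns, and responses are examples only.
intents['intents'].append({
    "tag": "booking",
    "patterns": ["How do I book a shoot?", "Can I schedule a session?"],
    "responses": ["You can book a shoot through the contact form on our website."]
})

with open('intents.json', 'w') as f:
    json.dump(intents, f, indent=2)

# Retrain afterwards (the training script shown later in this diff) so the model learns the new tag.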
chat.py
@@ -0,0 +1,53 @@
import random
import json

import torch

from model import NeuralNet
from nltk_utils import bag_of_words, tokenize

# Run on GPU if available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

with open('intents.json', 'r') as json_data:
    intents = json.load(json_data)

# Load the trained model state produced by the training script.
FILE = "data.pth"
data = torch.load(FILE)

input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]

model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = "Snapitizer"
print("Let's chat! (type 'quit' to exit)")
while True:
    # sentence = "do you use credit cards?"
    sentence = input("You: ")
    if sentence == "quit":
        break

    # Turn the sentence into a bag-of-words vector the model understands.
    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)

    tag = tags[predicted.item()]

    # Only answer when the model is reasonably confident about the predicted tag.
    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                print(f"{bot_name}: {random.choice(intent['responses'])}")
    else:
        print(f"{bot_name}: I do not understand...")
data.pth: binary file not shown.
intents.json
@@ -0,0 +1,157 @@
{
  "intents": [
    {
      "tag": "greeting",
      "patterns": [
        "Hi",
        "Hey",
        "How are you",
        "Is anyone there?",
        "Hello",
        "Good day"
      ],
      "responses": [
        "Hey :-)",
        "Hello, thanks for visiting",
        "Hi there, what can I do for you?",
        "Hi there, how can I help?"
      ]
    },
    {
      "tag": "goodbye",
      "patterns": ["Bye", "See you later", "Goodbye"],
      "responses": [
        "See you later, thanks for visiting",
        "Have a nice day",
        "Bye! Come back again soon."
      ]
    },
    {
      "tag": "thanks",
      "patterns": ["Thanks", "Thank you", "That's helpful", "Thanks a lot!"],
      "responses": ["Happy to help!", "Any time!", "My pleasure"]
    },
    {
      "tag": "camera",
      "patterns": [
        "Which cameras do you have?",
        "What kinds of equipment are there?",
        "What is the resolution of your pictures?"
      ],
      "responses": [
        "We have Sony and Nikon cameras",
        "We have drones, action cameras, video cameras and many more for different types of shoots"
      ]
    },
    {
      "tag": "payments",
      "patterns": [
        "Do you take cards?",
        "Do you accept cheque payments?",
        "Can I pay with Paytm?",
        "Are you cash only?"
      ],
      "responses": [
        "We accept credit cards, debit cards, cheques and Paytm",
        "We accept credit cards, and Paytm"
      ]
    },
    {
      "tag": "shoot time",
      "patterns": [
        "How long does shooting take?",
        "When will I get my album?",
        "How long will it take to print the album?"
      ],
      "responses": [
        "The shoot takes 2-4 days",
        "Processing the images and videos takes 2-4 days",
        "The album will be ready one week after the shoot"
      ]
    },
    {
      "tag": "location",
      "patterns": [
        "Where are you located?",
        "What's your address?"
      ],
      "responses": [
        "We are located in New Delhi.",
        "Our office is in Delhi, at xyzz."
      ]
    },
    {
      "tag": "services",
      "patterns": [
        "What services do you offer?",
        "Tell me about your photography services."
      ],
      "responses": [
        "We offer a range of photography services including weddings, events, portraits, and more.",
        "Our services include photography for special occasions, corporate events, and individual portraits."
      ]
    },
    {
      "tag": "packages",
      "patterns": [
        "What are your photography packages?",
        "Tell me about your pricing."
      ],
      "responses": [
        "We have different packages to suit various needs. You can find detailed pricing on our website or contact us for more information."
      ]
    },
    {
      "tag": "editing",
      "patterns": [
        "Do you provide photo editing services?",
        "Can I get my photos retouched?"
      ],
      "responses": [
        "Yes, we offer professional photo editing services to enhance and retouch your images."
      ]
    },
    {
      "tag": "availability",
      "patterns": [
        "When are you available for a photoshoot?",
        "What are your working hours?"
      ],
      "responses": [
        "Our team is available for photoshoots 6 days a week, from 10:00 to 00:00."
      ]
    },
    {
      "tag": "refund policy",
      "patterns": [
        "What is your refund policy?",
        "Can I get a refund if I cancel?"
      ],
      "responses": [
        "We process refunds for orders cancelled at least 7 days before the date of the event. Please refer to our terms and conditions for more details."
      ]
    },
    {
      "tag": "reviews",
      "patterns": [
        "Can I see customer reviews?",
        "What do your clients say about your services?"
      ],
      "responses": [
        "You can find customer reviews on our website or social media pages. We take pride in our positive feedback."
      ]
    },
    {
      "tag": "funny",
      "patterns": [
        "Tell me a joke!",
        "Tell me something funny!",
        "Do you know a joke?"
      ],
      "responses": [
        "What kind of candy do astronauts like? Mars bars.",
        "What's an egg's favorite vacation spot? New Yolk City"
      ]
    }
  ]
}
model.py
@@ -0,0 +1,19 @@
import torch
import torch.nn as nn


class NeuralNet(nn.Module):
    """Simple feed-forward classifier: input -> hidden -> hidden -> num_classes."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, hidden_size)
        self.l3 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        out = self.relu(out)
        out = self.l3(out)
        # No softmax here: CrossEntropyLoss applies it during training,
        # and the chat script applies torch.softmax explicitly at inference time.
        return out
Empty file.
nltk_utils.py
@@ -0,0 +1,23 @@
import numpy as np
import nltk
from nltk.stem.porter import PorterStemmer

# Make sure the Punkt tokenizer models are available for word_tokenize.
nltk.download('punkt')

stemmer = PorterStemmer()


def tokenize(sentence):
    # Split a sentence into word and punctuation tokens.
    return nltk.word_tokenize(sentence)


def stem(word):
    # Reduce a word to its lowercase stem, e.g. "organizes" -> "organ".
    return stemmer.stem(word.lower())


def bag_of_words(tokenized_sentence, words):
    # Return a float32 vector with a 1 at every vocabulary position whose
    # (stemmed) word occurs in the tokenized sentence.
    sentence_words = [stem(word) for word in tokenized_sentence]
    bag = np.zeros(len(words), dtype=np.float32)
    for idx, w in enumerate(words):
        if w in sentence_words:
            bag[idx] = 1

    return bag
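For intuition, a quick sketch of how these helpers behave; the sample vocabulary and sentence below are made up for the example:

from nltk_utils import tokenize, stem, bag_of_words

# Hypothetical already-stemmed vocabulary, shaped like all_words after training prep.
words = ["hi", "hello", "thank", "cool", "bye"]

sentence = tokenize("Hello, thank you!")   # ['Hello', ',', 'thank', 'you', '!']
print([stem(w) for w in sentence])         # ['hello', ',', 'thank', 'you', '!']
print(bag_of_words(sentence, words))       # [0. 1. 1. 0. 0.]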
train.py
@@ -0,0 +1,108 @@
import numpy as np
import random
import json

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet

with open('intents.json', 'r') as f:
    intents = json.load(f)

all_words = []
tags = []
xy = []
# Collect every (tokenized pattern, tag) pair and build the vocabulary.
for intent in intents['intents']:
    tag = intent['tag']
    tags.append(tag)
    for pattern in intent['patterns']:
        w = tokenize(pattern)
        all_words.extend(w)
        xy.append((w, tag))

# Stem and de-duplicate the vocabulary; drop bare punctuation.
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))

print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)

# Build the training matrices: bag-of-words inputs and tag-index labels.
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    label = tags.index(tag)
    y_train.append(label)

X_train = np.array(X_train)
y_train = np.array(y_train)

# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.01
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)


class ChatDataset(Dataset):

    def __init__(self):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.n_samples


dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = NeuralNet(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(dtype=torch.long).to(device)
        outputs = model(words)

        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

print(f'final loss: {loss.item():.4f}')

# Persist everything the chat script needs to rebuild the model and its vocabulary.
data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "hidden_size": hidden_size,
    "output_size": output_size,
    "all_words": all_words,
    "tags": tags
}

FILE = "data.pth"
torch.save(data, FILE)

print(f'training complete. file saved to {FILE}')
Submodule Book-Recommendation added at 940e78