Commit

Update main.py
Roverbk authored May 21, 2024
1 parent ad4d3ae commit d1ddf00
Showing 1 changed file with 36 additions and 11 deletions.
47 changes: 36 additions & 11 deletions main.py
@@ -1,26 +1,52 @@
# main.py

import requests
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt

# Scrape IMDb for summaries and titles of 250 top-rated movies

def scrape_imdb_top_movies(num_movies):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
    }
    response = requests.get('https://www.imdb.com/chart/top/?ref_=login', headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    movie_links = soup.find_all('a', {"class": "ipc-title-link-wrapper"})

    # Chart rows are titled like "1. The Shawshank Redemption"; the leading
    # digit separates ranked entries from other links with the same class.
    hrefs = []
    movie_titles = []
    for movie in movie_links:
        text = movie.text
        if text[0].isdigit():
            movie_titles.append(text)
            hrefs.append(movie.get("href"))

    summaries = []
    for index in range(num_movies):
        url = "https://www.imdb.com" + hrefs[index]
        print(f"Fetching summary for: {movie_titles[index]}")
        r = requests.get(url, headers=headers)
        url_soup = BeautifulSoup(r.content, 'html.parser')
        plot = url_soup.find('span', {'data-testid': 'plot-l'})
        summaries.append(plot.text if plot else "No summary available")

    return movie_titles[:num_movies], summaries


num_movies = 250
movie_titles, summaries = scrape_imdb_top_movies(num_movies)
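# Note: this fires ~250 sequential requests against IMDb; a short time.sleep()
# between detail-page fetches would be a polite addition, though no throttling
# appears in this commit.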

# TF-IDF Vectorization

vectorizer = TfidfVectorizer(stop_words='english')
tfidf_matrix = vectorizer.fit_transform(summaries)
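# tfidf_matrix is a sparse (num_movies x vocab_size) matrix; a quick sanity
# check of the fitted vocabulary (illustrative, not part of this commit;
# get_feature_names_out() assumes a recent scikit-learn):
#   print(tfidf_matrix.shape, len(vectorizer.get_feature_names_out()))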

# Dimensionality Reduction with PCA
pca = PCA(n_components=2)
tfidf_pca = pca.fit_transform(tfidf_matrix.toarray())
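# PCA needs a dense array, hence .toarray(); two components keep only part of
# the variance, which can be inspected (illustrative, not part of this commit):
#   print(pca.explained_variance_ratio_.sum())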

# Finding the Optimal Number of Clusters

# Elbow Method
sum_of_squared_distances = []
@@ -37,7 +63,7 @@
plt.title('Elbow Method for Optimal k')
plt.show()
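# The hunk collapsed above presumably fills sum_of_squared_distances with a
# standard elbow loop along these lines (illustrative sketch only, not taken
# from the visible diff):
#   for k in range(1, min(10, num_movies)):
#       kmeans = KMeans(n_clusters=k)
#       kmeans.fit(tfidf_pca)
#       sum_of_squared_distances.append(kmeans.inertia_)
#   plt.plot(range(1, min(10, num_movies)), sum_of_squared_distances, 'bx-')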

# Silhouette Score

silhouette_avg = []
for k in range(2, min(10, num_movies)):  # silhouette_score requires 2 <= k <= n_samples - 1
kmeans = KMeans(n_clusters=k)
@@ -53,8 +79,7 @@
plt.show()
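# Silhouette scores lie in [-1, 1] and higher is better; a common heuristic
# (illustrative, not part of this commit) is to pick the k that maximises the
# average score, offsetting by 2 because the loop starts at k = 2:
#   best_k = max(range(2, min(10, num_movies)), key=lambda k: silhouette_avg[k - 2])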

# Choose the optimal number of clusters
optimal_k = 5  # Example value; replace with the best k determined from the plots

# K-means Clustering with Optimal k
kmeans = KMeans(n_clusters=optimal_k)
kmeans.fit(tfidf_pca)
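# Cluster assignments are available as kmeans.labels_; a typical follow-up
# (illustrative, not shown in the visible diff) is to colour the 2-D PCA
# scatter by cluster:
#   plt.scatter(tfidf_pca[:, 0], tfidf_pca[:, 1], c=kmeans.labels_)
#   plt.show()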
