Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Projects under the 'Algorithms and Deep Learning Models' directory were missing README files; they have now been added #1475

Merged
merged 9 commits into from
Oct 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 40 additions & 16 deletions Algorithms and Deep Learning Models/Boxoffice/Boxoffice.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Import necessary libraries\n",
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
Expand All @@ -22,7 +23,8 @@
"metadata": {},
"outputs": [],
"source": [
"df=pd.read_csv(\"D:/Documents/Data Sets/film.csv\")"
"# Load the dataset\n",
"df=pd.read_csv(\"D:/Documents/Data Sets/movie_dataset.csv\")"
]
},
{
Expand All @@ -31,6 +33,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Display the first 5 rows of the dataset\n",
"df.head(5)"
]
},
Expand All @@ -40,6 +43,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Get the shape of the dataset\n",
"df.shape"
]
},
Expand All @@ -49,6 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Check for missing values in the dataset\n",
"df.isnull().sum()"
]
},
Expand All @@ -58,6 +63,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Drop rows with missing values\n",
"df.dropna(inplace=True)"
]
},
Expand All @@ -67,6 +73,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Verify that there are no more missing values\n",
"df.isnull().sum()"
]
},
Expand All @@ -76,6 +83,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Display information about the dataset\n",
"df.info()"
]
},
Expand All @@ -85,7 +93,8 @@
"metadata": {},
"outputs": [],
"source": [
"cor=df['Budget'].corr(df['Revenue'])\n",
"# Calculate and display correlation between budget and revenue\n",
"cor=df['budget'].corr(df['revenue'])\n",
"cor"
]
},
Expand All @@ -95,18 +104,18 @@
"metadata": {},
"outputs": [],
"source": [
"# Encode categorical variables using Label Encoding\n",
"lr=preprocessing.LabelEncoder()\n",
"df['Title']=lr.fit_transform(df['Title'])\n",
"df['Original Title']=lr.fit_transform(df['Original Title'])\n",
"df['Original Language']=lr.fit_transform(df['Original Language'])\n",
"df['Status']=lr.fit_transform(df['Status'])\n",
"df['Spoken Languages']=lr.fit_transform(df['Spoken Languages'])\n",
"df['Production Countries']=lr.fit_transform(df['Production Countries'])\n",
"df['Production Companies']=lr.fit_transform(df['Production Companies'])\n",
"df['Genres']=lr.fit_transform(df['Genres'])\n",
"df['Overview']=lr.fit_transform(df['Overview'])\n",
"df['Release Date']=lr.fit_transform(df['Release Date'])\n",
"df['Adult']=lr.fit_transform(df['Adult'])\n"
"df['title']=lr.fit_transform(df['title'])\n",
"df['original_title']=lr.fit_transform(df['original_title'])\n",
"df['original_language']=lr.fit_transform(df['original_language'])\n",
"df['status']=lr.fit_transform(df['status'])\n",
"df['spoken_languages']=lr.fit_transform(df['spoken_languages'])\n",
"df['production_countries']=lr.fit_transform(df['production_countries'])\n",
"df['production_companies']=lr.fit_transform(df['production_companies'])\n",
"df['genres']=lr.fit_transform(df['genres'])\n",
"df['overview']=lr.fit_transform(df['overview'])\n",
"df['release_date']=lr.fit_transform(df['release_date'])\n"
]
},
{
Expand All @@ -115,6 +124,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Display information about the dataset after encoding\n",
"df.info()"
]
},
Expand All @@ -124,7 +134,15 @@
"metadata": {},
"outputs": [],
"source": [
"sns.heatmap(data=df)"
"# Create and display a heatmap of feature correlations\n",
"correlation_matrix = df.select_dtypes(include=[np.number]).corr()\n",
"plt.figure(figsize=(14, 10))\n",
"sns.heatmap(correlation_matrix, annot=True, fmt='.2f', cmap='coolwarm', square=True, cbar_kws={\"shrink\": .8})\n",
"plt.title('Heatmap of Feature Correlations', fontsize=20)\n",
"plt.xticks(rotation=45, ha='right')\n",
"plt.yticks(rotation=0)\n",
"plt.tight_layout()\n",
"plt.show()"
]
},
{
Expand All @@ -133,8 +151,9 @@
"metadata": {},
"outputs": [],
"source": [
"X=df[['Budget','Popularity','Runtime']]\n",
"Y=df['Revenue']\n"
"# Define features and target variable for the model\n",
"X=df[['budget','popularity','runtime']]\n",
"Y=df['revenue']\n"
]
},
{
Expand All @@ -143,6 +162,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Split the data into training and testing sets\n",
"x_train, x_test, y_train, y_test=train_test_split(X,Y, test_size=0.4)"
]
},
Expand All @@ -152,6 +172,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Create a Linear Regression model\n",
"lr=LinearRegression()"
]
},
Expand All @@ -161,6 +182,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Fit the model to the training data\n",
"lr.fit(x_train, y_train)"
]
},
Expand All @@ -170,6 +192,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Make predictions on the testing set\n",
"pred=lr.predict(x_test)"
]
},
Expand All @@ -179,6 +202,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the model using Mean Absolute Error\n",
"print(metrics.mean_absolute_error(y_test, pred))"
]
}
Expand Down
49 changes: 49 additions & 0 deletions Algorithms and Deep Learning Models/Boxoffice/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Movie Revenue Prediction
This project aims to predict movie revenue based on various features such as budget, popularity, and runtime using a linear regression model.

## Table of Contents
- [Installation](#installation)
- [Dataset](#dataset)
- [Usage](#usage)
- [Results](#results)

## Installation

To run this project, you'll need to have Python installed along with the following libraries:
```requirements.txt
NumPy
Pandas
Matplotlib
Seaborn
Scikit-learn
```
You can install the necessary libraries using pip:

```bash
pip install -r requirements.txt
```

## Dataset

The dataset used in this project is `movie_dataset.csv`, which contains information about various movies, including their budget, revenue, popularity, runtime, and more.

## Usage

1. Clone the repository:

```bash
git clone <your-repo-url>
cd <your-repo-directory>
```

2. Open the Jupyter Notebook:

```bash
jupyter notebook
```

3. Run the notebook cells sequentially to load the dataset, preprocess it, and train the linear regression model to make predictions on movie revenue.

## Results

After running the model, you will receive the Mean Absolute Error (MAE) as an evaluation metric to assess the prediction accuracy.
4,807 changes: 4,807 additions & 0 deletions Algorithms and Deep Learning Models/Boxoffice/movie_dataset.csv

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions Algorithms and Deep Learning Models/Boxoffice/reqiurements.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
numpy
pandas
matplotlib
seaborn
scikit-learn
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Erasing Clouds from Satellite Imagery

This project focuses on developing a deep learning model to remove clouds from satellite imagery. Clouds obstruct critical information in satellite images, making it difficult to analyze geographic, agricultural, and environmental features. By building a model that can effectively "erase" clouds, this project aims to provide clearer, more accurate satellite images for better analysis and decision-making.

## Overview

Satellite images are essential for various industries such as agriculture, weather forecasting, environmental monitoring, and more. However, clouds often cover parts of these images, obstructing the view of the Earth's surface. This project provides a solution by processing satellite images and removing clouds, resulting in a clearer image of the terrain below.

The process involves training a deep learning model on paired images — one set of images containing clouds and another set of the same location without clouds. Through multiple training iterations, the model learns how to predict and generate cloud-free images, enhancing the usability of satellite imagery.

## Features

- **Cloud Removal from Satellite Images**: The model removes cloud cover from satellite images to reveal the underlying terrain.
- **Loss Visualization**: Training progress is visualized using loss plots for both the generator and discriminator components.
- **Customizable Design**: The model's training parameters can be adjusted to enhance performance based on specific datasets.
- **Visual Representation**: The model provides visual feedback on training progress, with clear indications of where improvements are being made.

## Setup

To run this project locally, follow these steps:

### Prerequisites

- Python 3.x
- Required libraries:
- `matplotlib`
- `numpy`
- `torch`
- `PIL`

Install the required libraries using pip:

```bash
pip install matplotlib numpy torch pillow
```

### Clone the Repository

Clone this repository to your local machine:

```bash
git clone <repository-url>
cd <repository-directory>
```

### Running the Project

1. Place the training and validation datasets in the appropriate folder (dataset structure to be defined based on your use case).
2. Losses for both the generator and discriminator are plotted and saved as images after training.

### Visualizing Losses

The loss curves for the training process can be visualized using `matplotlib`:

```python
plt.show()
```

You can find the generated loss plots in the project directory as `Loss.jpg`.

## Usage

After training, the model can be used to process new satellite images and remove cloud cover:

```python
python inference.py --input_image <path_to_cloudy_image>
```

This will output a cloud-free image that can be used for further analysis.

Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
44 changes: 44 additions & 0 deletions Algorithms and Deep Learning Models/QR-Scanner/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# QR Code Scanner

This project is a simple QR code scanner that uses OpenCV to capture video from a webcam and detects QR codes in real-time. Once a QR code is detected, the URL or data encoded in the QR code will automatically be opened in your default web browser.

## Features
- Real-time QR code detection using a webcam.
- Automatically opens the decoded URL in the default web browser.
- Exits the application after a QR code is successfully scanned and opened.

## Requirements

Before running the project, ensure you have the following installed:

- Python 3.x
- OpenCV (`cv2`)
- A webcam (integrated or external)

### Python Dependencies
You can install the necessary dependencies using the following commands:

```bash
pip install opencv-python
```

## How to Run the Project
1. Clone or download the project to your local machine.
2. Navigate to the project directory.
3. Ensure you have a working webcam connected.
4. Run the Python script:
```bash
python qr_scan.py
```
The webcam will start, and as soon as a QR code is detected, its URL will be opened in your default web browser.

## Screenshots
- Here is an example of the QR code scanner in action:
1. **Camera Window Capturing QR Code:**
![QR Code Scanner Capturing](https://github.com/ananas304/machine-learning-repos/blob/main/Algorithms%20and%20Deep%20Learning%20Models/QR-Scanner/QR_Scanne-qr%20code%20image%20capture.png)

In this screenshot, the camera window is open, and it's capturing a QR code in real-time.

## Notes
- The application will exit automatically after a QR code is detected and the link is opened.
- Press the `q` key to exit the scanner manually at any time.
22 changes: 10 additions & 12 deletions Algorithms and Deep Learning Models/QR-Scanner/qr_scan.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import cv2
from pyzbar.pyzbar import decode
import webbrowser

# Function to open web browser with the decoded QR code link
Expand All @@ -11,26 +10,25 @@ def open_link_once(url):
cam.set(3, 640) # Width
cam.set(4, 480) # Height

# Initialize the QRCode detector
detector = cv2.QRCodeDetector()

while True:
success, frame = cam.read()

# Decode QR codes
for barcode in decode(frame):
# Extract barcode data
qr_data = barcode.data.decode('utf-8')
print(f"QR Code data: {qr_data}")
# Detect and decode the QR code
data, bbox, _ = detector.detectAndDecode(frame)

if data:
print(f"QR Code data: {data}")

# Open the URL in a web browser only once
open_link_once(qr_data)
break # Break out of the for loop after opening the link
open_link_once(data)
break # Break out of the loop after opening the link

# Display the camera frame
cv2.imshow('QR Scanner', frame)

# Check if link has been opened and break out of the main loop
if 'qr_data' in locals():
break

# Wait for key press and break loop if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
opencv-python==4.10.0.84
Loading