Remove existing models #135

Open
wants to merge 5 commits into main
1 change: 1 addition & 0 deletions .user.yml
@@ -0,0 +1 @@
id: a6084397-071e-43eb-8cb1-12d7e277cae5
671 changes: 671 additions & 0 deletions data/issues.csv

Large diffs are not rendered by default.

Binary file added data/issues.db
Binary file not shown.
25 changes: 18 additions & 7 deletions dbt_project.yml
@@ -1,7 +1,7 @@
 name: 'jaffle_shop'

 config-version: 2
-version: '0.1'
+version: '1.5.0'

 profile: 'jaffle_shop'

@@ -13,14 +13,25 @@ macro-paths: ["macros"]

 target-path: "target"
 clean-targets:
-  - "target"
-  - "dbt_modules"
-  - "logs"
+    - "target"
+    - "dbt_modules"
+    - "logs"

 require-dbt-version: [">=1.0.0", "<2.0.0"]

 models:
   jaffle_shop:
-    materialized: table
-    staging:
-      materialized: view
+    # Default materialization for the project
+    +materialized: table
+
+    staging:
+      +materialized: view # All models in the staging folder will be views by default
+
+    marts:
+      # Default materialization for marts folder
+      +materialized: table
+
+      core:
+        +schema: core # Models in the marts/core folder will use the "core" schema
+        intermediate:
+          +schema: core_intermediate # Models in marts/core/intermediate use "core_intermediate"
69 changes: 0 additions & 69 deletions models/customers.sql

This file was deleted.

14 changes: 0 additions & 14 deletions models/docs.md

This file was deleted.

56 changes: 0 additions & 56 deletions models/orders.sql

This file was deleted.

11 changes: 0 additions & 11 deletions models/overview.md

This file was deleted.

82 changes: 0 additions & 82 deletions models/schema.yml

This file was deleted.

11 changes: 11 additions & 0 deletions profiles.yml
@@ -0,0 +1,11 @@
jaffle_shop:
  target: dev
  outputs:
    dev:
      type: sqlite
      threads: 1
      database: issues
      schema: main
      schema_directory: /Users/bonafide/Downloads/GitHub/fleetio/jaffle-shop-classic/scripts/data
      schemas_and_paths:
        main: /Users/bonafide/Downloads/GitHub/fleetio/jaffle-shop-classic/scripts/data
File renamed without changes.
51 changes: 51 additions & 0 deletions scripts/fetch_google_sheets.py
@@ -0,0 +1,51 @@
import os
import requests
import pandas as pd

# Google Sheets API setup
API_KEY = "AIzaSyAJ7jxaWNSGsNetM9DMiMpK_h7kcS2RCNQ"  # Replace with your API key
SPREADSHEET_ID = "1MGVed0Psao7WwIulcrASyjw_nvHslvLhuvBiKqXYmuI"  # Replace with your Google Sheet ID
RANGE_NAME = "issues!A:N"  # Adjust range as needed

# Build the URL
url = f"https://sheets.googleapis.com/v4/spreadsheets/{SPREADSHEET_ID}/values/{RANGE_NAME}?key={API_KEY}"
print(f"Fetching data from Google Sheets: {url}")

# Fetch data
response = requests.get(url)

print(f"Response status code: {response.status_code}")

if response.status_code == 200:
    data = response.json()
    print("Data fetched successfully from Google Sheets")

    values = data.get("values", [])
    print(f"Number of rows fetched: {len(values)}")

    # Convert to DataFrame
    if values:
        print("Converting data to DataFrame...")
        df = pd.DataFrame(values[1:], columns=values[0])  # Use first row as column names
        print(f"DataFrame created with {df.shape[0]} rows and {df.shape[1]} columns")

        script_dir = os.path.dirname(os.path.abspath(__file__))  # Get the directory of the current script
        project_root = os.path.abspath(os.path.join(script_dir, '..'))  # Move one level up to `jaffle-shop-classic`
        output_dir = os.path.join(project_root, 'data')  # Create the `data` folder inside `jaffle-shop-classic`
        os.makedirs(output_dir, exist_ok=True)
        output_file = os.path.join(output_dir, 'issues.csv')

        # Save data to CSV
        print(f"Saving data to {output_file}...")
        df.to_csv(output_file, index=False)
        print(f"Data saved to {output_file}")
    else:
        print("No data found.")
else:
    print(f"Failed to fetch data: {response.status_code}")
    if response.status_code == 403:
        print("Permission denied: Ensure the sheet is publicly accessible or the API key has proper access.")
    elif response.status_code == 404:
        print("Sheet not found: Check the SPREADSHEET_ID and ensure the sheet exists.")
    else:
        print(response.text)
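
A minimal sketch of an alternative fetch, not part of this diff: the same request with the key read from an assumed GOOGLE_SHEETS_API_KEY environment variable instead of a hardcoded constant, failing fast on HTTP errors.

import os

import requests

# Sketch only: GOOGLE_SHEETS_API_KEY is an assumed environment variable,
# not something this PR defines.
API_KEY = os.environ.get("GOOGLE_SHEETS_API_KEY")
if not API_KEY:
    raise SystemExit("Set GOOGLE_SHEETS_API_KEY before running.")

SPREADSHEET_ID = "1MGVed0Psao7WwIulcrASyjw_nvHslvLhuvBiKqXYmuI"
RANGE_NAME = "issues!A:N"

url = (
    f"https://sheets.googleapis.com/v4/spreadsheets/{SPREADSHEET_ID}"
    f"/values/{RANGE_NAME}?key={API_KEY}"
)
response = requests.get(url, timeout=30)
response.raise_for_status()  # Raises on 403/404 instead of printing and continuing
values = response.json().get("values", [])
print(f"Fetched {len(values)} rows")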
33 changes: 33 additions & 0 deletions scripts/load_to_sqlite.py
@@ -0,0 +1,33 @@
import sqlite3
import pandas as pd

# Load CSV data
csv_file = 'data/issues.csv'
df = pd.read_csv(csv_file)

# Debugging: Print column names
print("Columns in CSV:", df.columns)

# Connect to SQLite database (creates database if it doesn't exist)
conn = sqlite3.connect('data/issues.db')
cursor = conn.cursor()

# Drop the table if it exists
cursor.execute("DROP TABLE IF EXISTS issues")

# Dynamically create a table with columns matching the CSV
columns = ", ".join([f'"{col}" TEXT' for col in df.columns]) # Define all columns as TEXT
create_table_query = f"CREATE TABLE issues ({columns});"
cursor.execute(create_table_query)

# Insert data into the table
for _, row in df.iterrows():
    placeholders = ", ".join(["?"] * len(row))  # Generate placeholders for all columns
    insert_query = f"INSERT INTO issues VALUES ({placeholders})"
    cursor.execute(insert_query, tuple(row))

# Commit changes and close the connection
conn.commit()
conn.close()

print("Data successfully loaded into SQLite database (data/issues.db).")