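"""Scrape Mars data with Splinter and BeautifulSoup: the latest news headline
and teaser, the featured JPL space image, a Mars/Earth facts table, and the
hemisphere images. `scrape_all()` runs every scrape and returns the combined
results as a dictionary."""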
# Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
# Import Pandas
import pandas as pd
# Import datetime
import datetime as dt


def scrape_all():
    # Initiate headless driver for deployment
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=True)

    news_title, news_paragraph = mars_news(browser)

    # Run all scraping functions and store results in a dictionary
    data = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "hemispheres": hemispheres(browser),
        "last_modified": dt.datetime.now()
    }

    # Stop webdriver and return data
    browser.quit()
    return data


# Mars News
def mars_news(browser):
    # Visit the Mars news site
    url = 'https://redplanetscience.com'
    browser.visit(url)

    # Optional delay for loading the page
    browser.is_element_present_by_css('div.list_text', wait_time=1)

    # Set up the HTML parser
    html = browser.html
    news_soup = soup(html, 'html.parser')

    # Add try/except for error handling
    try:
        slide_elem = news_soup.select_one('div.list_text')
        # Use the parent element to find the article title and save it as 'news_title'
        news_title = slide_elem.find('div', class_='content_title').get_text()
        # Use the parent element to find the paragraph (teaser) text
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        return None, None

    return news_title, news_p


# JPL Space Images - Featured Image
def featured_image(browser):
    # Visit URL
    url = 'https://spaceimages-mars.com'
    browser.visit(url)

    # Find and click the full image button
    full_image_elem = browser.find_by_tag('button')[1]
    full_image_elem.click()

    # Parse the resulting html with soup
    html = browser.html
    img_soup = soup(html, 'html.parser')

    # Add try/except for error handling
    try:
        # Find the relative image url
        img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        return None

    # Use the base URL to create an absolute URL
    img_url = f'https://spaceimages-mars.com/{img_url_rel}'

    return img_url


# Mars Facts
def mars_facts():
    # Add try/except for error handling
    try:
        # Use 'read_html' to scrape the facts table into a dataframe
        df = pd.read_html('https://galaxyfacts-mars.com')[0]
        df = df.iloc[1:, :]
    except BaseException:
        return None

    # Assign columns and set index
    df.columns = ['Description', 'Mars', 'Earth']
    df.set_index('Description', inplace=True)

    # Convert DataFrame into HTML-ready code
    return df.to_html(classes="table table-hover table-striped")


# Hemispheres
def hemispheres(browser):
    # 1. Use the browser to visit the URL
    url = 'https://marshemispheres.com/'
    browser.visit(url)

    # 2. Create a list to hold the image urls and titles.
    hemisphere_image_urls = []

    # 3. Retrieve the image url and title for each hemisphere.
    links = browser.find_by_css('a.product-item img')
    for i in range(len(links)):
        # Create an empty dictionary for this hemisphere.
        hemispheres = {}
        # Find the thumbnail image and click through to the detail page.
        browser.find_by_css('a.product-item img')[i].click()
        # Find the 'Sample' link and extract the full-resolution image url.
        sample_elem = browser.links.find_by_text('Sample').first
        hemispheres['img_url'] = sample_elem['href']
        # Get the title
        hemispheres['title'] = browser.find_by_css('h2.title').text
        # Append the dictionary to the list.
        hemisphere_image_urls.append(hemispheres)
        # Navigate back to the index page before the next iteration.
        browser.back()

    return hemisphere_image_urls


if __name__ == "__main__":
    # If running as a script, print the scraped data
    print(scrape_all())
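
# Example usage (a hypothetical interactive session; assumes Google Chrome is
# installed locally, since webdriver_manager downloads a matching chromedriver):
#
#   from scraping import scrape_all
#   mars_data = scrape_all()
#   print(mars_data["news_title"])
#   print(mars_data["hemispheres"][0]["img_url"])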