diff --git a/Scrapper.py b/Scrapper.py
new file mode 100644
index 00000000..b7c25b59
--- /dev/null
+++ b/Scrapper.py
@@ -0,0 +1,41 @@
+from bs4 import BeautifulSoup
+from urllib import urlopen
+import pymysql
+
+# Bukalapak category link
+bukalapak = "https://www.bukalapak.com/c/handphone?source=navbar&from=navbar_categories"
+
+# fetch the Bukalapak page
+bukalapakClient = urlopen(bukalapak)
+html_page = bukalapakClient.read()
+bukalapakClient.close()
+
+# open the database and (re)create the results table
+db = pymysql.connect(db='base', user='root', passwd='pwd', unix_socket="/tmp/mysql.sock")
+cursor = db.cursor()
+cursor.execute("DROP TABLE IF EXISTS database1")
+
+sql = """CREATE TABLE database1 (Nama VARCHAR(100), Harga INT)"""
+cursor.execute(sql)
+
+# scrape name and price from Bukalapak
+bukalapakPage = BeautifulSoup(html_page, "html5lib")
+products = bukalapakPage.find_all("li", class_="product--sem col-12--2")
+for product in products:
+    product_name = product.div.article.div.a["title"]
+    product_price = product.find("div", class_="product-price").find("span", class_="amount positive").text
+    # the scraped price is text such as "1.000.000"; keep digits only so it fits the INT column
+    product_price = int("".join(ch for ch in product_price if ch.isdigit()))
+    # insert into the table
+    try:
+        cursor.execute("""INSERT INTO database1 VALUES (%s,%s)""", (product_name, product_price))
+        db.commit()
+    except pymysql.Error:
+        db.rollback()
+
+# show the table
+cursor.execute("""SELECT * FROM database1;""")
+
+print cursor.fetchall()
+
+db.close()
diff --git a/tokopedia/scrapy.cfg b/tokopedia/scrapy.cfg
new file mode 100644
index 00000000..44d8f940
--- /dev/null
+++ b/tokopedia/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.org/en/latest/deploy.html
+
+[settings]
+default = tokopedia.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = tokopedia
diff --git a/tokopedia/tokopedia/__init__.py b/tokopedia/tokopedia/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tokopedia/tokopedia/__init__.pyc b/tokopedia/tokopedia/__init__.pyc
new file mode 100644
index 00000000..771cd14e
Binary files /dev/null and b/tokopedia/tokopedia/__init__.pyc differ
diff --git a/tokopedia/tokopedia/items.json b/tokopedia/tokopedia/items.json
new file mode 100644
index 00000000..e69de29b
diff --git a/tokopedia/tokopedia/items.py b/tokopedia/tokopedia/items.py
new file mode 100644
index 00000000..89b7ffc4
--- /dev/null
+++ b/tokopedia/tokopedia/items.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# http://doc.scrapy.org/en/latest/topics/items.html
+
+from scrapy.item import Item, Field
+
+
+class TokopediaItem(Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    name = Field()
+    price = Field()
+    url = Field()
diff --git a/tokopedia/tokopedia/items.pyc b/tokopedia/tokopedia/items.pyc
new file mode 100644
index 00000000..d13ea833
Binary files /dev/null and b/tokopedia/tokopedia/items.pyc differ
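Note: TokopediaItem behaves like a dict with a fixed set of declared fields, which is what lets the pipeline further down iterate its populated fields and convert it with dict(item). A minimal sketch, with made-up field values for illustration:

    from tokopedia.items import TokopediaItem

    item = TokopediaItem()
    item['name'] = 'Example Phone'                     # hypothetical value
    item['price'] = 'Rp1.000.000'                      # hypothetical value
    item['url'] = 'https://www.tokopedia.com/example'  # hypothetical value
    print dict(item)
    # assigning an undeclared field, e.g. item['rating'] = 5, raises KeyError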
diff --git a/tokopedia/tokopedia/middlewares.py b/tokopedia/tokopedia/middlewares.py
new file mode 100644
index 00000000..f8356fad
--- /dev/null
+++ b/tokopedia/tokopedia/middlewares.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class TokopediaSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/tokopedia/tokopedia/pipelines.py b/tokopedia/tokopedia/pipelines.py
new file mode 100644
index 00000000..2157426d
--- /dev/null
+++ b/tokopedia/tokopedia/pipelines.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+import pymongo
+
+from scrapy.conf import settings
+from scrapy.exceptions import DropItem
+from scrapy import log
+
+
+class TokopediaPipeline(object):
+
+    def __init__(self):
+        connection = pymongo.MongoClient(
+            settings['MONGODB_SERVER'],
+            settings['MONGODB_PORT']
+        )
+        db = connection[settings['MONGODB_DB']]
+        self.collection = db[settings['MONGODB_COLLECTION']]
+
+    def process_item(self, item, spider):
+        # drop the item if any of its populated fields is empty
+        for data in item:
+            if not item[data]:
+                raise DropItem("Missing {0}!".format(data))
+        self.collection.insert(dict(item))
+        log.msg("Item added to MongoDB database!",
+                level=log.DEBUG, spider=spider)
+        return item
diff --git a/tokopedia/tokopedia/pipelines.pyc b/tokopedia/tokopedia/pipelines.pyc
new file mode 100644
index 00000000..c6c698d1
Binary files /dev/null and b/tokopedia/tokopedia/pipelines.pyc differ
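Note: the pipeline above reads its MongoDB target from the settings that follow. A quick way to verify what a crawl wrote, sketched against the default values below (localhost:27017, database "tokopedia", collection "item"):

    import pymongo

    connection = pymongo.MongoClient("localhost", 27017)
    collection = connection["tokopedia"]["item"]
    # print every document the pipeline has inserted
    for doc in collection.find():
        print doc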
diff --git a/tokopedia/tokopedia/settings.py b/tokopedia/tokopedia/settings.py
new file mode 100644
index 00000000..c6d49df3
--- /dev/null
+++ b/tokopedia/tokopedia/settings.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for tokopedia project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     http://doc.scrapy.org/en/latest/topics/settings.html
+#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'tokopedia'
+
+SPIDER_MODULES = ['tokopedia.spiders']
+NEWSPIDER_MODULE = 'tokopedia.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'tokopedia (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'tokopedia.middlewares.TokopediaSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'tokopedia.middlewares.MyCustomDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'tokopedia.pipelines.TokopediaPipeline': 300,
+#}
+ITEM_PIPELINES = {'tokopedia.pipelines.TokopediaPipeline': 300}
+
+MONGODB_SERVER = "localhost"
+MONGODB_PORT = 27017
+MONGODB_DB = "tokopedia"
+MONGODB_COLLECTION = "item"
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/tokopedia/tokopedia/settings.pyc b/tokopedia/tokopedia/settings.pyc
new file mode 100644
index 00000000..dc624bbc
Binary files /dev/null and b/tokopedia/tokopedia/settings.pyc differ
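Note: with ITEM_PIPELINES enabled above, a crawl is normally started with "scrapy crawl tokopedia" from the directory holding scrapy.cfg. A programmatic equivalent, as a sketch (it must likewise run from the project root so get_project_settings() picks up tokopedia/settings.py):

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    # loads tokopedia/settings.py, so the MongoDB pipeline is active
    process = CrawlerProcess(get_project_settings())
    process.crawl("tokopedia")  # the spider's `name` attribute
    process.start()             # blocks until the crawl finishes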
diff --git a/tokopedia/tokopedia/spiders/__init__.py b/tokopedia/tokopedia/spiders/__init__.py
new file mode 100644
index 00000000..ebd689ac
--- /dev/null
+++ b/tokopedia/tokopedia/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/tokopedia/tokopedia/spiders/__init__.pyc b/tokopedia/tokopedia/spiders/__init__.pyc
new file mode 100644
index 00000000..7d7260a0
Binary files /dev/null and b/tokopedia/tokopedia/spiders/__init__.pyc differ
diff --git a/tokopedia/tokopedia/spiders/tokopedia_spider.py b/tokopedia/tokopedia/spiders/tokopedia_spider.py
new file mode 100644
index 00000000..08e3d712
--- /dev/null
+++ b/tokopedia/tokopedia/spiders/tokopedia_spider.py
@@ -0,0 +1,26 @@
+from scrapy import Spider
+from scrapy.selector import Selector
+
+from tokopedia.items import TokopediaItem
+
+
+class TokopediaSpider(Spider):
+    name = "tokopedia"
+    allowed_domains = ["tokopedia.com"]
+    start_urls = [
+        "https://www.tokopedia.com/p/handphone-tablet/handphone?page=1",
+    ]
+
+    def parse(self, response):
+        # one selector per product card on the category page
+        cards = Selector(response).xpath('//div[@class="product-summary"]')
+
+        for card in cards:
+            item = TokopediaItem()
+            item['name'] = card.xpath(
+                'div[@class="product-name ng-binding"]/text()').extract()[0]
+            item['price'] = card.xpath(
+                'div[@class="product-price ng-binding"]/text()').extract()[0]
+            item['url'] = card.xpath(
+                'a[@class="ng-href"]/@href').extract()[0]
+            yield item
diff --git a/tokopedia/tokopedia/spiders/tokopedia_spider.pyc b/tokopedia/tokopedia/spiders/tokopedia_spider.pyc
new file mode 100644
index 00000000..dd657fd2
Binary files /dev/null and b/tokopedia/tokopedia/spiders/tokopedia_spider.pyc differ
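Note: the spider's XPaths assume Tokopedia's Angular-rendered markup (the ng-* classes); if the live page builds those nodes client-side, the raw HTML Scrapy downloads may not contain them. The selectors can be smoke-tested offline, a sketch using made-up HTML shaped like the markup the spider targets:

    from scrapy.selector import Selector

    # hypothetical snippet mirroring the expected card structure
    html = '''
    <div class="product-summary">
      <a class="ng-href" href="https://www.tokopedia.com/example"></a>
      <div class="product-name ng-binding">Example Phone</div>
      <div class="product-price ng-binding">Rp1.000.000</div>
    </div>
    '''
    card = Selector(text=html).xpath('//div[@class="product-summary"]')[0]
    print card.xpath('div[@class="product-name ng-binding"]/text()').extract()[0]
    print card.xpath('a[@class="ng-href"]/@href').extract()[0]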