diff --git a/crawlab/app.py b/crawlab/app.py
index 4eebc804..c984f140 100644
--- a/crawlab/app.py
+++ b/crawlab/app.py
@@ -9,6 +9,7 @@ from flask import Flask
 from flask_cors import CORS
 from flask_restful import Api
 # from flask_restplus import Api
+from routes.sites import SiteApi
 from utils.log import other
 from constants.node import NodeStatus
 from db.manager import db_manager
@@ -68,6 +69,9 @@ api.add_resource(StatsApi,
 api.add_resource(ScheduleApi,
                  '/api/schedules',
                  '/api/schedules/<string:id>')
+api.add_resource(SiteApi,
+                 '/api/sites',
+                 '/api/sites/<string:id>')
 
 
 def monitor_nodes_status(celery_app):
diff --git a/crawlab/db/manager.py b/crawlab/db/manager.py
index 71ea9b2b..4c5535e7 100644
--- a/crawlab/db/manager.py
+++ b/crawlab/db/manager.py
@@ -13,7 +13,7 @@ class DbManager(object):
     """
 
     def __init__(self):
-        self.mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
+        self.mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT, connect=False)
         self.db = self.mongo[MONGO_DB]
 
     def save(self, col_name: str, item: dict, **kwargs) -> None:
diff --git a/crawlab/routes/sites.py b/crawlab/routes/sites.py
new file mode 100644
index 00000000..6874af2b
--- /dev/null
+++ b/crawlab/routes/sites.py
@@ -0,0 +1,71 @@
+import json
+
+from pymongo import ASCENDING
+
+from db.manager import db_manager
+from routes.base import BaseApi
+from utils import jsonify
+
+
+class SiteApi(BaseApi):
+    col_name = 'sites'
+
+    arguments = (
+        ('keyword', str),
+        ('category', str),
+    )
+
+    def get(self, id: str = None, action: str = None):
+        # action by id
+        if action is not None:
+            if not hasattr(self, action):
+                return {
+                    'status': 'ok',
+                    'code': 400,
+                    'error': 'action "%s" invalid' % action
+                }, 400
+            return getattr(self, action)(id)
+
+        elif id is not None:
+            site = db_manager.get(col_name=self.col_name, id=id)
+            return jsonify(site)
+
+        # list sites
+        args = self.parser.parse_args()
+        page_size = args.get('page_size') or 10
+        page_num = args.get('page_num') or 1
+        filter_str = args.get('filter')
+        keyword = args.get('keyword')
+        filter_ = {}
+        if filter_str is not None:
+            filter_ = json.loads(filter_str)
+        if keyword is not None:
+            filter_['$or'] = [
+                {'description': {'$regex': keyword}},
+                {'name': {'$regex': keyword}},
+                {'domain': {'$regex': keyword}}
+            ]
+
+        items = db_manager.list(
+            col_name=self.col_name,
+            cond=filter_,
+            limit=page_size,
+            skip=page_size * (page_num - 1),
+            sort_key='rank',
+            sort_direction=ASCENDING
+        )
+
+        sites = []
+        for site in items:
+            # get spider count
+            site['spider_count'] = db_manager.count('spiders', {'site': site['_id']})
+
+            sites.append(site)
+
+        return {
+            'status': 'ok',
+            'total_count': db_manager.count(self.col_name, filter_),
+            'page_num': page_num,
+            'page_size': page_size,
+            'items': jsonify(sites)
+        }
diff --git a/crawlab/routes/spiders.py b/crawlab/routes/spiders.py
index 157218ee..51bae78c 100644
--- a/crawlab/routes/spiders.py
+++ b/crawlab/routes/spiders.py
@@ -61,6 +61,9 @@ class SpiderApi(BaseApi):
 
         # spider schedule cron enabled
         ('envs', str),
+
+        # spider site
+        ('site', str),
     )
 
     def get(self, id=None, action=None):
@@ -125,6 +128,12 @@ class SpiderApi(BaseApi):
         if last_task is not None:
            spider['task_ts'] = last_task['create_ts']
 
+        # get site
+        if spider.get('site') is not None:
+            site = db_manager.get('sites', spider['site'])
+            if site is not None:
+                spider['site_name'] = site['name']
+
         # file stats
         stats = get_file_suffix_stats(dir_path)
 
diff --git a/crawlab/routes/tasks.py b/crawlab/routes/tasks.py
index 2afb0cf9..e0cdd0e7 100644
--- 
a/crawlab/routes/tasks.py +++ b/crawlab/routes/tasks.py @@ -36,7 +36,6 @@ class TaskApi(BaseApi): 'code': 400, 'error': 'action "%s" invalid' % action }, 400 - # other.info(f"到这了{action},{id}") return getattr(self, action)(id) elif id is not None: @@ -78,9 +77,6 @@ class TaskApi(BaseApi): sort_key='create_ts') items = [] for task in tasks: - # celery tasks - # _task = db_manager.get('tasks_celery', id=task['_id']) - # get spider _spider = db_manager.get(col_name='spiders', id=str(task['spider_id'])) diff --git a/crawlab/tasks/scheduler.py b/crawlab/tasks/scheduler.py index 55e8fc36..c600029c 100644 --- a/crawlab/tasks/scheduler.py +++ b/crawlab/tasks/scheduler.py @@ -9,7 +9,7 @@ from db.manager import db_manager class Scheduler(object): - mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT) + mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT, connect=False) task_col = 'apscheduler_jobs' # scheduler jobstore diff --git a/frontend/package.json b/frontend/package.json index 3e8fc9a1..1e005431 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "crawlab", - "version": "0.1.0", + "version": "0.2.0", "private": true, "scripts": { "serve": "cross-env NODE_ENV=development vue-cli-service serve --ip=0.0.0.0", diff --git a/frontend/src/components/InfoView/SpiderInfoView.vue b/frontend/src/components/InfoView/SpiderInfoView.vue index 6bc1a157..fe384f14 100644 --- a/frontend/src/components/InfoView/SpiderInfoView.vue +++ b/frontend/src/components/InfoView/SpiderInfoView.vue @@ -23,6 +23,14 @@ + + + + @@ -38,26 +46,6 @@ - - - - - - - - - - - - - - - - - - - - @@ -172,6 +160,22 @@ export default { }) } }) + }, + fetchSiteSuggestions (keyword, callback) { + this.$request.get('/sites', { + keyword: keyword, + page_num: 1, + page_size: 100 + }).then(response => { + const data = response.data.items.map(d => { + d.value = `${d.name} | ${d.domain}` + return d + }) + callback(data) + }) + }, + onSiteSelect (item) { + this.spiderForm.site = item._id } } } @@ -187,4 +191,8 @@ export default { width: 100%; text-align: right; } + + .el-autocomplete { + width: 100%; + } diff --git a/frontend/src/i18n/zh.js b/frontend/src/i18n/zh.js index c3d3dc6d..dd1d529e 100644 --- a/frontend/src/i18n/zh.js +++ b/frontend/src/i18n/zh.js @@ -10,6 +10,7 @@ export default { 'Task Detail': '任务详情', 'Schedules': '定时任务', 'Deploys': '部署', + 'Sites': '网站', // 标签 Overview: '概览', @@ -70,7 +71,7 @@ export default { // 节点状态 Online: '在线', - Offline: '在线', + Offline: '离线', Unavailable: '未知', // 爬虫 @@ -130,6 +131,15 @@ export default { 'Parameters': '参数', 'Add Schedule': '添加定时任务', + // 网站 + 'Site': '网站', + 'Rank': '排名', + 'Domain': '域名', + 'Category': '类别', + 'Select': '请选择', + 'Select Category': '请选择类别', + 'Spider Count': '爬虫数', + // 文件 'Choose Folder': '选择文件', diff --git a/frontend/src/router/index.js b/frontend/src/router/index.js index 9cf2acb0..bf96c11b 100644 --- a/frontend/src/router/index.js +++ b/frontend/src/router/index.js @@ -183,6 +183,26 @@ export const constantRouterMap = [ } ] }, + { + name: 'Site', + path: '/sites', + component: Layout, + meta: { + title: 'Site', + icon: 'fa fa-sitemap' + }, + children: [ + { + path: '', + name: 'SiteList', + component: () => import('../views/site/SiteList'), + meta: { + title: 'Sites', + icon: 'fa fa-sitemap' + } + } + ] + }, { path: '*', redirect: '/404', hidden: true } ] diff --git a/frontend/src/store/index.js b/frontend/src/store/index.js index 92653f15..33ae9f40 100644 --- a/frontend/src/store/index.js +++ b/frontend/src/store/index.js @@ -11,6 
+11,7 @@ import task from './modules/task' import file from './modules/file' import schedule from './modules/schedule' import lang from './modules/lang' +import site from './modules/site' import getters from './getters' Vue.use(Vuex) @@ -27,7 +28,8 @@ const store = new Vuex.Store({ task, file, schedule, - lang + lang, + site }, getters }) diff --git a/frontend/src/store/modules/site.js b/frontend/src/store/modules/site.js new file mode 100644 index 00000000..d05b8de4 --- /dev/null +++ b/frontend/src/store/modules/site.js @@ -0,0 +1,67 @@ +import request from '../../api/request' + +const state = { + siteList: [], + + // filter + filter: { + category: undefined + }, + keyword: '', + + // pagination + pageNum: 1, + pageSize: 10, + totalCount: 0 +} + +const getters = {} + +const mutations = { + SET_KEYWORD (state, value) { + state.keyword = value + }, + SET_SITE_LIST (state, value) { + state.siteList = value + }, + SET_PAGE_NUM (state, value) { + state.pageNum = value + }, + SET_PAGE_SIZE (state, value) { + state.pageSize = value + }, + SET_TOTAL_COUNT (state, value) { + state.totalCount = value + } +} + +const actions = { + editSite ({ state, dispatch }, payload) { + const { id, category } = payload + return request.post(`/sites/${id}`, { + category + }) + }, + getSiteList ({ state, commit }) { + return request.get('/sites', { + page_num: state.pageNum, + page_size: state.pageSize, + keyword: state.keyword || undefined, + filter: { + category: state.filter.category || undefined + } + }) + .then(response => { + commit('SET_SITE_LIST', response.data.items) + commit('SET_TOTAL_COUNT', response.data.total_count) + }) + } +} + +export default { + namespaced: true, + state, + getters, + mutations, + actions +} diff --git a/frontend/src/store/modules/spider.js b/frontend/src/store/modules/spider.js index b8345082..b7e702fd 100644 --- a/frontend/src/store/modules/spider.js +++ b/frontend/src/store/modules/spider.js @@ -55,7 +55,7 @@ const mutations = { }, SET_NODE_STATS (state, value) { state.nodeStats = value - }, + } } const actions = { @@ -74,7 +74,8 @@ const actions = { lang: state.spiderForm.lang, col: state.spiderForm.col, cron: state.spiderForm.cron, - cron_enabled: state.spiderForm.cron_enabled ? 1 : 0 + cron_enabled: state.spiderForm.cron_enabled ? 1 : 0, + site: state.spiderForm.site }) .then(() => { dispatch('getSpiderList') @@ -89,7 +90,8 @@ const actions = { lang: state.spiderForm.lang, col: state.spiderForm.col, cron: state.spiderForm.cron, - cron_enabled: state.spiderForm.cron_enabled ? 1 : 0 + cron_enabled: state.spiderForm.cron_enabled ? 
1 : 0, + site: state.spiderForm.site }) .then(() => { dispatch('getSpiderList') diff --git a/frontend/src/views/site/SiteList.vue b/frontend/src/views/site/SiteList.vue new file mode 100644 index 00000000..9c053dc5 --- /dev/null +++ b/frontend/src/views/site/SiteList.vue @@ -0,0 +1,205 @@ + + + + + diff --git a/frontend/src/views/spider/SpiderList.vue b/frontend/src/views/spider/SpiderList.vue index 14a9ffea..874dcc0b 100644 --- a/frontend/src/views/spider/SpiderList.vue +++ b/frontend/src/views/spider/SpiderList.vue @@ -160,8 +160,9 @@ export default { // tableData, columns: [ { name: 'name', label: 'Name', width: 'auto' }, - { name: 'type', label: 'Spider Type', width: '160', sortable: true }, - { name: 'lang', label: 'Language', width: '160', sortable: true }, + { name: 'site_name', label: 'Site', width: '120' }, + { name: 'type', label: 'Spider Type', width: '120', sortable: true }, + { name: 'lang', label: 'Language', width: '120', sortable: true }, { name: 'task_ts', label: 'Last Run', width: '160' }, { name: 'last_7d_tasks', label: 'Last 7-Day Tasks', width: '80' }, { name: 'last_5_errors', label: 'Last 5-Run Errors', width: '80' } diff --git a/spiders/chinaz/chinaz/__init__.py b/spiders/chinaz/chinaz/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/spiders/chinaz/chinaz/items.py b/spiders/chinaz/chinaz/items.py new file mode 100644 index 00000000..dbec9f33 --- /dev/null +++ b/spiders/chinaz/chinaz/items.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +# Define here the models for your scraped items +# +# See documentation in: +# https://doc.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class ChinazItem(scrapy.Item): + # define the fields for your item here like: + _id = scrapy.Field() + task_id = scrapy.Field() + name = scrapy.Field() + domain = scrapy.Field() + description = scrapy.Field() + rank = scrapy.Field() diff --git a/spiders/chinaz/chinaz/middlewares.py b/spiders/chinaz/chinaz/middlewares.py new file mode 100644 index 00000000..c98995d5 --- /dev/null +++ b/spiders/chinaz/chinaz/middlewares.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +# Define here the models for your spider middleware +# +# See documentation in: +# https://doc.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy import signals + + +class ChinazSpiderMiddleware(object): + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(self, response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception. + return None + + def process_spider_output(self, response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, dict or Item objects. + for i in result: + yield i + + def process_spider_exception(self, response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Response, dict + # or Item objects. 
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class ChinazDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/spiders/chinaz/chinaz/pipelines.py b/spiders/chinaz/chinaz/pipelines.py
new file mode 100644
index 00000000..747de355
--- /dev/null
+++ b/spiders/chinaz/chinaz/pipelines.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+import os
+
+from pymongo import MongoClient
+
+MONGO_HOST = os.environ.get('MONGO_HOST') or 'localhost'
+MONGO_PORT = int(os.environ.get('MONGO_PORT') or '27017')
+MONGO_DB = os.environ.get('MONGO_DB') or 'crawlab_test'
+
+
+class MongoPipeline(object):
+    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
+    db = mongo[MONGO_DB]
+    col_name = os.environ.get('CRAWLAB_COLLECTION') or 'sites'
+    col = db[col_name]
+
+    def process_item(self, item, spider):
+        item['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
+        item['_id'] = item['domain']
+        # only insert domains not seen before (insert_one, as save() is deprecated)
+        if self.col.find_one({'_id': item['_id']}) is None:
+            self.col.insert_one(dict(item))
+        return item
diff --git a/spiders/chinaz/chinaz/settings.py b/spiders/chinaz/chinaz/settings.py
new file mode 100644
index 00000000..41fb31bf
--- /dev/null
+++ b/spiders/chinaz/chinaz/settings.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for chinaz project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used.
You can find more settings consulting the documentation: +# +# https://doc.scrapy.org/en/latest/topics/settings.html +# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html +# https://doc.scrapy.org/en/latest/topics/spider-middleware.html + +BOT_NAME = 'chinaz' + +SPIDER_MODULES = ['chinaz.spiders'] +NEWSPIDER_MODULE = 'chinaz.spiders' + + +# Crawl responsibly by identifying yourself (and your website) on the user-agent +#USER_AGENT = 'chinaz (+http://www.yourdomain.com)' + +# Obey robots.txt rules +ROBOTSTXT_OBEY = True + +# Configure maximum concurrent requests performed by Scrapy (default: 16) +#CONCURRENT_REQUESTS = 32 + +# Configure a delay for requests for the same website (default: 0) +# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay +# See also autothrottle settings and docs +#DOWNLOAD_DELAY = 3 +# The download delay setting will honor only one of: +#CONCURRENT_REQUESTS_PER_DOMAIN = 16 +#CONCURRENT_REQUESTS_PER_IP = 16 + +# Disable cookies (enabled by default) +#COOKIES_ENABLED = False + +# Disable Telnet Console (enabled by default) +#TELNETCONSOLE_ENABLED = False + +# Override the default request headers: +#DEFAULT_REQUEST_HEADERS = { +# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', +# 'Accept-Language': 'en', +#} + +# Enable or disable spider middlewares +# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html +#SPIDER_MIDDLEWARES = { +# 'chinaz.middlewares.ChinazSpiderMiddleware': 543, +#} + +# Enable or disable downloader middlewares +# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html +#DOWNLOADER_MIDDLEWARES = { +# 'chinaz.middlewares.ChinazDownloaderMiddleware': 543, +#} + +# Enable or disable extensions +# See https://doc.scrapy.org/en/latest/topics/extensions.html +#EXTENSIONS = { +# 'scrapy.extensions.telnet.TelnetConsole': None, +#} + +# Configure item pipelines +# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html +ITEM_PIPELINES = { + 'chinaz.pipelines.MongoPipeline': 300, +} + +# Enable and configure the AutoThrottle extension (disabled by default) +# See https://doc.scrapy.org/en/latest/topics/autothrottle.html +#AUTOTHROTTLE_ENABLED = True +# The initial download delay +#AUTOTHROTTLE_START_DELAY = 5 +# The maximum download delay to be set in case of high latencies +#AUTOTHROTTLE_MAX_DELAY = 60 +# The average number of requests Scrapy should be sending in parallel to +# each remote server +#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 +# Enable showing throttling stats for every response received: +#AUTOTHROTTLE_DEBUG = False + +# Enable and configure HTTP caching (disabled by default) +# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings +#HTTPCACHE_ENABLED = True +#HTTPCACHE_EXPIRATION_SECS = 0 +#HTTPCACHE_DIR = 'httpcache' +#HTTPCACHE_IGNORE_HTTP_CODES = [] +#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' diff --git a/spiders/chinaz/chinaz/spiders/__init__.py b/spiders/chinaz/chinaz/spiders/__init__.py new file mode 100644 index 00000000..ebd689ac --- /dev/null +++ b/spiders/chinaz/chinaz/spiders/__init__.py @@ -0,0 +1,4 @@ +# This package will contain the spiders of your Scrapy project +# +# Please refer to the documentation for information on how to create and manage +# your spiders. 
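Before the spider itself, it is worth seeing how the pieces above connect: settings.py registers chinaz.pipelines.MongoPipeline, and that pipeline reads its Mongo connection and target collection from environment variables, which Crawlab appears to inject when it runs a task (note the new 'envs' argument on SpiderApi). A minimal, hedged sketch of running the spider defined in the next file locally, without Crawlab — every value set below is an illustrative assumption, and it assumes execution from spiders/chinaz, next to scrapy.cfg:

# Hedged sketch -- all environment values below are assumptions.
import os

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

os.environ.setdefault('MONGO_HOST', 'localhost')      # read by MongoPipeline
os.environ.setdefault('MONGO_PORT', '27017')
os.environ.setdefault('MONGO_DB', 'crawlab_test')
os.environ.setdefault('CRAWLAB_COLLECTION', 'sites')  # target collection

process = CrawlerProcess(get_project_settings())
process.crawl('chinaz_spider')  # spider name defined in chinaz_spider.py below
process.start()
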
diff --git a/spiders/chinaz/chinaz/spiders/chinaz_spider.py b/spiders/chinaz/chinaz/spiders/chinaz_spider.py
new file mode 100644
index 00000000..2359daa9
--- /dev/null
+++ b/spiders/chinaz/chinaz/spiders/chinaz_spider.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from chinaz.items import ChinazItem
+
+
+class ChinazSpiderSpider(scrapy.Spider):
+    name = 'chinaz_spider'
+    allowed_domains = ['chinaz.com']
+    start_urls = ['http://top.chinaz.com/hangye/']
+
+    def parse(self, response):
+        for item in response.css('.listCentent > li'):
+            name = item.css('h3.rightTxtHead > a::text').extract_first()
+            domain = item.css('h3.rightTxtHead > span::text').extract_first()
+            description = item.css('p.RtCInfo::text').extract_first()
+            rank = item.css('.RtCRateCent > strong::text').extract_first()
+            # guard against a missing rank before casting
+            rank = int(rank) if rank else None
+            yield ChinazItem(
+                _id=domain,
+                name=name,
+                domain=domain,
+                description=description,
+                rank=rank,
+            )
+
+        # pagination: follow the last page link, if any
+        a_list = response.css('.ListPageWrap > a::attr("href")').extract()
+        if a_list:
+            url = 'http://top.chinaz.com/hangye/' + a_list[-1]
+            yield scrapy.Request(url=url)
diff --git a/spiders/chinaz/scrapy.cfg b/spiders/chinaz/scrapy.cfg
new file mode 100644
index 00000000..d3b44a1a
--- /dev/null
+++ b/spiders/chinaz/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = chinaz.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = chinaz
diff --git a/spiders/jd/jd/__init__.py b/spiders/jd/jd/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/spiders/jd/jd/items.py b/spiders/jd/jd/items.py
new file mode 100644
index 00000000..9a7ba1cb
--- /dev/null
+++ b/spiders/jd/jd/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class JdItem(scrapy.Item):
+    # define the fields for your item here like:
+    name = scrapy.Field()
+    price = scrapy.Field()
diff --git a/spiders/jd/jd/middlewares.py b/spiders/jd/jd/middlewares.py
new file mode 100644
index 00000000..6fceded5
--- /dev/null
+++ b/spiders/jd/jd/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class JdSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class JdDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/spiders/jd/jd/pipelines.py b/spiders/jd/jd/pipelines.py
new file mode 100644
index 00000000..b862b7e7
--- /dev/null
+++ b/spiders/jd/jd/pipelines.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+import os
+
+from pymongo import MongoClient
+
+MONGO_HOST = os.environ.get('MONGO_HOST') or 'localhost'
+MONGO_PORT = int(os.environ.get('MONGO_PORT') or '27017')
+MONGO_DB = os.environ.get('MONGO_DB') or 'crawlab_test'
+
+
+class JdPipeline(object):
+    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
+    db = mongo[MONGO_DB]
+    col_name = os.environ.get('CRAWLAB_COLLECTION') or 'jd_products'
+    col = db[col_name]
+
+    def process_item(self, item, spider):
+        return item
diff --git a/spiders/jd/jd/settings.py b/spiders/jd/jd/settings.py
new file mode 100644
index 00000000..d83206b2
--- /dev/null
+++ b/spiders/jd/jd/settings.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for jd project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used.
You can find more settings consulting the documentation: +# +# https://doc.scrapy.org/en/latest/topics/settings.html +# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html +# https://doc.scrapy.org/en/latest/topics/spider-middleware.html + +BOT_NAME = 'jd' + +SPIDER_MODULES = ['jd.spiders'] +NEWSPIDER_MODULE = 'jd.spiders' + + +# Crawl responsibly by identifying yourself (and your website) on the user-agent +#USER_AGENT = 'jd (+http://www.yourdomain.com)' + +# Obey robots.txt rules +ROBOTSTXT_OBEY = True + +# Configure maximum concurrent requests performed by Scrapy (default: 16) +#CONCURRENT_REQUESTS = 32 + +# Configure a delay for requests for the same website (default: 0) +# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay +# See also autothrottle settings and docs +#DOWNLOAD_DELAY = 3 +# The download delay setting will honor only one of: +#CONCURRENT_REQUESTS_PER_DOMAIN = 16 +#CONCURRENT_REQUESTS_PER_IP = 16 + +# Disable cookies (enabled by default) +#COOKIES_ENABLED = False + +# Disable Telnet Console (enabled by default) +#TELNETCONSOLE_ENABLED = False + +# Override the default request headers: +#DEFAULT_REQUEST_HEADERS = { +# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', +# 'Accept-Language': 'en', +#} + +# Enable or disable spider middlewares +# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html +#SPIDER_MIDDLEWARES = { +# 'jd.middlewares.JdSpiderMiddleware': 543, +#} + +# Enable or disable downloader middlewares +# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html +#DOWNLOADER_MIDDLEWARES = { +# 'jd.middlewares.JdDownloaderMiddleware': 543, +#} + +# Enable or disable extensions +# See https://doc.scrapy.org/en/latest/topics/extensions.html +#EXTENSIONS = { +# 'scrapy.extensions.telnet.TelnetConsole': None, +#} + +# Configure item pipelines +# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html +ITEM_PIPELINES = { + 'jd.pipelines.JdPipeline': 300, +} + +# Enable and configure the AutoThrottle extension (disabled by default) +# See https://doc.scrapy.org/en/latest/topics/autothrottle.html +#AUTOTHROTTLE_ENABLED = True +# The initial download delay +#AUTOTHROTTLE_START_DELAY = 5 +# The maximum download delay to be set in case of high latencies +#AUTOTHROTTLE_MAX_DELAY = 60 +# The average number of requests Scrapy should be sending in parallel to +# each remote server +#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 +# Enable showing throttling stats for every response received: +#AUTOTHROTTLE_DEBUG = False + +# Enable and configure HTTP caching (disabled by default) +# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings +#HTTPCACHE_ENABLED = True +#HTTPCACHE_EXPIRATION_SECS = 0 +#HTTPCACHE_DIR = 'httpcache' +#HTTPCACHE_IGNORE_HTTP_CODES = [] +#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' diff --git a/spiders/jd/jd/spiders/__init__.py b/spiders/jd/jd/spiders/__init__.py new file mode 100644 index 00000000..ebd689ac --- /dev/null +++ b/spiders/jd/jd/spiders/__init__.py @@ -0,0 +1,4 @@ +# This package will contain the spiders of your Scrapy project +# +# Please refer to the documentation for information on how to create and manage +# your spiders. 
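The jd project that follows is pure scaffolding: the spider's parse() is a stub and JdPipeline passes items through untouched. For illustration only, a filled-in parse() might look like the sketch below; the CSS selectors are hypothetical placeholders, not verified jd.com markup:

# Hypothetical sketch -- '.product-item', '.name' and '.price' are
# placeholder selectors, not real jd.com markup.
import scrapy

from jd.items import JdItem


class JdProductSpider(scrapy.Spider):
    name = 'jd_product_sketch'
    allowed_domains = ['jd.com']
    start_urls = ['http://jd.com/']

    def parse(self, response):
        # one JdItem per listed product (name/price are the fields
        # declared in jd/items.py)
        for product in response.css('.product-item'):
            yield JdItem(
                name=product.css('.name::text').extract_first(),
                price=product.css('.price::text').extract_first(),
            )
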
diff --git a/spiders/jd/jd/spiders/jd_spider.py b/spiders/jd/jd/spiders/jd_spider.py new file mode 100644 index 00000000..01113a7e --- /dev/null +++ b/spiders/jd/jd/spiders/jd_spider.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +import scrapy + + +class JdSpiderSpider(scrapy.Spider): + name = 'jd_spider' + allowed_domains = ['jd.com'] + start_urls = ['http://jd.com/'] + + def parse(self, response): + pass diff --git a/spiders/jd/scrapy.cfg b/spiders/jd/scrapy.cfg new file mode 100644 index 00000000..87cf0280 --- /dev/null +++ b/spiders/jd/scrapy.cfg @@ -0,0 +1,11 @@ +# Automatically created by: scrapy startproject +# +# For more information about the [deploy] section see: +# https://scrapyd.readthedocs.io/en/latest/deploy.html + +[settings] +default = jd.settings + +[deploy] +#url = http://localhost:6800/ +project = jd
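Finally, a hedged sketch of exercising the new sites endpoint registered in crawlab/app.py. The host and port are assumptions (adjust to wherever the Flask API actually listens); the query parameters and response fields mirror SiteApi.get above:

import requests

# host/port are assumptions; the Crawlab API may listen elsewhere
resp = requests.get('http://localhost:8000/api/sites', params={
    'keyword': 'news',  # matched via $regex against name, domain, description
    'page_num': 1,
    'page_size': 10,
})
data = resp.json()
print(data['status'], data['total_count'])
# 'items' passes through utils.jsonify; depending on that helper it may
# arrive as a list or as a JSON string that needs json.loads() first
for site in data['items']:
    print(site.get('rank'), site.get('domain'), site.get('spider_count'))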