From 68e0b98d8a91af2bc48bc6022ed6823ec7352d27 Mon Sep 17 00:00:00 2001
From: Marvin Zhang
Date: Wed, 6 Mar 2019 10:46:41 +0800
Subject: [PATCH] updated README.md

---
 README.md                                   |  14 ++-
 crawlab/constants/manage.py                 |   1 +
 crawlab/manage.py                           |  13 +++
 .../baidu/baidu}/__init__.py                |   0
 spiders/baidu/baidu/items.py                |  14 +++
 spiders/baidu/baidu/middlewares.py          | 103 ++++++++++++++++++
 spiders/baidu/baidu/pipelines.py            |  11 ++
 spiders/baidu/baidu/settings.py             |  91 ++++++++++++++++
 spiders/baidu/baidu/spiders/__init__.py     |   4 +
 spiders/baidu/baidu/spiders/baidu_spider.py |  13 +++
 spiders/baidu/scrapy.cfg                    |  11 ++
 spiders/taobao/dump.rdb                     | Bin 0 -> 760 bytes
 {db => spiders/taobao/taobao}/__init__.py   |   0
 13 files changed, 271 insertions(+), 4 deletions(-)
 rename {constants => spiders/baidu/baidu}/__init__.py (100%)
 create mode 100644 spiders/baidu/baidu/items.py
 create mode 100644 spiders/baidu/baidu/middlewares.py
 create mode 100644 spiders/baidu/baidu/pipelines.py
 create mode 100644 spiders/baidu/baidu/settings.py
 create mode 100644 spiders/baidu/baidu/spiders/__init__.py
 create mode 100644 spiders/baidu/baidu/spiders/baidu_spider.py
 create mode 100644 spiders/baidu/scrapy.cfg
 create mode 100644 spiders/taobao/dump.rdb
 rename {db => spiders/taobao/taobao}/__init__.py (100%)

diff --git a/README.md b/README.md
index 20da8ba3..6c045ef5 100644
--- a/README.md
+++ b/README.md
@@ -32,11 +32,17 @@ python ./bin/run_worker.py
 # TODO: frontend
 ```
 
-## Nodes
+## Architecture
+
+The architecture of Crawlab is as below. It's very similar to Celery architecture, but a few more modules including Frontend, Spiders and Flower are added to feature the crawling management functionality.
+
+![crawlab-architecture](./docs/img/crawlab-architecture.png)
+
+### Nodes
 
 Nodes are actually the workers defined in Celery. A node is running and connected to a task queue, redis for example, to receive and run tasks. As spiders need to be deployed to the nodes, users should specify their ip addresses and ports before the deployment.
 
-## Spiders
+### Spiders
 
 #### Auto Discovery
 In `config.py` file, edit `PROJECT_SOURCE_FILE_FOLDER` as the directory where the spiders projects are located. The web app will discover spider projects automatically.
@@ -49,6 +55,6 @@ All spiders need to be deployed to a specific node before crawling. Simply click
 
 After deploying the spider, you can click "Run" button on spider detail page and select a specific node to start crawling. It will triggers a task for the crawling, where you can see in detail in tasks page.
 
-## Tasks
+### Tasks
 
-Tasks are triggered and run by the workers. Users can check the task status info and logs in the task detail page.
\ No newline at end of file
+Tasks are triggered and run by the workers. Users can check the task status info and logs in the task detail page.
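The "Auto Discovery" note in the README hunk above tells users to point `PROJECT_SOURCE_FILE_FOLDER` in `config.py` at the directory that holds the spider projects. A minimal sketch of that setting follows; the path is a placeholder and not part of this patch:

```python
# config.py (sketch): directory Crawlab scans to auto-discover spider projects,
# e.g. the spiders/ folder added by this commit.
# '/opt/crawlab/spiders' is a placeholder; adjust it to your checkout.
PROJECT_SOURCE_FILE_FOLDER = '/opt/crawlab/spiders'
```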
diff --git a/crawlab/constants/manage.py b/crawlab/constants/manage.py
index f1e60691..f5447bf2 100644
--- a/crawlab/constants/manage.py
+++ b/crawlab/constants/manage.py
@@ -1,4 +1,5 @@
 class ActionType:
     APP = 'app'
     FLOWER = 'flower'
+    WORKER = 'worker'
     RUN_ALL = 'run_all'
diff --git a/crawlab/manage.py b/crawlab/manage.py
index 65272099..2b6223f6 100644
--- a/crawlab/manage.py
+++ b/crawlab/manage.py
@@ -1,5 +1,6 @@
 import os
 import subprocess
+import sys
 from multiprocessing import Process
 
 import click
@@ -7,6 +8,10 @@ from flask import Flask
 from flask_cors import CORS
 from flask_restful import Api
 
+file_dir = os.path.dirname(os.path.realpath(__file__))
+root_path = os.path.abspath(os.path.join(file_dir, '.'))
+sys.path.append(root_path)
+
 from config import FLASK_HOST, FLASK_PORT, PROJECT_LOGS_FOLDER, BROKER_URL
 from constants.manage import ActionType
 from routes.deploys import DeployApi
@@ -15,6 +20,7 @@ from routes.nodes import NodeApi
 from routes.spiders import SpiderApi, SpiderImportApi, SpiderManageApi
 from routes.stats import StatsApi
 from routes.tasks import TaskApi
+from tasks.celery import celery_app
 
 # flask app instance
 app = Flask(__name__)
@@ -72,6 +78,13 @@ def run_flower():
         print(line.decode('utf-8'))
 
 
+def run_worker():
+    if sys.platform == 'windows':
+        celery_app.start(argv=['tasks', 'worker', '-P', 'eventlet', '-E', '-l', 'INFO'])
+    else:
+        celery_app.start(argv=['tasks', 'worker', '-E', '-l', 'INFO'])
+
+
 @click.command()
 @click.argument('action', type=click.Choice([ActionType.APP, ActionType.FLOWER, ActionType.RUN_ALL]))
 def main(action):
diff --git a/constants/__init__.py b/spiders/baidu/baidu/__init__.py
similarity index 100%
rename from constants/__init__.py
rename to spiders/baidu/baidu/__init__.py
diff --git a/spiders/baidu/baidu/items.py b/spiders/baidu/baidu/items.py
new file mode 100644
index 00000000..26b5888c
--- /dev/null
+++ b/spiders/baidu/baidu/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class BaiduItem(scrapy.Item):
+    # define the fields for your item here like:
+    title = scrapy.Field()
+    url = scrapy.Field()
diff --git a/spiders/baidu/baidu/middlewares.py b/spiders/baidu/baidu/middlewares.py
new file mode 100644
index 00000000..3911485d
--- /dev/null
+++ b/spiders/baidu/baidu/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class BaiduSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class BaiduDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either;
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/spiders/baidu/baidu/pipelines.py b/spiders/baidu/baidu/pipelines.py
new file mode 100644
index 00000000..beae9c24
--- /dev/null
+++ b/spiders/baidu/baidu/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class BaiduPipeline(object):
+    def process_item(self, item, spider):
+        return item
diff --git a/spiders/baidu/baidu/settings.py b/spiders/baidu/baidu/settings.py
new file mode 100644
index 00000000..34218961
--- /dev/null
+++ b/spiders/baidu/baidu/settings.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for baidu project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://doc.scrapy.org/en/latest/topics/settings.html
+#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'baidu'
+
+SPIDER_MODULES = ['baidu.spiders']
+NEWSPIDER_MODULE = 'baidu.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'baidu (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+# ROBOTSTXT_OBEY = True
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'baidu.middlewares.BaiduSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'baidu.middlewares.BaiduDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'baidu.pipelines.BaiduPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/spiders/baidu/baidu/spiders/__init__.py b/spiders/baidu/baidu/spiders/__init__.py
new file mode 100644
index 00000000..ebd689ac
--- /dev/null
+++ b/spiders/baidu/baidu/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/spiders/baidu/baidu/spiders/baidu_spider.py b/spiders/baidu/baidu/spiders/baidu_spider.py
new file mode 100644
index 00000000..f84ffc8d
--- /dev/null
+++ b/spiders/baidu/baidu/spiders/baidu_spider.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+from time import sleep
+
+import scrapy
+
+
+class BaiduSpiderSpider(scrapy.Spider):
+    name = 'baidu_spider'
+    allowed_domains = ['baidu.com']
+    start_urls = ['http://baidu.com/s?wd=百度']
+
+    def parse(self, response):
+        sleep(30)
diff --git a/spiders/baidu/scrapy.cfg b/spiders/baidu/scrapy.cfg
new file mode 100644
index 00000000..492b18d1
--- /dev/null
+++ b/spiders/baidu/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = baidu.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = baidu
diff --git a/spiders/taobao/dump.rdb b/spiders/taobao/dump.rdb
new file mode 100644
index 0000000000000000000000000000000000000000..48df713409ebce5b6617a84280df170eb7137ad1
GIT binary patch
literal 760
zcmZvZv2K+y3`TD|bOMQm@&K^9iWA3)cNo|ZJV0f~4qTO9B%l@SycGkF!N3d;fe?_Y
z1JW;+#KU)VJ}1wgJbwBjrNqB?zdk>EmG?)x-R|zyLB~b-`Q_F7`0&U$v{kC0%@}AJ
zf|)uLvKX~sVzlG?=NE4-?dr_-2j3rFo{e}NZ$BR0yS-0NPfl;A;|Ko@#`T^2@Lwvf
z&nzq97!y>h0@c#iBm}K^>5_=k>>Ff`Dz2e0Q(Z-C3$UOGp0ac*9u{qbur(jGErDnS
zB39^31&}3Ul93$i2B}4(4I&_~2xO#ST{_^vs_Y@T&<5$sjM&o(_P&aRFN+CSqM13G
z7u{OKEsJr5V!Dv%1kzN1bDCUj_Efi)+&KrWI_YKuY9m3XM&QN`r)t3*n_`lwb(@Kw=@%br`G1)UAb!uiB}D-yCFGK!h>~siPL*o@HanhE&t65L0|TTgqVU19;eM
bE0S5T1-+Jf(ZmM*1u2(n>7K3_ZtYCq>ZHq4dKF2eJrFb-&&7hT4br67(@U3{PF4Q
Jw=eI${{;yZ)Xe|@

literal 0
HcmV?d00001

diff --git a/db/__init__.py b/spiders/taobao/taobao/__init__.py
similarity index 100%
rename from db/__init__.py
rename to spiders/taobao/taobao/__init__.py
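The committed `baidu_spider` only sleeps in `parse()`, while `items.py` already defines `title` and `url` fields. Below is a minimal sketch of a parse callback that fills those fields, assuming Baidu's organic results sit in `div.result` blocks with an `h3 a` title link; the spider name, search keyword and CSS selectors are illustrative assumptions, not part of this patch:

```python
# -*- coding: utf-8 -*-
# Sketch only: how the BaiduItem fields defined in items.py might be populated.
import scrapy

from baidu.items import BaiduItem


class BaiduResultSpider(scrapy.Spider):
    name = 'baidu_result_spider'                    # hypothetical name, not in the patch
    allowed_domains = ['baidu.com']
    start_urls = ['http://baidu.com/s?wd=crawlab']  # illustrative keyword

    def parse(self, response):
        # Assumed markup: each organic result is a div.result whose title
        # link is the first <a> inside its <h3>.
        for result in response.css('div.result'):
            item = BaiduItem()
            item['title'] = result.css('h3 a::text').extract_first()
            item['url'] = result.css('h3 a::attr(href)').extract_first()
            yield item
```

Placed in `spiders/baidu/baidu/spiders/`, a module like this would run under the same project settings as `baidu_spider`, since `SPIDER_MODULES` in `settings.py` points at `baidu.spiders`.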