diff --git a/crawlab/db/manager.py b/crawlab/db/manager.py
index 902992cf..d210b81c 100644
--- a/crawlab/db/manager.py
+++ b/crawlab/db/manager.py
@@ -147,7 +147,7 @@ class DbManager(object):
 
     def get_last_deploy(self, spider_id):
         """
-        @deprecated
+        Get latest deploy for a given spider_id
         """
         col = self.db['deploys']
         for item in col.find({'spider_id': ObjectId(spider_id)}) \
@@ -155,6 +155,16 @@ class DbManager(object):
             return item
         return None
 
+    def get_last_task(self, spider_id):
+        """
+        Get latest task for a given spider_id
+        """
+        col = self.db['tasks']
+        for item in col.find({'spider_id': ObjectId(spider_id)}) \
+                .sort('create_ts', DESCENDING):
+            return item
+        return None
+
     def aggregate(self, col_name: str, pipelines, **kwargs):
         """
         Perform MongoDB col.aggregate action to aggregate stats given collection name and pipelines.
diff --git a/crawlab/routes/base.py b/crawlab/routes/base.py
index 689b8f6a..8a3d6709 100644
--- a/crawlab/routes/base.py
+++ b/crawlab/routes/base.py
@@ -38,8 +38,8 @@ class BaseApi(Resource):
         :param action:
         :return:
         """
-        import pdb
-        pdb.set_trace()
+        # import pdb
+        # pdb.set_trace()
         args = self.parser.parse_args()
 
         # action by id
@@ -85,13 +85,13 @@ class BaseApi(Resource):
 
             # TODO: getting status for node
 
-            return jsonify({
+            return {
                 'status': 'ok',
                 'total_count': total_count,
                 'page_num': page,
                 'page_size': page_size,
-                'items': items
-            })
+                'items': jsonify(items)
+            }
 
         # get item by id
         else:
@@ -108,6 +108,9 @@ class BaseApi(Resource):
             if k not in DEFAULT_ARGS:
                 item[k] = args.get(k)
         item = db_manager.save(col_name=self.col_name, item=item)
+
+        self.after_update(item._id)
+
         return item
 
     def update(self, id: str = None) -> (dict, tuple):
diff --git a/crawlab/routes/schedules.py b/crawlab/routes/schedules.py
index 1eceabde..f966e2cb 100644
--- a/crawlab/routes/schedules.py
+++ b/crawlab/routes/schedules.py
@@ -13,6 +13,8 @@ class ScheduleApi(BaseApi):
     col_name = 'schedules'
 
     arguments = (
+        ('name', str),
+        ('description', str),
         ('cron', str),
         ('spider_id', str)
     )
diff --git a/crawlab/routes/spiders.py b/crawlab/routes/spiders.py
index b4b3aab1..f36903e3 100644
--- a/crawlab/routes/spiders.py
+++ b/crawlab/routes/spiders.py
@@ -81,7 +81,14 @@ class SpiderApi(BaseApi):
 
         # get one node
         elif id is not None:
-            return jsonify(db_manager.get('spiders', id=id))
+            spider = db_manager.get('spiders', id=id)
+
+            # get deploy
+            last_deploy = db_manager.get_last_deploy(spider_id=spider['_id'])
+            if last_deploy is not None:
+                spider['deploy_ts'] = last_deploy['finish_ts']
+
+            return jsonify(spider)
 
         # get a list of items
         else:
@@ -108,8 +115,23 @@ class SpiderApi(BaseApi):
 
             # existing spider
             else:
+                # get last deploy
+                last_deploy = db_manager.get_last_deploy(spider_id=spider['_id'])
+                if last_deploy is not None:
+                    spider['deploy_ts'] = last_deploy['finish_ts']
+
+                # get last task
+                last_task = db_manager.get_last_task(spider_id=spider['_id'])
+                if last_task is not None:
+                    spider['task_ts'] = last_task['create_ts']
+
+                # file stats
                 stats = get_file_suffix_stats(dir_path)
+
+                # language
                 lang = get_lang_by_stats(stats)
+
+                # update spider data
                 db_manager.update_one('spiders', id=str(spider['_id']), values={
                     'lang': lang,
                     'suffix_stats': stats,
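The new `get_last_task` helper mirrors `get_last_deploy`: sort by `create_ts` descending and return the first hit, or `None`. A minimal standalone sketch of the same query, assuming a local MongoDB and a `crawlab_test` database (the connection details are illustrative assumptions; `DbManager` wires these up from config in the real code):

```python
from bson import ObjectId
from pymongo import DESCENDING, MongoClient

# Assumed connection settings, for illustration only.
tasks_col = MongoClient('localhost', 27017)['crawlab_test']['tasks']

def get_last_task(spider_id: str):
    # find_one with a sort is equivalent to taking the first item of a descending find().
    return tasks_col.find_one({'spider_id': ObjectId(spider_id)},
                              sort=[('create_ts', DESCENDING)])
```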
diff --git a/crawlab/routes/tasks.py b/crawlab/routes/tasks.py
index d864f859..59e8469b 100644
--- a/crawlab/routes/tasks.py
+++ b/crawlab/routes/tasks.py
@@ -1,8 +1,9 @@
 import json
+from datetime import datetime
 
 import requests
 from bson import ObjectId
-from celery.worker.control import revoke
+from tasks.celery import celery_app
 
 from constants.task import TaskStatus
 from db.manager import db_manager
@@ -42,6 +43,8 @@ class TaskApi(BaseApi):
             task = db_manager.get(col_name=self.col_name, id=id)
             spider = db_manager.get(col_name='spiders', id=str(task['spider_id']))
             task['spider_name'] = spider['name']
+            if task.get('finish_ts') is not None:
+                task['duration'] = (task['finish_ts'] - task['create_ts']).total_seconds()
             try:
                 with open(task['log_file_path']) as f:
                     task['log'] = f.read()
@@ -61,7 +64,8 @@ class TaskApi(BaseApi):
                 _spider = db_manager.get(col_name='spiders', id=str(task['spider_id']))
                 if task.get('status') is None:
                     task['status'] = TaskStatus.UNAVAILABLE
-                task['spider_name'] = _spider['name']
+                if _spider:
+                    task['spider_name'] = _spider['name']
                 items.append(task)
             return {
                 'status': 'ok',
@@ -146,11 +150,13 @@ class TaskApi(BaseApi):
     def stop(self, id):
         """
         Stop the task in progress.
-        TODO: work in progress
         :param id:
         :return:
         """
-        revoke(id, terminate=True)
+        celery_app.control.revoke(id, terminate=True)
+        db_manager.update_one('tasks', id=id, values={
+            'status': TaskStatus.REVOKED
+        })
         return {
             'id': id,
             'status': 'ok',
diff --git a/crawlab/tasks/scheduler.py b/crawlab/tasks/scheduler.py
index d4249bf7..da6303c9 100644
--- a/crawlab/tasks/scheduler.py
+++ b/crawlab/tasks/scheduler.py
@@ -2,6 +2,7 @@ import requests
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.jobstores.mongodb import MongoDBJobStore
 from pymongo import MongoClient
+from flask import current_app
 
 from config import MONGO_DB, MONGO_HOST, MONGO_PORT, FLASK_HOST, FLASK_PORT
 from constants.spider import CronEnabled
@@ -29,8 +30,11 @@ class Scheduler(object):
     def restart(self):
         self.scheduler.shutdown()
         self.scheduler.start()
+        current_app.logger.info('restarted')
 
     def update(self):
+        current_app.logger.info('updating...')
+
         # remove all existing periodic jobs
         self.scheduler.remove_all_jobs()
 
@@ -50,6 +54,8 @@ class Scheduler(object):
                               day_of_week=day_of_week, month=month, day=day, hour=hour, minute=minute,
                               second=second)
 
+        current_app.logger.info('updated')
+
     def run(self):
         self.update()
         self.scheduler.start()
diff --git a/crawlab/utils/node.py b/crawlab/utils/node.py
index 07a45c01..3a0b7b92 100644
--- a/crawlab/utils/node.py
+++ b/crawlab/utils/node.py
@@ -33,9 +33,10 @@ def update_nodes_status(refresh=False):
 
         # new node
         if node is None:
-            node = {'_id': node_name, 'name': node_name, 'status': node_status}
+            node = {'_id': node_name, 'name': node_name, 'status': node_status, 'ip': 'localhost', 'port': '8000'}
             db_manager.save('nodes', node)
 
+        # existing node
         else:
             node['status'] = node_status
             db_manager.save('nodes', node)
diff --git a/frontend/src/components/InfoView/SpiderInfoView.vue b/frontend/src/components/InfoView/SpiderInfoView.vue
index 852b9c1f..a02088ad 100644
--- a/frontend/src/components/InfoView/SpiderInfoView.vue
+++ b/frontend/src/components/InfoView/SpiderInfoView.vue
@@ -61,7 +61,7 @@
-      {{$t('Run')}}
+      {{$t('Run')}}
       {{$t('Deploy')}}
       {{$t('Save')}}
@@ -109,7 +109,16 @@ export default {
   computed: {
     ...mapState('spider', [
       'spiderForm'
-    ])
+    ]),
+    isShowRun () {
+      if (!this.spiderForm.deploy_ts) {
+        return false
+      }
+      if (!this.spiderForm.cmd) {
+        return false
+      }
+      return true
+    }
   },
   methods: {
     onRun () {
@@ -131,6 +140,11 @@
     },
     onDeploy () {
       const row = this.spiderForm
+
+      // save spider
+      this.$store.dispatch('spider/editSpider', row._id)
+
+      // validate fields
       this.$refs['spiderForm'].validate(res => {
         if (res) {
           this.$confirm(this.$t('Are you sure to deploy this spider?'), this.$t('Notification'), {
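Worth calling out in the tasks.py hunk above: `stop()` now goes through the Celery app's control interface instead of the worker-internal `revoke` helper, and it records the new status so the UI can show it. A rough standalone sketch of that flow, with an assumed broker URL and MongoDB connection (the real code uses `tasks.celery.celery_app` and `db_manager`):

```python
from celery import Celery
from pymongo import MongoClient

celery_app = Celery('tasks', broker='redis://localhost:6379/0')        # assumed broker URL
tasks_col = MongoClient('localhost', 27017)['crawlab_test']['tasks']   # assumed connection

def stop_task(task_id: str) -> None:
    # Ask the workers to terminate the running job...
    celery_app.control.revoke(task_id, terminate=True)
    # ...and persist the revoked status so the frontend can render it
    # (TaskStatus.REVOKED in the real code; a plain string here).
    tasks_col.update_one({'_id': task_id}, {'$set': {'status': 'REVOKED'}})
```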
diff --git a/frontend/src/components/InfoView/TaskInfoView.vue b/frontend/src/components/InfoView/TaskInfoView.vue
index 98ba7c8f..8b6fdd16 100644
--- a/frontend/src/components/InfoView/TaskInfoView.vue
+++ b/frontend/src/components/InfoView/TaskInfoView.vue
@@ -32,13 +32,13 @@
-        {{taskForm.result}}
+        {{taskForm.log}}
-        Stop
+        {{$t('Stop')}}
diff --git a/frontend/src/i18n/zh.js b/frontend/src/i18n/zh.js
index f1f0cf6f..7e127114 100644
--- a/frontend/src/i18n/zh.js
+++ b/frontend/src/i18n/zh.js
@@ -8,6 +8,7 @@ export default {
   'Task': '任务',
   'Tasks': '任务',
   'Task Detail': '任务详情',
+  'Schedules': '定时任务',
   'Deploys': '部署',
 
   // 标签
@@ -31,6 +32,7 @@
   SUCCESS: '成功',
   FAILURE: '错误',
   UNAVAILABLE: '未知',
+  REVOKED: '已取消',
 
   // 操作
   Run: '运行',
@@ -46,6 +48,7 @@
   Edit: '编辑',
   Remove: '删除',
   Confirm: '确认',
+  Stop: '停止',
 
   // 主页
   'Total Tasks': '总任务数',
diff --git a/frontend/src/router/index.js b/frontend/src/router/index.js
index 46b1e741..9cf2acb0 100644
--- a/frontend/src/router/index.js
+++ b/frontend/src/router/index.js
@@ -140,7 +140,7 @@ export const constantRouterMap = [
       title: 'Schedules',
       icon: 'fa fa-calendar'
     },
-    hidden: true,
+    hidden: false,
     children: [
       {
         path: '',
diff --git a/frontend/src/store/index.js b/frontend/src/store/index.js
index 25c179ba..92653f15 100644
--- a/frontend/src/store/index.js
+++ b/frontend/src/store/index.js
@@ -9,6 +9,7 @@ import spider from './modules/spider'
 import deploy from './modules/deploy'
 import task from './modules/task'
 import file from './modules/file'
+import schedule from './modules/schedule'
 import lang from './modules/lang'
 import getters from './getters'
 
@@ -25,6+26,7 @@ const store = new Vuex.Store({
     deploy,
     task,
     file,
+    schedule,
     lang
   },
   getters
diff --git a/frontend/src/store/modules/schedule.js b/frontend/src/store/modules/schedule.js
new file mode 100644
index 00000000..3c322133
--- /dev/null
+++ b/frontend/src/store/modules/schedule.js
@@ -0,0 +1,43 @@
+import request from '../../api/request'
+
+const state = {
+  scheduleList: [],
+  scheduleForm: {}
+}
+
+const getters = {}
+
+const mutations = {
+  SET_SCHEDULE_LIST (state, value) {
+    state.scheduleList = value
+  },
+  SET_SCHEDULE_FORM (state, value) {
+    state.scheduleForm = value
+  }
+}
+
+const actions = {
+  getScheduleList ({ state, commit }) {
+    request.get('/schedules')
+      .then(response => {
+        commit('SET_SCHEDULE_LIST', response.data.items)
+      })
+  },
+  addSchedule ({ state }) {
+    request.put('/schedules', state.scheduleForm)
+  },
+  editSchedule ({ state }, id) {
+    request.post(`/schedules/${id}`, state.scheduleForm)
+  },
+  removeSchedule ({ state }, id) {
+    request.delete(`/schedules/${id}`)
+  }
+}
+
+export default {
+  namespaced: true,
+  state,
+  getters,
+  mutations,
+  actions
+}
diff --git a/frontend/src/store/modules/spider.js b/frontend/src/store/modules/spider.js
index dba4876e..1e18ecde 100644
--- a/frontend/src/store/modules/spider.js
+++ b/frontend/src/store/modules/spider.js
@@ -95,6 +95,10 @@ const actions = {
       .then(response => {
         console.log(response.data)
       })
+      .then(response => {
+        dispatch('getSpiderData', id)
+        dispatch('getSpiderList')
+      })
   },
   crawlSpider ({ state, dispatch }, id) {
     return request.post(`/spiders/${id}/on_crawl`)
diff --git a/frontend/src/views/node/NodeList.vue b/frontend/src/views/node/NodeList.vue
index d0cb11d4..35dc7d18 100644
--- a/frontend/src/views/node/NodeList.vue
+++ b/frontend/src/views/node/NodeList.vue
@@ -50,9 +50,9 @@
 [template change not legible in this copy of the patch]
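For reference, the actions in the new schedule store module map onto the ScheduleApi endpoints added on the backend (list, add, edit, remove). A quick way to exercise them outside the UI; the API base, host and port are assumptions about the deployment, and the ids and field values are placeholders:

```python
import requests

BASE = 'http://localhost:8000/api'   # assumed API base; adjust to your deployment

requests.get(f'{BASE}/schedules')                                   # getScheduleList
requests.put(f'{BASE}/schedules', data={                            # addSchedule
    'name': 'daily-crawl',                # illustrative values
    'description': 'run every night',
    'cron': '<cron_expression>',          # the backend Scheduler also passes a seconds field
    'spider_id': '<spider_id>',
})
requests.post(f'{BASE}/schedules/<schedule_id>', data={'cron': '<cron_expression>'})  # editSchedule
requests.delete(f'{BASE}/schedules/<schedule_id>')                  # removeSchedule
```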
diff --git a/frontend/src/views/schedule/ScheduleList.vue b/frontend/src/views/schedule/ScheduleList.vue
index d1b8a5bb..3b1e3307 100644
--- a/frontend/src/views/schedule/ScheduleList.vue
+++ b/frontend/src/views/schedule/ScheduleList.vue
@@ -1,15 +1,200 @@
-  Schedule List
+  [new ~200-line schedule list view; component markup not legible in this copy of the patch — visible text: {{$t('Cron')}}, {{$t('Cancel')}}, {{$t('Submit')}}, {{$t('Add Schedule')}}]
diff --git a/frontend/src/views/spider/SpiderList.vue b/frontend/src/views/spider/SpiderList.vue
index 4ae640ea..60785efe 100644
--- a/frontend/src/views/spider/SpiderList.vue
+++ b/frontend/src/views/spider/SpiderList.vue
@@ -93,21 +93,21 @@
       :width="col.width">
 [table/button markup not legible in this copy of the patch]
@@ -151,7 +151,7 @@ export default {
         { name: 'name', label: 'Name', width: 'auto' },
         { name: 'type', label: 'Spider Type', width: '160', sortable: true },
         { name: 'lang', label: 'Language', width: '160', sortable: true },
-        { name: 'last_run_ts', label: 'Last Run', width: '120' }
+        { name: 'task_ts', label: 'Last Run', width: '160' }
       ],
       spiderFormRules: {
         name: [{ required: true, message: 'Required Field', trigger: 'change' }]
@@ -301,6 +301,15 @@
           this.$message.success(this.$t('Deployed all spiders successfully'))
         })
       })
+    },
+    isShowRun (row) {
+      if (!row.deploy_ts) {
+        return false
+      }
+      if (!row.cmd) {
+        return false
+      }
+      return true
+    }
   },
   created () {
diff --git a/frontend/src/views/task/TaskList.vue b/frontend/src/views/task/TaskList.vue
index bf42dfce..87fa6804 100644
--- a/frontend/src/views/task/TaskList.vue
+++ b/frontend/src/views/task/TaskList.vue
@@ -71,6 +71,9 @@
 [template change not legible in this copy of the patch]
diff --git a/spiders/csdn/csdn_spider.js b/spiders/csdn/csdn_spider.js
index edda3b00..0f65c0ad 100644
--- a/spiders/csdn/csdn_spider.js
+++ b/spiders/csdn/csdn_spider.js
@@ -1,6 +1,10 @@
 const puppeteer = require('puppeteer');
 const MongoClient = require('mongodb').MongoClient;
 
+const MONGO_HOST = process.env.MONGO_HOST;
+const MONGO_PORT = process.env.MONGO_PORT;
+const MONGO_DB = process.env.MONGO_DB;
+
 (async () => {
     // browser
     const browser = await (puppeteer.launch({
@@ -53,8 +57,8 @@ const MongoClient = require('mongodb').MongoClient;
     });
 
     // open database connection
-    const client = await MongoClient.connect('mongodb://127.0.0.1:27017');
-    let db = await client.db('crawlab_test');
+    const client = await MongoClient.connect(`mongodb://${MONGO_HOST}:${MONGO_PORT}`);
+    let db = await client.db(MONGO_DB);
     const colName = process.env.CRAWLAB_COLLECTION || 'results_juejin';
     const taskId = process.env.CRAWLAB_TASK_ID;
     const col = db.collection(colName);
diff --git a/spiders/example_juejin/juejin/pipelines.py b/spiders/example_juejin/juejin/pipelines.py
index 4a497f54..1c4ffdc1 100644
--- a/spiders/example_juejin/juejin/pipelines.py
+++ b/spiders/example_juejin/juejin/pipelines.py
@@ -9,9 +9,8 @@ import os
 from pymongo import MongoClient
 
 MONGO_HOST = os.environ['MONGO_HOST']
-MONGO_PORT = os.environ['MONGO_PORT']
+MONGO_PORT = int(os.environ['MONGO_PORT'])
 MONGO_DB = os.environ['MONGO_DB']
-print(MONGO_HOST)
 
 
 class JuejinPipeline(object):
diff --git a/spiders/realestate/realestate/__init__.py b/spiders/realestate/realestate/__init__.py
new file mode 100644
index 00000000..e69de29b
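Both spiders above now read their MongoDB connection settings from the environment rather than hard-coding them, and the juejin pipeline casts the port to int because pymongo expects an integer port. A small sketch of that pattern; the fallback defaults are assumptions added for illustration (the actual pipelines require the variables to be set):

```python
import os
from pymongo import MongoClient

MONGO_HOST = os.environ.get('MONGO_HOST', 'localhost')      # assumed fallback
MONGO_PORT = int(os.environ.get('MONGO_PORT', 27017))       # int(): pymongo needs an integer port
MONGO_DB = os.environ.get('MONGO_DB', 'crawlab_test')       # assumed fallback

client = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
db = client[MONGO_DB]
col = db[os.environ.get('CRAWLAB_COLLECTION', 'results')]   # collection name injected by Crawlab
```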
diff --git a/spiders/realestate/realestate/items.py b/spiders/realestate/realestate/items.py
new file mode 100644
index 00000000..189b92ed
--- /dev/null
+++ b/spiders/realestate/realestate/items.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class RealEstateItem(scrapy.Item):
+    # _id
+    _id = scrapy.Field()
+
+    # task_id
+    task_id = scrapy.Field()
+
+    # property name
+    name = scrapy.Field()
+
+    # url
+    url = scrapy.Field()
+
+    # category
+    type = scrapy.Field()
+
+    # price (unit: 10,000 CNY)
+    price = scrapy.Field()
+
+    # size
+    size = scrapy.Field()
+
+    # residential community
+    region = scrapy.Field()
+
+    # city
+    city = scrapy.Field()
diff --git a/spiders/realestate/realestate/middlewares.py b/spiders/realestate/realestate/middlewares.py
new file mode 100644
index 00000000..ed845f57
--- /dev/null
+++ b/spiders/realestate/realestate/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class RealestateSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class RealestateDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either;
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/spiders/realestate/realestate/pipelines.py b/spiders/realestate/realestate/pipelines.py
new file mode 100644
index 00000000..a73934b3
--- /dev/null
+++ b/spiders/realestate/realestate/pipelines.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+import os
+
+from pymongo import MongoClient
+
+MONGO_HOST = os.environ['MONGO_HOST']
+MONGO_PORT = int(os.environ['MONGO_PORT'])
+MONGO_DB = os.environ['MONGO_DB']
+
+
+class MongoPipeline(object):
+    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
+    db = mongo[MONGO_DB]
+    col_name = os.environ.get('CRAWLAB_COLLECTION')
+    col = db[col_name]
+
+    def process_item(self, item, spider):
+        item['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
+        self.col.save(item)
+        return item
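One thing to keep in mind with MongoPipeline: `Collection.save()` has been deprecated since pymongo 3.0 and is removed in pymongo 4, so on newer drivers the same upsert would need `insert_one`/`replace_one`. A possible variant, sketched with assumed fallback values for the environment variables:

```python
import os
from pymongo import MongoClient


class MongoPipeline(object):
    mongo = MongoClient(host=os.environ.get('MONGO_HOST', 'localhost'),
                        port=int(os.environ.get('MONGO_PORT', 27017)))
    db = mongo[os.environ.get('MONGO_DB', 'crawlab_test')]
    col = db[os.environ.get('CRAWLAB_COLLECTION', 'results')]

    def process_item(self, item, spider):
        item['task_id'] = os.environ.get('CRAWLAB_TASK_ID')
        doc = dict(item)
        if doc.get('_id') is not None:
            # upsert by _id, which is what save() used to do
            self.col.replace_one({'_id': doc['_id']}, doc, upsert=True)
        else:
            self.col.insert_one(doc)
        return item
```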
diff --git a/spiders/realestate/realestate/settings.py b/spiders/realestate/realestate/settings.py
new file mode 100644
index 00000000..da1ada29
--- /dev/null
+++ b/spiders/realestate/realestate/settings.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for realestate project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://doc.scrapy.org/en/latest/topics/settings.html
+#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'realestate'
+
+SPIDER_MODULES = ['realestate.spiders']
+NEWSPIDER_MODULE = 'realestate.spiders'
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+# USER_AGENT = 'realestate (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+# CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+# DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+# CONCURRENT_REQUESTS_PER_DOMAIN = 16
+# CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+# COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+# TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+# DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+# }
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+# SPIDER_MIDDLEWARES = {
+#    'realestate.middlewares.RealestateSpiderMiddleware': 543,
+# }
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+# DOWNLOADER_MIDDLEWARES = {
+#    'realestate.middlewares.RealestateDownloaderMiddleware': 543,
+# }
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+# EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+# }
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'realestate.pipelines.MongoPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+# AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+# AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+# AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+# AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+# HTTPCACHE_ENABLED = True
+# HTTPCACHE_EXPIRATION_SECS = 0
+# HTTPCACHE_DIR = 'httpcache'
+# HTTPCACHE_IGNORE_HTTP_CODES = []
+# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/spiders/realestate/realestate/spiders/__init__.py b/spiders/realestate/realestate/spiders/__init__.py
new file mode 100644
index 00000000..ebd689ac
--- /dev/null
+++ b/spiders/realestate/realestate/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
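Before wiring the project into Crawlab it can be handy to run the new spider once locally. A minimal way to do that from Python, assuming the `MONGO_*` and `CRAWLAB_*` environment variables expected by the pipeline are set (the values below are placeholders; the project would normally be started with the scrapy CLI or by a Crawlab task):

```python
import os

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from realestate.spiders.lianjia import LianjiaSpider

# The pipeline reads these when the crawler loads it, so set them before starting.
os.environ.setdefault('MONGO_HOST', 'localhost')
os.environ.setdefault('MONGO_PORT', '27017')
os.environ.setdefault('MONGO_DB', 'crawlab_test')
os.environ.setdefault('CRAWLAB_COLLECTION', 'results_realestate')

process = CrawlerProcess(get_project_settings())
process.crawl(LianjiaSpider)
process.start()   # blocks until the crawl finishes
```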
diff --git a/spiders/realestate/realestate/spiders/lianjia.py b/spiders/realestate/realestate/spiders/lianjia.py
new file mode 100644
index 00000000..cad054f3
--- /dev/null
+++ b/spiders/realestate/realestate/spiders/lianjia.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+import scrapy
+
+from realestate.items import RealEstateItem
+
+
+class LianjiaSpider(scrapy.Spider):
+    name = 'lianjia'
+    allowed_domains = ['lianjia.com']
+    start_urls = ['https://cq.lianjia.com/ershoufang/']
+
+    def start_requests(self):
+        for i in range(100):
+            url = 'https://cq.lianjia.com/ershoufang/pg%s' % i
+            yield scrapy.Request(url=url)
+
+    def parse(self, response):
+        for item in response.css('.sellListContent > li'):
+            yield RealEstateItem(
+                name=item.css('.title > a::text').extract_first(),
+                url=item.css('.title > a::attr("href")').extract_first(),
+                type='secondhand',
+                price=item.css('.totalPrice > span::text').extract_first(),
+                region=item.css('.houseInfo > a::text').extract_first(),
+                size=item.css('.houseInfo::text').extract_first().split(' | ')[2]
+            )
+
+        # pagination
+        # a_next = response.css('.house-lst-page-box > a')[-1]
+        # href = a_next.css('a::attr("href")')
+        # yield scrapy.Response(url='https://cq.lianjia.com' + href)
diff --git a/spiders/realestate/scrapy.cfg b/spiders/realestate/scrapy.cfg
new file mode 100644
index 00000000..d630e123
--- /dev/null
+++ b/spiders/realestate/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = realestate.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = realestate
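The pagination block at the bottom of lianjia.py is left commented out, and as drafted it yields a Response where a `scrapy.Request` is what actually schedules the next page. A rough sketch of how a follow-the-next-link version could look, with the selector taken from that comment and not verified against the live page:

```python
import scrapy


class LianjiaSpider(scrapy.Spider):
    name = 'lianjia'
    allowed_domains = ['lianjia.com']
    start_urls = ['https://cq.lianjia.com/ershoufang/']

    def parse(self, response):
        # ... yield one RealEstateItem per listing, as in the diff above ...

        # Follow the "next page" link instead of enumerating pg1..pg100 up front.
        page_links = response.css('.house-lst-page-box > a')
        if page_links:
            href = page_links[-1].css('a::attr("href")').extract_first()
            if href:
                yield scrapy.Request(url=response.urljoin(href), callback=self.parse)
```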