diff --git a/crawlab/routes/sites.py b/crawlab/routes/sites.py
index d9bd4593..6874af2b 100644
--- a/crawlab/routes/sites.py
+++ b/crawlab/routes/sites.py
@@ -56,10 +56,18 @@ class SiteApi(BaseApi):
sort_direction=ASCENDING
)
+ sites = []
+ for site in items:
+ # get spider count
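+            # (each spider references its site by the site's '_id' stored in its 'site' field)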
+ site['spider_count'] = db_manager.count('spiders', {'site': site['_id']})
+
+ sites.append(site)
+
return {
'status': 'ok',
'total_count': db_manager.count(self.col_name, filter_),
'page_num': page_num,
'page_size': page_size,
- 'items': jsonify(items)
+ 'items': jsonify(sites)
}
diff --git a/crawlab/routes/spiders.py b/crawlab/routes/spiders.py
index 5473d824..51bae78c 100644
--- a/crawlab/routes/spiders.py
+++ b/crawlab/routes/spiders.py
@@ -128,6 +128,12 @@ class SpiderApi(BaseApi):
if last_task is not None:
spider['task_ts'] = last_task['create_ts']
+            # look up the referenced site so the spider list can display its name
+ if spider.get('site') is not None:
+ site = db_manager.get('sites', spider['site'])
+ if site is not None:
+ spider['site_name'] = site['name']
+
# file stats
stats = get_file_suffix_stats(dir_path)
diff --git a/frontend/src/i18n/zh.js b/frontend/src/i18n/zh.js
index a6c9a5b6..dd1d529e 100644
--- a/frontend/src/i18n/zh.js
+++ b/frontend/src/i18n/zh.js
@@ -138,6 +138,7 @@ export default {
'Category': '类别',
'Select': '请选择',
'Select Category': '请选择类别',
+ 'Spider Count': '爬虫数',
// 文件
'Choose Folder': '选择文件',
diff --git a/frontend/src/views/site/SiteList.vue b/frontend/src/views/site/SiteList.vue
index d4393a42..9c053dc5 100644
--- a/frontend/src/views/site/SiteList.vue
+++ b/frontend/src/views/site/SiteList.vue
@@ -41,6 +41,17 @@
+        <el-table-column :key="col.name" :label="$t(col.label)" :width="col.width">
+          <template slot-scope="scope">
+            <span :class="col.name === 'domain' ? 'domain' : ''">
+              {{scope.row[col.name]}}
+            </span>
+          </template>
+        </el-table-column>
>> .el-select .el-select__caret {
line-height: 32px;
}
+
+ .table >>> .domain {
+ text-decoration: underline;
+ }
diff --git a/frontend/src/views/spider/SpiderList.vue b/frontend/src/views/spider/SpiderList.vue
index 14a9ffea..874dcc0b 100644
--- a/frontend/src/views/spider/SpiderList.vue
+++ b/frontend/src/views/spider/SpiderList.vue
@@ -160,8 +160,10 @@ export default {
// tableData,
columns: [
{ name: 'name', label: 'Name', width: 'auto' },
- { name: 'type', label: 'Spider Type', width: '160', sortable: true },
- { name: 'lang', label: 'Language', width: '160', sortable: true },
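+        // 'site_name' is filled in by the backend from the spider's associated site (see routes/spiders.py above)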
+ { name: 'site_name', label: 'Site', width: '120' },
+ { name: 'type', label: 'Spider Type', width: '120', sortable: true },
+ { name: 'lang', label: 'Language', width: '120', sortable: true },
{ name: 'task_ts', label: 'Last Run', width: '160' },
{ name: 'last_7d_tasks', label: 'Last 7-Day Tasks', width: '80' },
{ name: 'last_5_errors', label: 'Last 5-Run Errors', width: '80' }
diff --git a/spiders/jd/jd/__init__.py b/spiders/jd/jd/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/spiders/jd/jd/items.py b/spiders/jd/jd/items.py
new file mode 100644
index 00000000..9a7ba1cb
--- /dev/null
+++ b/spiders/jd/jd/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class JdItem(scrapy.Item):
+ # define the fields for your item here like:
+ name = scrapy.Field()
+ price = scrapy.Field()
diff --git a/spiders/jd/jd/middlewares.py b/spiders/jd/jd/middlewares.py
new file mode 100644
index 00000000..6fceded5
--- /dev/null
+++ b/spiders/jd/jd/middlewares.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class JdSpiderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the spider middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_spider_input(self, response, spider):
+ # Called for each response that goes through the spider
+ # middleware and into the spider.
+
+ # Should return None or raise an exception.
+ return None
+
+ def process_spider_output(self, response, result, spider):
+ # Called with the results returned from the Spider, after
+ # it has processed the response.
+
+ # Must return an iterable of Request, dict or Item objects.
+ for i in result:
+ yield i
+
+ def process_spider_exception(self, response, exception, spider):
+ # Called when a spider or process_spider_input() method
+ # (from other spider middleware) raises an exception.
+
+ # Should return either None or an iterable of Response, dict
+ # or Item objects.
+ pass
+
+ def process_start_requests(self, start_requests, spider):
+ # Called with the start requests of the spider, and works
+ # similarly to the process_spider_output() method, except
+ # that it doesn’t have a response associated.
+
+ # Must return only requests (not items).
+ for r in start_requests:
+ yield r
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class JdDownloaderMiddleware(object):
+ # Not all methods need to be defined. If a method is not defined,
+ # scrapy acts as if the downloader middleware does not modify the
+ # passed objects.
+
+ @classmethod
+ def from_crawler(cls, crawler):
+ # This method is used by Scrapy to create your spiders.
+ s = cls()
+ crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+ return s
+
+ def process_request(self, request, spider):
+ # Called for each request that goes through the downloader
+ # middleware.
+
+ # Must either:
+ # - return None: continue processing this request
+ # - or return a Response object
+ # - or return a Request object
+ # - or raise IgnoreRequest: process_exception() methods of
+ # installed downloader middleware will be called
+ return None
+
+ def process_response(self, request, response, spider):
+ # Called with the response returned from the downloader.
+
+        # Must either:
+ # - return a Response object
+ # - return a Request object
+ # - or raise IgnoreRequest
+ return response
+
+ def process_exception(self, request, exception, spider):
+ # Called when a download handler or a process_request()
+ # (from other downloader middleware) raises an exception.
+
+ # Must either:
+ # - return None: continue processing this exception
+ # - return a Response object: stops process_exception() chain
+ # - return a Request object: stops process_exception() chain
+ pass
+
+ def spider_opened(self, spider):
+ spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/spiders/jd/jd/pipelines.py b/spiders/jd/jd/pipelines.py
new file mode 100644
index 00000000..b862b7e7
--- /dev/null
+++ b/spiders/jd/jd/pipelines.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+import os
+
+from pymongo import MongoClient
+
+# MongoDB connection settings are read from environment variables, with local
+# defaults, so the pipeline also runs outside of Crawlab.
+MONGO_HOST = os.environ.get('MONGO_HOST') or 'localhost'
+MONGO_PORT = int(os.environ.get('MONGO_PORT') or 27017)
+MONGO_DB = os.environ.get('MONGO_DB') or 'test'
+
+
+class JdPipeline(object):
+    mongo = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
+    db = mongo[MONGO_DB]
+    col_name = os.environ.get('CRAWLAB_COLLECTION') or 'jd_products'
+    col = db[col_name]
+
+    def process_item(self, item, spider):
+        # store the scraped item in MongoDB, then pass it on to later pipelines
+        self.col.insert_one(dict(item))
+        return item
diff --git a/spiders/jd/jd/settings.py b/spiders/jd/jd/settings.py
new file mode 100644
index 00000000..d83206b2
--- /dev/null
+++ b/spiders/jd/jd/settings.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for jd project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+# https://doc.scrapy.org/en/latest/topics/settings.html
+# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'jd'
+
+SPIDER_MODULES = ['jd.spiders']
+NEWSPIDER_MODULE = 'jd.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'jd (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'jd.middlewares.JdSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+# 'jd.middlewares.JdDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+ 'jd.pipelines.JdPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/spiders/jd/jd/spiders/__init__.py b/spiders/jd/jd/spiders/__init__.py
new file mode 100644
index 00000000..ebd689ac
--- /dev/null
+++ b/spiders/jd/jd/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/spiders/jd/jd/spiders/jd_spider.py b/spiders/jd/jd/spiders/jd_spider.py
new file mode 100644
index 00000000..01113a7e
--- /dev/null
+++ b/spiders/jd/jd/spiders/jd_spider.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+import scrapy
+
+
+class JdSpiderSpider(scrapy.Spider):
+ name = 'jd_spider'
+ allowed_domains = ['jd.com']
+ start_urls = ['http://jd.com/']
+
+ def parse(self, response):
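+        # scaffolded parse stub: extraction logic for jd.com pages is not implemented yet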
+ pass
diff --git a/spiders/jd/scrapy.cfg b/spiders/jd/scrapy.cfg
new file mode 100644
index 00000000..87cf0280
--- /dev/null
+++ b/spiders/jd/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = jd.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = jd