
Scrapy default settings

The values below are Scrapy's built-in defaults (the scrapy.settings.default_settings module); a project overrides any of them in its own settings.py.

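A quick way to check what any of these defaults resolves to in your own install is to construct an empty Settings object, which comes pre-populated with the defaults. A minimal sketch, assuming a plain Scrapy install with no project settings on the path:

from scrapy.settings import Settings

settings = Settings()                            # defaults only, no project settings loaded
print(settings['BOT_NAME'])                      # 'scrapybot'
print(settings.getint('CONCURRENT_REQUESTS'))    # 16
print(settings.getbool('COOKIES_ENABLED'))       # True
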
# os, sys and the os.path helpers are needed by the EDITOR and TEMPLATES_DIR defaults below
import os
import sys
from os.path import join, abspath, dirname

BOT_NAME = 'scrapybot'

CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0

COMMANDS_MODULE = ''

CONCURRENT_ITEMS = 100

CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0

COOKIES_ENABLED = True
COOKIES_DEBUG = False

DEFAULT_ITEM_CLASS = 'scrapy.item.Item'

DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0

DNSCACHE_ENABLED = True

DOWNLOAD_DELAY = 0

DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
    'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
    'http': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler',
    'https': 'scrapy.core.downloader.handlers.http.HttpDownloadHandler',
    's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
}

DOWNLOAD_TIMEOUT = 180      # 3mins

DOWNLOADER_DEBUG = False

DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.webclient.ScrapyClientContextFactory'

DOWNLOADER_MIDDLEWARES = {}

DOWNLOADER_MIDDLEWARES_BASE = {
    # Engine side
    'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
    'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
    'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
    'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
    'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
    'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
    'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
    'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
    'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 800,
    'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
    'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
    'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
    # Downloader side
}

DOWNLOADER_STATS = True

DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'

try:
    EDITOR = os.environ['EDITOR']
except KeyError:
    if sys.platform == 'win32':
        EDITOR = '%s -m idlelib.idle'
    else:
        EDITOR = 'vi'

EXTENSIONS = {}

EXTENSIONS_BASE = {
    'scrapy.contrib.corestats.CoreStats': 0,
    'scrapy.webservice.WebService': 0,
    'scrapy.telnet.TelnetConsole': 0,
    'scrapy.contrib.memusage.MemoryUsage': 0,
    'scrapy.contrib.memdebug.MemoryDebugger': 0,
    'scrapy.contrib.closespider.CloseSpider': 0,
    'scrapy.contrib.feedexport.FeedExporter': 0,
    'scrapy.contrib.logstats.LogStats': 0,
    'scrapy.contrib.spiderstate.SpiderState': 0,
    'scrapy.contrib.throttle.AutoThrottle': 0,
}

FEED_URI = None
FEED_URI_PARAMS = None  # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
    '': 'scrapy.contrib.feedexport.FileFeedStorage',
    'file': 'scrapy.contrib.feedexport.FileFeedStorage',
    'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage',
    's3': 'scrapy.contrib.feedexport.S3FeedStorage',
    'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
    'json': 'scrapy.contrib.exporter.JsonItemExporter',
    'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter',
    'csv': 'scrapy.contrib.exporter.CsvItemExporter',
    'xml': 'scrapy.contrib.exporter.XmlItemExporter',
    'marshal': 'scrapy.contrib.exporter.MarshalItemExporter',
    'pickle': 'scrapy.contrib.exporter.PickleItemExporter',
}

HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.DbmCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_DBM_MODULE = 'anydbm'

ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager'

# Item pipelines are typically set in specific commands settings
ITEM_PIPELINES = []

LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None

LOG_UNSERIALIZABLE_REQUESTS = False

LOGSTATS_INTERVAL = 60.0

MAIL_DEBUG = False
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None

MEMDEBUG_ENABLED = False        # enable memory debugging
MEMDEBUG_NOTIFY = []            # send memory debugging report by mail at engine shutdown

MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0

NEWSPIDER_MODULE = ''

RANDOMIZE_DOWNLOAD_DELAY = True

REDIRECT_ENABLED = True
REDIRECT_MAX_METAREFRESH_DELAY = 100
REDIRECT_MAX_TIMES = 20  # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2

REFERER_ENABLED = True

RETRY_ENABLED = True
RETRY_TIMES = 2  # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1

ROBOTSTXT_OBEY = False

SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'

SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager'

SPIDER_MIDDLEWARES = {}

SPIDER_MIDDLEWARES_BASE = {
    # Engine side
    'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
    'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
    'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
    'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
    'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
    # Spider side
}

SPIDER_MODULES = []

STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector'
STATS_DUMP = True

STATSMAILER_RCPTS = []

TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))

URLLENGTH_LIMIT = 2083

USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % __import__('scrapy').__version__

TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '0.0.0.0'

WEBSERVICE_ENABLED = True
WEBSERVICE_LOGFILE = None
WEBSERVICE_PORT = [6080, 7030]
WEBSERVICE_HOST = '0.0.0.0'
WEBSERVICE_RESOURCES = {}
WEBSERVICE_RESOURCES_BASE = {
    'scrapy.contrib.webservice.crawler.CrawlerResource': 1,
    'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1,
    'scrapy.contrib.webservice.stats.StatsResource': 1,
}

SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
    'scrapy.contracts.default.UrlContract': 1,
    'scrapy.contracts.default.ReturnsContract': 2,
    'scrapy.contracts.default.ScrapesContract': 3,
}
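
These defaults are not edited in place; a project overrides the ones it cares about in its own settings.py and Scrapy fills in the rest. A minimal sketch of such an override, assuming a hypothetical project named myproject (its middleware and pipeline classes are made up for illustration):

# settings.py of the hypothetical project "myproject"; anything not listed
# here keeps the default value shown above.
BOT_NAME = 'myproject'
SPIDER_MODULES = ['myproject.spiders']
NEWSPIDER_MODULE = 'myproject.spiders'

DOWNLOAD_DELAY = 2         # default is 0
ROBOTSTXT_OBEY = True      # default is False

# User dicts are merged with the corresponding *_BASE dicts above, so only
# additions and changes need to be listed; assigning None disables a built-in
# component without touching the rest.
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.RandomUserAgentMiddleware': 400,  # hypothetical class
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
}

# A list in the Scrapy version shown above; newer releases expect a dict of
# class path -> order instead.
ITEM_PIPELINES = ['myproject.pipelines.MyPipeline']  # hypothetical class

The order values (100 to 900 in the _BASE dicts above) decide where a component sits in the processing chain relative to the built-in ones, which is why a custom user-agent middleware is typically given an order close to the built-in UserAgentMiddleware it replaces.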