`

Scrapy入门教程-05实验-爬取网站并入库

阅读更多

出处:http://blog.chinaunix.net/uid-23500957-id-3788157.html

 

1、创建项目

 scrapy startproject fjsen

2、定义items--items.py

# Define here the models for your scraped items

#

# See documentation in:

# http://doc.scrapy.org/topics/items.html 

from scrapy.item import Item, Field

class FjsenItem(Item):
    """Container for one article scraped from fjsen.com."""

    title = Field()    # article title
    link = Field()     # article URL (relative, as found in the page)
    addtime = Field()  # publication timestamp string

3、编写爬虫

新建一个fjsen_spider.py,内容如下:

#-*- coding: utf-8 -*-

from scrapy.spider import BaseSpider

from scrapy.selector import HtmlXPathSelector

from fjsen.items import FjsenItem

class FjsenSpider(BaseSpider):
    """Crawl the fjsen.com news index pages and collect article metadata."""

    name = "fjsen"
    allowed_domains = ["fjsen.com"]
    # Pages 2..10 of the index first, then the unnumbered first page.
    start_urls = (
        ['http://www.fjsen.com/j/node_94962_%d.htm' % page for page in range(2, 11)]
        + ['http://www.fjsen.com/j/node_94962.htm']
    )

    def parse(self, response):
        """Extract one FjsenItem per <li> entry of an index page.

        Each item holds the anchor text (title), the anchor href (link)
        and the <span> text (addtime), all as lists as returned by
        ``extract()``.
        """
        selector = HtmlXPathSelector(response)
        collected = []
        for entry in selector.select('//ul/li'):
            article = FjsenItem()
            article['title'] = entry.select('a/text()').extract()
            article['link'] = entry.select('a/@href').extract()
            article['addtime'] = entry.select('span/text()').extract()
            collected.append(article)
        return collected

4、入库---在pipelines.py中处理

# Define your item pipelines here

#

# Don't forget to add your pipeline to the ITEM_PIPELINES setting

# See: http://doc.scrapy.org/topics/item-pipeline.html

import sqlite3

from os import path

from scrapy import signals

from scrapy.xlib.pydispatch import dispatcher

class FjsenPipeline(object):
    """Persist scraped items into a local SQLite database.

    The connection is opened when the Scrapy engine starts and is
    committed and closed when the engine stops.
    """

    # Database file written by the pipeline (created on first run).
    # BUGFIX: the original code read self.filename in initialize() but
    # never assigned it, so the pipeline crashed with AttributeError.
    filename = 'data.sqlite'

    def __init__(self):
        self.conn = None
        # Wire the open/close of the database to engine lifecycle signals.
        dispatcher.connect(self.initialize, signals.engine_started)
        dispatcher.connect(self.finalize, signals.engine_stopped)

    def process_item(self, item, spider):
        """Insert one item row; the id column is auto-assigned by SQLite."""
        self.conn.execute(
            'insert into fjsen values(?,?,?,?)',
            (None,
             item['title'][0],
             'http://www.fjsen.com/' + item['link'][0],
             item['addtime'][0]))
        return item

    def initialize(self):
        """Open the database, creating the schema on first use."""
        if path.exists(self.filename):
            self.conn = sqlite3.connect(self.filename)
        else:
            self.conn = self.create_table(self.filename)

    def finalize(self):
        """Commit pending rows and close the connection (idempotent)."""
        if self.conn is not None:
            self.conn.commit()
            self.conn.close()
            self.conn = None

    def create_table(self, filename):
        """Create the fjsen table in a fresh database and return the connection."""
        conn = sqlite3.connect(filename)
        conn.execute("""create table fjsen(id integer primary key autoincrement,title text,link text,addtime text)""")
        conn.commit()
        return conn

5、修改配置--setting.py

ITEM_PIPELINES=['fjsen.pipelines.FjsenPipeline']

6、执行

scrapy crawl fjsen

 

生成一个 data.sqlite 的数据库文件

分享到:
评论

相关推荐

Global site tag (gtag.js) - Google Analytics