
A simple crawler in Python


Below is a very simple crawler script written in Python 2 (note the urllib2, urlparse, and raw_input usage).
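For anyone on Python 3, the modules this script relies on have been moved; a rough mapping for porting (for reference only, not part of the script):

import urllib.request   # Python 3 home of urllib2 (Request, urlopen)
import urllib.parse     # Python 3 home of urlparse (urljoin)
# raw_input() is simply input() in Python 3

The script itself: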

import os, re
import urlparse, urllib2

class Request(object):
    def __init__(self):
        self.url = ''
        self.error_url = []       # urls that came back empty or failed
        self.undownload_url = []  # queue of urls still to fetch
        self.downloaded_url = []  # urls already fetched
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) '
                                      'Gecko/20091201 Firefox/3.5.6',
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
                        }

    def read_url_from_file(self):
        # seed urls come from urls.txt (one per line); fall back to a prompt
        urls = []
        try:
            r = open('urls.txt', 'r')
            urls = r.readlines()
            r.close()
        except IOError:
            url = raw_input("no file urls.txt, please input a url >>> ")
            if not url.lower().startswith(('http://', 'https://')):
                url = 'http://' + url
            urls.append(url)
        return urls


    def is_download_url(self):
        # true if self.url has already been fetched (currently unused)
        return self.url in self.downloaded_url

    def get(self):
        # fetch self.url, retrying up to three times; keep only text responses
        req = urllib2.Request(self.url, None, self.headers)
        for i in range(3):
            try:
                temp = urllib2.urlopen(req)
            except (urllib2.URLError, IOError):
                continue  # network error: try again
            if temp.getcode() == 200:
                content_type = temp.headers.get('content-type', '')
                if 'text' in content_type or not content_type:
                    return temp.read()
        return ''
 
    def find_links(self):
        # pull href targets out of the page and queue any url not seen before
        com = re.compile(r"""<a[^>]*?href\s*=\s*['"]?([^'"\s>]{1,500})['">\s]""", re.I | re.M | re.S)
        links = set(com.findall(self.content))
        full_links = [urlparse.urljoin(self.url, x) for x in links]
        for link in full_links:
            if link not in self.undownload_url and link not in self.downloaded_url:
                self.undownload_url.append(link)

    def save_page(self, page_source, filename):
        # filename looks like '<dir>/<page>'; create the directory on demand
        dirname = filename.split('/')[0]
        if not os.path.isdir(dirname):
            os.mkdir(dirname)
        w = open(filename, 'w')
        w.write(page_source)
        w.close()
              
    def save_result(self, urls, filename):
        # dump a list of urls (or tab-separated records), one per line
        w = open(filename, 'w')
        w.write('\n'.join(urls))
        w.close()

    def run(self):
        self.page = 0
        self.ldir = 0
        self.num_per_dir = 100  # pages per directory before rolling to the next one
        self.undownload_url = self.read_url_from_file()
        self.url_to_path = {}
        try:
            while self.undownload_url:
                self.url = self.undownload_url.pop(0).strip()
                order = self.page + self.ldir * self.num_per_dir
                print(" %4d download... %s " % (order, self.url))
                self.content = self.get()
                self.downloaded_url.append(self.url)
                if self.content:
                    self.find_links()
                    filename = str(self.ldir) + '/' + str(self.page)
                    self.save_page(self.content, filename)
                    self.url_to_path[str(order)] = self.url
                    self.page += 1
                    if self.page >= self.num_per_dir:
                        self.page -= self.num_per_dir
                        self.ldir += 1
                else:
                    self.error_url.append(self.url)
            print("finished !")
        except KeyboardInterrupt:
            print("interrupted, saving progress")
        finally:
            # persist state whether we finish cleanly, crash, or get a Ctrl-C
            print("save downloaded urls into downloaded file")
            self.save_result(self.downloaded_url, 'downloaded')
            print("save error urls into error file")
            self.save_result(self.error_url, 'error')
            print("save undownloaded urls into undownload file")
            self.save_result(self.undownload_url, 'undownload')
            print("save url to file dictionary into url_to_file")
            self.save_result([x + '\t' + self.url_to_path[x] for x in self.url_to_path.keys()], 'url_to_file')
            print("total tried to download %d urls" % len(self.downloaded_url))
 
if __name__ == '__main__':
    crawler = Request()
    crawler.run()
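A minimal usage sketch (the script name crawler.py is assumed here, the post never names the file): put one or more seed urls in urls.txt, one per line, and run the script. Pages are saved as numbered files in numbered directories (0/0, 0/1, ..., rolling over to 1/0 after 100 pages), and the downloaded, error, undownload, and url_to_file bookkeeping files are written when the crawl finishes or is interrupted with Ctrl-C.

$ cat urls.txt
http://example.com/
$ python crawler.py
    0 download... http://example.com/
    ...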
 