Here's a spider I wrote in D: 20 worker threads pull URLs from a shared queue and crawl the listing pages on mobile.younet.com.
I'm loving D more and more.
import std.stdio;
import std.string;
import std.conv;
import std.socket;
import std.socketstream;
import std.stream;
import std.regexp;
import std.thread;
import std.c.time;
char[][] g_queue;      // shared URL queue, guarded by synchronized blocks
int g_task_amount=0;   // URLs queued but not yet finished
const char[] homepage="http://mobile.younet.com/";
const ushort max_thread=20;
alias std.string.find strfind;
// Fetch a page over a raw TCP socket with a hand-rolled HTTP/1.0 GET,
// follow one redirect if the server sends one, and return the body
// with the response headers stripped off.
char[] getHTML(char[] url){
    char[] domain,html;
    ubyte[1024] buf;
    ushort port=80;
    if(!isURL(url))return null;
    // strip the scheme, then split "host[:port]" from the path
    int i=strfind(url,"://")+3;
    url=url[i..$];
    int j=strfind(url,":");
    int e=strfind(url,"/");
    if(e<0){
        e=url.length;
    }
    if(j>0){
        port=toUshort(url[j+1..e]);
        domain=url[0..j];
    }
    else{
        domain=url[0..e];
    }
    if(e==url.length){
        url="/";
    }
    else{
        url=url[e..$];
    }
    debug(younet){
        writefln(toString(port) ~" "~ domain ~" "~ url);
    }
    Socket sock=new TcpSocket(new InternetAddress(domain,port));
    Stream ss=new SocketStream(sock);
    ss.writeString("GET " ~ url ~ " HTTP/1.0\r\n"
        "Host: " ~ domain ~ "\r\n"
        "Connection: close\r\n"
        "Referer: http://" ~ domain ~ url ~ "\r\n"
        "\r\n");                        // blank line terminates the request
    // read until the server closes the connection
    int recv_amount=ss.read(buf);
    while(recv_amount>0){
        html ~= cast(char[])buf[0..recv_amount];
        recv_amount=ss.read(buf);
    }
    ss.close();
    sock.close();
    // follow a redirect, whether it arrives as a meta refresh (URL=...)
    // or a Location: header
    char[][] mc=RegExp("(URL=|Location: )(.*?)[\"\r]").match(html);
    if(mc.length==3){
        return getHTML(mc[2]);
    }
    // drop everything up to and including the blank line after the headers
    int start_pos=strfind(html,"\r\n\r\n");
    if(start_pos<0)return html;   // no header/body separator found
    return html[start_pos+4 .. $];
}
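Before wiring up the threads, getHTML can be sanity-checked on its own. A throwaway sketch (testFetch is just a scratch name I made up; the URL is the site's front page from above):

void testFetch(){
    // one-off fetch to eyeball how much body comes back
    char[] page=getHTML("http://mobile.younet.com/");
    writefln("fetched " ~ toString(page.length) ~ " bytes");
}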
// Worker thread body: pop a URL off the shared queue, fetch it, and for
// the front page push every paged listing URL it links to. The sleep now
// happens outside the lock, so an idle worker no longer stalls the others.
int crawl(void * p){
    while(true){
        char[] url,html;
        synchronized{
            if(g_queue.length>0){
                url=g_queue[0];
                g_queue=g_queue[1..$];
            }
        }
        if(url.length==0){
            if(g_task_amount<=0)
                break;      // queue drained and nothing in flight: we're done
            sleep(1);       // queue empty but other workers may still enqueue
            continue;
        }
        writefln("begin:" ~ url);
        try{
            html=getHTML(url);
        }
        catch(Exception ex){
            synchronized{
                if(g_task_amount>0)
                    g_task_amount-=1;
            }
            writefln(ex);
            writefln("failed:" ~ url);
            writefln("remains:" ~ toString(g_task_amount));
            continue;
        }
        debug(younet){
            size_t n=html.length<200 ? html.length : 200;
            writefln("!!!" ~ html[0..n]);   // peek at the start of the body
        }
        if(strfind(url,"files/list")<0){
            // front page: harvest the files/list_N.html pager links
            synchronized{
                foreach(m;RegExp("files/list_\\d+\\.html").search(html)){
                    g_queue ~= homepage ~ m.match(0);
                    g_task_amount+=1;
                }
                g_task_amount-=1;
            }
            writefln("done:" ~ url);
            debug(younet){
                writefln(g_queue);
            }
        }
        else{
            writefln("done:" ~ url);
            synchronized{
                g_task_amount-=1;
                writefln("remains:" ~ toString(g_task_amount));
            }
        }
    }
    return 1;
}
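To see what the listing regex actually matches, here's a tiny standalone check against a made-up snippet (testRegex and the sample HTML are fabricated, not taken from the real site):

void testRegex(){
    // fragment shaped like the site's pager links
    char[] sample=`<a href="files/list_1.html">1</a> <a href="files/list_28.html">28</a>`;
    foreach(m;RegExp("files/list_\\d+\\.html").search(sample)){
        writefln(homepage ~ m.match(0)); // http://mobile.younet.com/files/list_1.html etc.
    }
}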
int main(char[][] args){
    // seed the queue with the front page
    g_queue ~= homepage;
    g_task_amount+=1;
    Thread[] tds;
    for(int i=0;i<max_thread;i++){
        Thread t=new Thread(&crawl,null);
        t.start();
        tds ~= t;
    }
    // give the workers a head start, then poll until every task finishes
    sleep(5);
    while(true){
        sleep(1);
        if(g_task_amount<=0)break;
    }
    return 0;
}
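A side note for anyone reading this later: std.stream, std.socketstream, std.regexp and std.thread are old Phobos modules that were later removed, so this source won't build on a recent DMD. The fetch-and-extract core translates to modern D roughly like this (an untested sketch; std.net.curl does the HTTP legwork instead of a raw socket):

import std.net.curl : get;
import std.regex : matchAll, regex;
import std.stdio : writeln;

void main(){
    // fetch the front page via libcurl, then pull out the pager links
    auto html=get("http://mobile.younet.com/");
    foreach(m; matchAll(html, regex(`files/list_\d+\.html`)))
        writeln("http://mobile.younet.com/", m.hit);
}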