使用JSoup+CSSPath采集和讯网人物信息
代码见github
模型类:
/**
 * Model class for one person profile scraped from hexun.com: the person's
 * name, basic-info key/value pairs, and free-text lists for education
 * history, job history and important events.
 */
public class Person {
    private String name;
    // 基本信息: basic information as attribute-name -> attribute-value pairs
    private Map<String, String> basicInfos;
    // 教育经历: education history entries (was package-private; now private
    // for consistency with the other fields — accessors are unchanged)
    private List<String> educations;
    // 工作经历: job history entries
    private List<String> jobs;
    // 重要事件: important event entries
    private List<String> importants;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Map<String, String> getBasicInfos() {
        return basicInfos;
    }

    public void setBasicInfos(Map<String, String> basicInfos) {
        this.basicInfos = basicInfos;
    }

    public List<String> getEducations() {
        return educations;
    }

    public void setEducations(List<String> educations) {
        this.educations = educations;
    }

    public List<String> getJobs() {
        return jobs;
    }

    public void setJobs(List<String> jobs) {
        this.jobs = jobs;
    }

    public List<String> getImportants() {
        return importants;
    }

    public void setImportants(List<String> importants) {
        this.importants = importants;
    }
}
采集器:
package org.apdplat.demo.collect; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class PersonCollector{ private static final Logger LOG = LoggerFactory.getLogger(PersonCollector.class); private static final int PAGES = 298; public List<Person> collect() { List<Person> persons = new ArrayList<>(); try { String url = "http://renwu.hexun.com/search.aspx?z=All&Filter=All&page="; //共298页 for(int i=1; i<PAGES+1; i++){ url += i; Document document = Jsoup.connect(url).get(); String cssQuery = "html body div.wrap div.mainBox div.main div.contBox div.cont div.slistBox ul li a"; LOG.debug("cssQuery: " + cssQuery); Elements elements = document.select(cssQuery); for(Element element : elements){ try{ String personName = element.text().replace(Jsoup.parse(" ").text(), " ").replace(Jsoup.parse("・").text(), "·"); LOG.debug("人物姓名:"+personName); String href = element.attr("href"); LOG.debug("人物链接:"+href); document = Jsoup.connect(href).get(); //基本信息 String basicInfoCSSQuery = "html body div.wrap div.mainBox div.main div.setBase div.right ul li"; LOG.debug("basicInfoCSSQuery: " + basicInfoCSSQuery); Elements basicElements = document.select(basicInfoCSSQuery); Map<String, String> basicInfos = new HashMap<>(); for(Element basicElement : basicElements){ String info = basicElement.text().replace(Jsoup.parse(" ").text(), " ").replace(Jsoup.parse("・").text(), "·"); if(info != null){ String[] attrs = info.split(":"); if(attrs != null && attrs.length == 2){ basicInfos.put(attrs[0], attrs[1]); } } } String moreCSSQuery = "html body div.wrap div.mainBox div.main div.contBox"; LOG.debug("moreCSSQuery: " + moreCSSQuery); Elements moreElements = document.select(moreCSSQuery); //教育经历 List<String> educations = 
new ArrayList<>(); Elements educationElements = moreElements.get(0).select("div.cont p"); for(Element educationElement : educationElements){ String education = educationElement.text().replace(Jsoup.parse(" ").text(), " ").replace(Jsoup.parse("・").text(), "·"); if(education != null && !"".equals(education.trim())){ educations.add(education); } } //工作经历 List<String> jobs = new ArrayList<>(); Elements jobElements = moreElements.get(1).select("div.cont p"); for(Element jobElement : jobElements){ String job = jobElement.text().replace(Jsoup.parse(" ").text(), " ").replace(Jsoup.parse("・").text(), "·"); if(job != null && !"".equals(job.trim())){ jobs.add(job); } } //重要事件 List<String> importants = new ArrayList<>(); Elements importantElements = moreElements.get(4).select("div.cont p"); for(Element importantElement : importantElements){ String important = importantElement.text().replace(Jsoup.parse(" ").text(), " ").replace(Jsoup.parse("・").text(), "·"); if(important != null && !"".equals(important.trim())){ importants.add(important); } } Person person = new Person(); person.setName(personName); person.setBasicInfos(basicInfos); person.setEducations(educations); person.setJobs(jobs); person.setImportants(importants); persons.add(person); }catch(IOException e){ LOG.error("采集出错",e); } } } } catch (IOException ex) { LOG.error("采集出错",ex); } return persons; } public static void main(String[] args) { PersonCollector personCollector = new PersonCollector(); List<Person> persons = personCollector.collect(); if (persons != null) { int i = 1; for (Person person : persons) { LOG.info("采集结果 " + (i++) + " "+person.getName()+ " :"); if(person.getBasicInfos() != null && person.getBasicInfos().size() > 0){ LOG.info("基本信息************************************************************"); for(Entry<String, String> basicInfo : person.getBasicInfos().entrySet()){ LOG.info(basicInfo.getKey() +":" + basicInfo.getValue()); } } if(person.getEducations() != null && person.getEducations().size() > 0){ 
LOG.info(""); LOG.info("教育经历************************************************************"); for(String education : person.getEducations()){ LOG.info(education); } } if(person.getJobs() != null && person.getJobs().size() > 0){ LOG.info(""); LOG.info("工作经历************************************************************"); for(String job : person.getJobs()){ LOG.info(job); } } if(person.getImportants() != null && person.getImportants().size() > 0){ LOG.info(""); LOG.info("重要事件************************************************************"); for(String important : person.getImportants()){ LOG.info(important.replace("\\?", " ")); } } LOG.info(""); LOG.info(""); } } else { LOG.error("没有采集到结果"); } } }
相关推荐
资源名字:基于java+Jsoup+HttpClient的网络爬虫技术的网络新闻分析系统设计与实现(源码+文档)_MySQL_网络爬虫_数据挖掘.zip 资源内容:项目全套源码+完整文档 源码说明: 全部项目源码都是经过测试校正后百分百...
jsoup+httpclient 简单爬虫,一个jsoup的简单爬虫实例
主要介绍了Java爬虫Jsoup+httpclient获取动态生成的数据的相关资料,需要的朋友可以参考下
java基于jsoup+mongodb的简单爬虫入门程序,简单易懂,希望能给大家提供帮助
用eclipse在Maven基础上使用Jsoup+MYSQL+MyBatis+jsp实现网络小说抓取以及前端展示
web小说网站
springboot+mybatisplus+jsoup+mysql springboot【小说阅读网站】,多线程抓取小说数据(单本或者批量),持久化到MySQL数据库,能定时跟源站数据同步=小说定时更新。通过jsoup采集数据到mysql数据,redis作为缓存...
jsoup+htmlunit 爬取外币汇率 可运行直接拿到数据
Java爬虫【一篇文章精通系列-案例开发-巨细】HttpClient5 + jsoup + WebMagic + spider-flow【万字长文一篇文章学会】
图片列表RecyclerView+ListView+WebView+JzvdStdJZPlayer+Jsoup+Glide(二)
Jsoup+httpclient模拟登陆和抓取页面.pdf
根据传入的关键字,利用Jsoup和httpclient协议解析的各大视频网站的网站视频播放列表和链接地址,源码和jar包全都有,无错版,很好的学习资料
图片列表RecyclerView+ListView+WebView+JzvdStd/JZPlayer+Jsoup+Glide
Jsoup+JavaMail实现自动投票,验证邮箱功能
jsoup + jdic网页动态解析,类似火狐firebug的html查看,适用于windows,包括tray.dll,jdic.dll
这些jar包能够快速的实现网页爬虫功能,能够快熟实现模拟浏览器的一些操作。
针对java语言 编写的爬虫demo,爬取互联网图片资源 寻找图片素材,自动下载到本地
NULL 博文链接:https://yiyickf.iteye.com/blog/1107085
sb.append("欢迎使用新安人才网个人专区</title>").append("\n"); Elements meta = doc.getElementsByTag("meta"); sb.append(meta.toString()).append("\n"); ////////////////////////////body//...