/**
 * Demonstrates how a document-level boost influences Lucene 3.4 scoring.
 *
 * Five documents are indexed with "sex" and "income" fields; the fifth
 * document gets a boost of 2, so it ranks first for the query
 * "sex:1 AND income:10" (its fieldNorm is doubled, as the explain()
 * output shows). Results, scores, and score explanations are printed.
 *
 * Fixes over the original: the IndexSearcher and Directory are now
 * closed (they leaked before — only the writer was closed), and the
 * repetitive per-document construction is data-driven.
 *
 * @param args unused
 * @throws Exception on any indexing, parsing, or search failure
 */
public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_34,
            new StandardAnalyzer(Version.LUCENE_34));
    IndexWriter writer = new IndexWriter(dir, conf);
    try {
        // Field values per document; only doc index 1 differs on "sex"
        // and only doc index 2 differs on "income".
        String[] sexValues = {"1", "0", "1", "1", "1"};
        String[] incomeValues = {"10", "10", "11", "10", "10"};
        Document[] docs = new Document[sexValues.length];
        for (int i = 0; i < docs.length; i++) {
            docs[i] = new Document();
            docs[i].add(new Field("sex", sexValues[i],
                    Field.Store.YES, Field.Index.ANALYZED));
            docs[i].add(new Field("income", incomeValues[i],
                    Field.Store.YES, Field.Index.ANALYZED));
        }
        // Boost the last document so it scores higher than otherwise
        // identical matches — this is the point of the demo.
        docs[4].setBoost(2f);
        for (Document d : docs) {
            writer.addDocument(d);
        }
    } finally {
        writer.close();
    }

    IndexSearcher searcher = new IndexSearcher(dir);
    try {
        // Default field "nickname" is irrelevant here: the query names
        // its fields explicitly.
        QueryParser qp = new QueryParser(Version.LUCENE_34, "nickname",
                new StandardAnalyzer(Version.LUCENE_34));
        Query q = qp.parse("sex:1 AND income:10");
        TopDocs hits = searcher.search(q, 100);
        for (ScoreDoc sdoc : hits.scoreDocs) {
            Document doc = searcher.doc(sdoc.doc);
            System.out.print(doc.get("sex") + "\t\t");
            System.out.println(sdoc.score);
            System.out.println(searcher.explain(q, sdoc.doc));
        }
    } finally {
        searcher.close();
        dir.close();
    }
}
输出结果
1 2.828427
2.828427 = (MATCH) sum of:
1.4142135 = (MATCH) weight(sex:1 in 4), product of:
0.70710677 = queryWeight(sex:1), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
2.0 = (MATCH) fieldWeight(sex:1 in 4), product of:
1.0 = tf(termFreq(sex:1)=1)
1.0 = idf(docFreq=4, maxDocs=5)
2.0 = fieldNorm(field=sex, doc=4)
1.4142135 = (MATCH) weight(income:10 in 4), product of:
0.70710677 = queryWeight(income:10), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
2.0 = (MATCH) fieldWeight(income:10 in 4), product of:
1.0 = tf(termFreq(income:10)=1)
1.0 = idf(docFreq=4, maxDocs=5)
2.0 = fieldNorm(field=income, doc=4)
1 1.4142135
1.4142135 = (MATCH) sum of:
0.70710677 = (MATCH) weight(sex:1 in 0), product of:
0.70710677 = queryWeight(sex:1), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
1.0 = (MATCH) fieldWeight(sex:1 in 0), product of:
1.0 = tf(termFreq(sex:1)=1)
1.0 = idf(docFreq=4, maxDocs=5)
1.0 = fieldNorm(field=sex, doc=0)
0.70710677 = (MATCH) weight(income:10 in 0), product of:
0.70710677 = queryWeight(income:10), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
1.0 = (MATCH) fieldWeight(income:10 in 0), product of:
1.0 = tf(termFreq(income:10)=1)
1.0 = idf(docFreq=4, maxDocs=5)
1.0 = fieldNorm(field=income, doc=0)
1 1.4142135
1.4142135 = (MATCH) sum of:
0.70710677 = (MATCH) weight(sex:1 in 3), product of:
0.70710677 = queryWeight(sex:1), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
1.0 = (MATCH) fieldWeight(sex:1 in 3), product of:
1.0 = tf(termFreq(sex:1)=1)
1.0 = idf(docFreq=4, maxDocs=5)
1.0 = fieldNorm(field=sex, doc=3)
0.70710677 = (MATCH) weight(income:10 in 3), product of:
0.70710677 = queryWeight(income:10), product of:
1.0 = idf(docFreq=4, maxDocs=5)
0.70710677 = queryNorm
1.0 = (MATCH) fieldWeight(income:10 in 3), product of:
1.0 = tf(termFreq(income:10)=1)
1.0 = idf(docFreq=4, maxDocs=5)
1.0 = fieldNorm(field=income, doc=3)
score 的计算可以对照上面的 explain 输出来理解:对查询中的每个词项,Lucene 计算 queryWeight × fieldWeight 的乘积,再把各词项的结果相加得到最终 score;其中 fieldWeight = tf × idf × fieldNorm,而 fieldNorm 中已经包含了文档/字段的 boost 和 lengthNorm(所以被 setBoost(2f) 的 doc5 的 fieldNorm 为 2.0,得分最高)。
分享到:
相关推荐
简单的lucene应用,包括建立索引,更新索引,查询索引。基础的应用。代码方便易懂。声明该源码仅供个人学习研究使用。不得用于商业盈利目的,侵权必究。
资源为庖丁解牛分词法的最新源码以及生成的jar包,支持最新的Lucene3.4以及Lucene3.0以上版本。Jar包为本地生成
Lucene3.4开发入门.pdf
lucene3.4最新JAR包,包括核心包,分析器,查询器,利用此包可以实现LUCENE基本功能
资源为庖丁解牛分词法的最新源码以及生成的jar包,支持最新的Lucene3.4以及Lucene3.0以上版本。Jar包为本地生成,大家也可以到SVN上检出自己生成,另外庖丁解牛分词法的使用Demo我会接下来上传一份,欢迎分享。
lucene-3.4.0-src.zip 直接导入到eclipse就可以使用
lucene文档,lucene文档,lucene文档,lucene文档,lucene文档,lucene相关文档
lucene所需jar 使用文档 demo
包含翻译后的API文档:lucene-core-7.2.1-javadoc-API文档-中文(简体)版.zip; Maven坐标:org.apache.lucene:lucene-core:7.2.1; 标签:apache、lucene、core、中文文档、jar包、java; 使用方法:解压翻译后的API...
这是 lucene-3.4.0的源码。Lucene是apache软件基金会4 jakarta项目组的一个子项目,是一个开放源代码的全文检索引擎工具包,即它不是一个完整的全文检索引擎,而是一个全文检索引擎的架构,提供了完整的查询引擎和...
weblucene安装文档 。。。。。。。。。。。。。。。
lucene3.0+索引查看工具,lucene搜索引擎中索引部分的查看工具,-lucene search engine indexing part of the view tools
包含翻译后的API文档:lucene-core-7.7.0-javadoc-API文档-中文(简体)版.zip; Maven坐标:org.apache.lucene:lucene-core:7.7.0; 标签:apache、lucene、core、中文文档、jar包、java; 使用方法:解压翻译后的API...
Lucene学习文档Lucene学习文档Lucene学习文档Lucene学习文档
lucene整理文档,lucene详细描述,安装使用过程。
这是lucene的使用案例,实现了对word文档中的关键字检索,并将检索出的内容高亮打印出来
Lucene是apache软件基金会4 jakarta项目组的一个子项目,是一个开放源代码的全文检索引擎工具包,但它不是一个完整的全文检索引擎
这里面是lucene的相关学习资料,特别适合新手学习。
lucene帮助文档解压之后是.chm格式的。
lucene索引工具包官方开发文档,适用版本4.6.0