Resource description:
/* GUI code */
package org.apache.lucene.demo;

import java.awt.Button;
import java.awt.Frame;
import java.awt.TextField;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/** Simple GUI front end for the Lucene search demo. */
public class SearchFiles1 extends Frame implements ActionListener {

  String s;
  TextField useName;   // query input
  TextField pwd;       // result display
  Button logButton;    // triggers the search

  /** Use the norms from one field for all fields. Norms are read into memory,
   * using a byte of memory per document per searched field. This can cause
   * search of large collections with a large number of fields to run out of
   * memory. If all of the fields contain only a single token, then the norms
   * are all identical, and a single norm vector may be shared. */
  private static class OneNormsReader extends FilterIndexReader {
    private String field;

    public OneNormsReader(IndexReader in, String field) {
      super(in);
      this.field = field;
    }

    @Override
    public byte[] norms(String field) throws IOException {
      return in.norms(this.field);
    }
  }

  private SearchFiles1() {
    setLayout(null);

    // create the text fields and give each a width
    useName = new TextField("", 10);
    pwd = new TextField("", 1000);

    // initialize the button and give it a label
    logButton = new Button("Search");

    // position the GUI components by specifying the x and y
    // coordinates and the width and height of each
    useName.setBounds(50, 50, 150, 30);
    logButton.setBounds(250, 50, 100, 30);
    pwd.setBounds(50, 100, 500, 500);

    // now that all is set, add the components to the frame
    add(useName);
    add(pwd);
    add(logButton);
    logButton.addActionListener(this);

    // close the frame cleanly when the user clicks the window's close button
    addWindowListener(new WindowAdapter() {
      @Override
      public void windowClosing(WindowEvent e) {
        System.exit(0);
      }
    });
  }

  public void actionPerformed(ActionEvent evt) {
    s = useName.getText();
    try {
      // doQuery currently prints its results to System.out and returns null,
      // so nothing is written back into the pwd field yet
      doQuery(s);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
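  // Hedged sketch (not part of the original listing): doQuery() runs on the
  // AWT event-dispatch thread inside actionPerformed, which freezes the UI
  // until the search finishes. One way to keep the frame responsive is to run
  // the query on a worker thread and push the result back with
  // EventQueue.invokeLater. The method below is a hypothetical alternative,
  // assuming doQuery is changed to return its output instead of printing it.
  private void searchInBackground(final String queryText) {
    new Thread(new Runnable() {
      public void run() {
        try {
          final String result = doQuery(queryText);
          java.awt.EventQueue.invokeLater(new Runnable() {
            public void run() {
              pwd.setText(result != null ? result : "(no results captured)");
            }
          });
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    }).start();
  }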
  /** Entry point: opens the search window. */
  public static void main(String[] args) throws Exception {
    Frame f = new SearchFiles1();
    f.setSize(500, 500);
    f.setVisible(true);
    f.setResizable(true);
  }

  // The original command-line main body (flag parsing for -index, -field,
  // -repeat, -queries, -raw, -norms and -paging, followed by the query loop)
  // was commented out here; its logic now lives in doQuery(String) below.
  /**
   * This method uses a custom Collector implementation which simply prints out
   * the docId and score of every matching document.
   *
   * This simulates the streaming search use case, where all hits are supposed
   * to be processed, regardless of their relevance.
   */
  public static void doStreamingSearch(final Searcher searcher, Query query) throws IOException {
    Collector streamingHitCollector = new Collector() {
      private Scorer scorer;
      private int docBase;

      // print the index-wide docId (segment-relative doc plus docBase) and
      // score of every matching document
      @Override
      public void collect(int doc) throws IOException {
        System.out.println("doc=" + (docBase + doc) + " score=" + scorer.score());
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;
      }

      @Override
      public void setNextReader(IndexReader reader, int docBase) throws IOException {
        this.docBase = docBase;
      }

      @Override
      public void setScorer(Scorer scorer) throws IOException {
        this.scorer = scorer;
      }
    };

    searcher.search(query, streamingHitCollector);
  }

  String doQuery(String s) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles"
        + " [-index dir] [-field f] [-repeat n] [-queries file] [-raw]"
        + " [-norms field] [-paging hitsPerPage]";
    usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search.";
    if ("-h".equals(s) || "-help".equals(s)) {
      System.out.println(usage);
      System.exit(0);
    }

    // The command-line flag parsing from the original demo was commented out;
    // the defaults below are always used.
    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String normsField = null;
    boolean paging = true;
    int hitsPerPage = 10;

    IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true
    if (normsField != null)
      reader = new OneNormsReader(reader, normsField);

    Searcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    BufferedReader in = null;
    if (queries != null) {
      in = new BufferedReader(new FileReader(queries));
    } else {
      in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer);

    // The original demo looped here, reading one query per line from stdin.
    // With the GUI, each call processes the single query string passed in, so
    // the while(true) loop is gone (with a fixed string it never terminated).
    String line = s;
    if (line != null) {
      line = line.trim();
      if (line.length() > 0) {
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
          Date start = new Date();
          for (int i = 0; i < repeat; i++) {
            searcher.search(query, null, 100);
          }
          Date end = new Date();
          System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        if (paging) {
          doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null);
        } else {
          doStreamingSearch(searcher, query);
        }
      }
    }
    reader.close();
    return null;
  }
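  // Hypothetical variant (not in the original listing): the streaming
  // Collector above prints every hit; the same pattern can also compute an
  // exact hit count, since neither the Scorer nor the docBase is needed for
  // counting. A minimal sketch:
  public static int countHits(Searcher searcher, Query query) throws IOException {
    final int[] count = new int[1];
    searcher.search(query, new Collector() {
      @Override
      public void collect(int doc) { count[0]++; }
      @Override
      public boolean acceptsDocsOutOfOrder() { return true; }
      @Override
      public void setNextReader(IndexReader reader, int docBase) { }
      @Override
      public void setScorer(Scorer scorer) { }
    });
    return count[0];
  }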
  /**
   * This demonstrates a typical paging search scenario, where the search engine
   * presents pages of size n to the user. The user can then go to the next page
   * if interested in the next hits.
   *
   * When the query is executed for the first time, only enough results are
   * collected to fill 5 result pages. If the user wants to page beyond this
   * limit, the query is executed another time and all hits are collected.
   */
  public static void doPagingSearch(BufferedReader in, Searcher searcher, Query query,
                                    int hitsPerPage, boolean raw, boolean interactive) throws IOException {

    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(5 * hitsPerPage, false);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    int numTotalHits = collector.getTotalHits();
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    while (true) {
      if (end > hits.length) {
        System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
            + " total matching documents collected.");
        System.out.println("Collect more (y/n) ?");
        String line = in.readLine();
        if (line.length() == 0 || line.charAt(0) == 'n') {
          break;
        }

        collector = TopScoreDocCollector.create(numTotalHits, false);
        searcher.search(query, collector);
        hits = collector.topDocs().scoreDocs;
      }

      end = Math.min(hits.length, start + hitsPerPage);

      for (int i = start; i < end; i++) {
        if (raw) { // output raw format
          System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
          continue;
        }

        Document doc = searcher.doc(hits[i].doc);
        String path = doc.get("path");
        if (path != null) {
          System.out.println((i + 1) + ". " + path);
          String title = doc.get("title");
          if (title != null) {
            System.out.println("   Title: " + title);
          }
        } else {
          System.out.println((i + 1) + ". No path for this document");
        }
      }

      if (!interactive) {
        break;
      }

      if (numTotalHits >= end) {
        boolean quit = false;
        while (true) {
          System.out.print("Press ");
          if (start - hitsPerPage >= 0) {
            System.out.print("(p)revious page, ");
          }
          if (start + hitsPerPage < numTotalHits) {
            System.out.print("(n)ext page, ");
          }
          System.out.println("(q)uit or enter number to jump to a page.");

          String line = in.readLine();
          if (line.length() == 0 || line.charAt(0) == 'q') {
            quit = true;
            break;
          }
          if (line.charAt(0) == 'p') {
            start = Math.max(0, start - hitsPerPage);
            break;
          } else if (line.charAt(0) == 'n') {
            if (start + hitsPerPage < numTotalHits) {
              start += hitsPerPage;
            }
            break;
          } else {
            int page = Integer.parseInt(line);
            if ((page - 1) * hitsPerPage < numTotalHits) {
              start = (page - 1) * hitsPerPage;
              break;
            } else {
              System.out.println("No such page");
            }
          }
        }
        if (quit) break;
        end = Math.min(numTotalHits, start + hitsPerPage);
      }
    }
  }
}
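/* Hypothetical helper (not part of the original package): IndexFiles2 below
   writes five separate indexes (index, index1 ... index4), while SearchFiles1
   above only opens "index". This sketch merges all five with Lucene's
   MultiReader so one IndexSearcher sees every document; the directory names
   are assumptions taken from IndexFiles2.main. */
package org.apache.lucene.demo;

import java.io.File;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.FSDirectory;

public class MultiIndexSearcherSketch {
  /** Opens index, index1 ... index4 read-only and wraps them in one reader. */
  public static Searcher openAll() throws IOException {
    String[] dirs = {"index", "index1", "index2", "index3", "index4"};
    IndexReader[] subReaders = new IndexReader[dirs.length];
    for (int i = 0; i < dirs.length; i++) {
      subReaders[i] = IndexReader.open(FSDirectory.open(new File(dirs[i])), true);
    }
    // Closing the MultiReader also closes the sub-readers it wraps.
    return new IndexSearcher(new MultiReader(subReaders));
  }
}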
/* Multithreading code */
package org.apache.lucene.demo;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Date;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/** Indexes one directory tree per thread, each into its own index directory. */
public class IndexFiles2 extends Thread {

  String path;    // root of the document tree this thread indexes
  File INDEX_D;   // index directory this thread writes to

  private IndexFiles2(String s, File d) {
    path = s;
    INDEX_D = d;
  }

  static final File INDEX_DIR  = new File("index");
  static final File INDEX_DIR1 = new File("index1");
  static final File INDEX_DIR2 = new File("index2");
  static final File INDEX_DIR3 = new File("index3");
  static final File INDEX_DIR4 = new File("index4");

  /** Index all text files under this thread's directory. */
  @Override
  public void run() {
    // note: System.exit here terminates the whole JVM, so one bad directory
    // stops every indexing thread, not just this one
    if (INDEX_D.exists()) {
      System.out.println("Cannot save index to '" + INDEX_D
          + "' directory, please delete it first");
      System.exit(1);
    }

    final File docDir = new File(path);
    if (!docDir.exists() || !docDir.canRead()) {
      System.out.println("Document directory '" + docDir.getAbsolutePath()
          + "' does not exist or is not readable, please check the path");
      System.exit(1);
    }

    Date start = new Date();
    try {
      IndexWriter writer = new IndexWriter(FSDirectory.open(INDEX_D),
          new StandardAnalyzer(Version.LUCENE_CURRENT), true,
          IndexWriter.MaxFieldLength.LIMITED);
      System.out.println("Indexing to directory '" + INDEX_D + "'...");
      indexDocs(writer, docDir);
      System.out.println("Optimizing...");
      writer.optimize();
      writer.close();

      Date end = new Date();
      System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
      System.out.println(" caught a " + e.getClass()
          + "\n with message: " + e.getMessage());
    }
  }

  public static void main(String args[]) {
    String s  = "E:/projecttrial/p";
    String s1 = "E:/projecttrial/p1";
    String s2 = "E:/projecttrial/p2";
    String s3 = "E:/projecttrial/p3";
    String s4 = "E:/projecttrial/p4";

    IndexFiles2 a  = new IndexFiles2(s,  INDEX_DIR);
    IndexFiles2 a1 = new IndexFiles2(s1, INDEX_DIR1);
    IndexFiles2 a2 = new IndexFiles2(s2, INDEX_DIR2);
    IndexFiles2 a3 = new IndexFiles2(s3, INDEX_DIR3);
    IndexFiles2 a4 = new IndexFiles2(s4, INDEX_DIR4);

    a.start();
    a1.start();
    a2.start();
    a3.start();
    a4.start();
  }

  static void indexDocs(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (file.canRead()) {
      if (file.isDirectory()) {
        String[] files = file.list(); // an IO error could occur
        if (files != null) {
          for (int i = 0; i < files.length; i++) {
            indexDocs(writer, new File(file, files[i]));
          }
        }
      } else {
        System.out.println("adding " + file);
        try {
          writer.addDocument(FileDocument.Document(file));
        } catch (FileNotFoundException fnfe) {
          // at least on Windows, some temporary files raise this exception
          // with an "access denied" message; checking whether the file can
          // be read beforehand doesn't help, so skip the file
        }
      }
    }
  }
}
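/* Hedged design note (not in the original source): Lucene's IndexWriter is
   itself thread-safe, so instead of one index per thread, the five threads
   could share a single writer and produce one combined index. A minimal
   sketch under that assumption, reusing IndexFiles2.indexDocs; the
   "index-combined" directory name is hypothetical: */
package org.apache.lucene.demo;

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class SharedWriterSketch {
  public static void main(String[] args) throws Exception {
    final IndexWriter writer = new IndexWriter(
        FSDirectory.open(new File("index-combined")),
        new StandardAnalyzer(Version.LUCENE_CURRENT), true,
        IndexWriter.MaxFieldLength.LIMITED);

    String[] roots = {"E:/projecttrial/p", "E:/projecttrial/p1",
                      "E:/projecttrial/p2", "E:/projecttrial/p3",
                      "E:/projecttrial/p4"};
    Thread[] workers = new Thread[roots.length];
    for (int i = 0; i < roots.length; i++) {
      final File root = new File(roots[i]);
      workers[i] = new Thread(new Runnable() {
        public void run() {
          try {
            // indexDocs is static and package-private, so it can be reused here
            IndexFiles2.indexDocs(writer, root);
          } catch (IOException e) {
            e.printStackTrace();
          }
        }
      });
      workers[i].start();
    }
    // wait for all workers before optimizing and closing the shared writer
    for (int i = 0; i < workers.length; i++) workers[i].join();
    writer.optimize();
    writer.close();
  }
}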
Note: this source package does not currently contain source files that can be displayed directly; please download the source package.