Here are examples of using the Java API class org.apache.lucene.index.IndexReader, taken from open-source projects.
1. TestTermRangeQuery#testInclusive()
Project: lucene-solr
File: TestTermRangeQuery.java
/** An inclusive [A..C] string range must match both endpoints, across re-indexing and additions. */
public void testInclusive() throws Exception {
  Query rangeQuery = TermRangeQuery.newStringRange("content", "A", "C", true, true);

  initializeIndex(new String[] { "A", "B", "C", "D" });
  IndexReader ir = DirectoryReader.open(dir);
  IndexSearcher is = newSearcher(ir);
  ScoreDoc[] docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("A,B,C,D - A,B,C in range", 3, docs.length);
  ir.close();

  // Re-index without "C": only the endpoints-inclusive hits A and B remain.
  initializeIndex(new String[] { "A", "B", "D" });
  ir = DirectoryReader.open(dir);
  is = newSearcher(ir);
  docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("A,B,D - A and B in range", 2, docs.length);
  ir.close();

  // Adding "C" back brings the count up again.
  addDoc("C");
  ir = DirectoryReader.open(dir);
  is = newSearcher(ir);
  docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("C added - A, B, C in range", 3, docs.length);
  ir.close();
}
2. TestTermRangeQuery#testExclusive()
Project: lucene-solr
File: TestTermRangeQuery.java
/** An exclusive (A..C) string range must exclude both endpoints, leaving only B. */
public void testExclusive() throws Exception {
  Query rangeQuery = TermRangeQuery.newStringRange("content", "A", "C", false, false);

  initializeIndex(new String[] { "A", "B", "C", "D" });
  IndexReader ir = DirectoryReader.open(dir);
  IndexSearcher is = newSearcher(ir);
  ScoreDoc[] docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("A,B,C,D, only B in range", 1, docs.length);
  ir.close();

  initializeIndex(new String[] { "A", "B", "D" });
  ir = DirectoryReader.open(dir);
  is = newSearcher(ir);
  docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("A,B,D, only B in range", 1, docs.length);
  ir.close();

  // "C" is the excluded upper endpoint, so the hit count must not change.
  addDoc("C");
  ir = DirectoryReader.open(dir);
  is = newSearcher(ir);
  docs = is.search(rangeQuery, 1000).scoreDocs;
  assertEquals("C added, still only B in range", 1, docs.length);
  ir.close();
}
3. MinDocQueryTests#testRandom()
Project: elasticsearch
File: MinDocQueryTests.java
/** For every cutoff i, MinDocQuery(i) must match exactly the numDocs - i docs at or above it. */
public void testRandom() throws IOException {
  final int docCount = randomIntBetween(10, 200);
  final Directory store = newDirectory();
  final RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  final Document emptyDoc = new Document();
  for (int added = 0; added < docCount; ++added) {
    iw.addDocument(emptyDoc);
  }
  final IndexReader ir = iw.getReader();
  final IndexSearcher is = newSearcher(ir);
  for (int cutoff = 0; cutoff <= docCount; ++cutoff) {
    assertEquals(docCount - cutoff, is.count(new MinDocQuery(cutoff)));
  }
  iw.close();
  ir.close();
  store.close();
}
4. SearcherTaxonomyManager#refreshIfNeeded()
Project: lucene-solr
File: SearcherTaxonomyManager.java
/**
 * Reopens the searcher/taxonomy pair when the underlying index has changed.
 * Returns null when nothing changed, per the ReferenceManager contract.
 *
 * @param ref the currently published pair
 * @throws IOException on reader reopen failure
 * @throws IllegalStateException if the taxonomy was wholesale replaced
 */
@Override
protected SearcherAndTaxonomy refreshIfNeeded(SearcherAndTaxonomy ref) throws IOException {
  // Must re-open searcher first, otherwise we may get a
  // new reader that references ords not yet known to the
  // taxonomy reader:
  final IndexReader r = ref.searcher.getIndexReader();
  final IndexReader newReader = DirectoryReader.openIfChanged((DirectoryReader) r);
  if (newReader == null) {
    // Index unchanged: signal "no refresh needed".
    return null;
  } else {
    DirectoryTaxonomyReader tr = TaxonomyReader.openIfChanged(ref.taxonomyReader);
    if (tr == null) {
      // Taxonomy unchanged: keep the old reader alive with an extra reference.
      ref.taxonomyReader.incRef();
      tr = ref.taxonomyReader;
    } else if (taxoWriter != null && taxoWriter.getTaxonomyEpoch() != taxoEpoch) {
      // Epoch changed => replaceTaxonomy was called; release the readers we just
      // opened before failing, so nothing leaks.
      IOUtils.close(newReader, tr);
      throw new IllegalStateException("DirectoryTaxonomyWriter.replaceTaxonomy was called, which is not allowed when using SearcherTaxonomyManager");
    }
    return new SearcherAndTaxonomy(SearcherManager.getSearcher(searcherFactory, newReader, r), tr);
  }
}
5. TestWildcard#testQuestionmark()
Project: lucene-solr
File: TestWildcard.java
/** * Tests Wildcard queries with a question mark. * * @throws IOException if an error occurs */ public void testQuestionmark() throws IOException { Directory indexStore = getIndexStore("body", new String[] { "metal", "metals", "mXtals", "mXtXls" }); IndexReader reader = DirectoryReader.open(indexStore); IndexSearcher searcher = newSearcher(reader); Query query1 = new WildcardQuery(new Term("body", "m?tal")); Query query2 = new WildcardQuery(new Term("body", "metal?")); Query query3 = new WildcardQuery(new Term("body", "metals?")); Query query4 = new WildcardQuery(new Term("body", "m?t?ls")); Query query5 = new WildcardQuery(new Term("body", "M?t?ls")); Query query6 = new WildcardQuery(new Term("body", "meta??")); assertMatches(searcher, query1, 1); assertMatches(searcher, query2, 1); assertMatches(searcher, query3, 0); assertMatches(searcher, query4, 3); assertMatches(searcher, query5, 0); // Query: 'meta??' matches 'metals' not 'metal' assertMatches(searcher, query6, 1); reader.close(); indexStore.close(); }
6. TestWildcard#testPrefixTerm()
Project: lucene-solr
File: TestWildcard.java
/**
 * Tests if a WildcardQuery that has only a trailing * in the term is
 * rewritten to a single PrefixQuery. The boost and rewriteMethod should be
 * preserved.
 */
public void testPrefixTerm() throws IOException {
  Directory store = getIndexStore("field", new String[] { "prefix", "prefixx" });
  IndexReader ir = DirectoryReader.open(store);
  IndexSearcher is = newSearcher(ir);

  MultiTermQuery wildcard = new WildcardQuery(new Term("field", "prefix*"));
  assertMatches(is, wildcard, 2);

  // A bare "*" matches everything and must not go through the automaton path.
  wildcard = new WildcardQuery(new Term("field", "*"));
  assertMatches(is, wildcard, 2);
  Terms fieldTerms = MultiFields.getTerms(is.getIndexReader(), "field");
  assertFalse(wildcard.getTermsEnum(fieldTerms).getClass().getSimpleName().contains("AutomatonTermsEnum"));

  ir.close();
  store.close();
}
7. TestWildcard#testEmptyTerm()
Project: lucene-solr
File: TestWildcard.java
/** Tests if a WildcardQuery with an empty term is rewritten to an empty BooleanQuery */
public void testEmptyTerm() throws IOException {
  Directory store = getIndexStore("field", new String[] { "nowildcard", "nowildcardx" });
  IndexReader ir = DirectoryReader.open(store);
  IndexSearcher is = newSearcher(ir);

  MultiTermQuery emptyWildcard = new WildcardQuery(new Term("field", ""));
  emptyWildcard.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
  assertMatches(is, emptyWildcard, 0);
  Query rewritten = is.rewrite(emptyWildcard);
  assertTrue(rewritten instanceof MatchNoDocsQuery);

  ir.close();
  store.close();
}
8. TestWildcard#testTermWithoutWildcard()
Project: lucene-solr
File: TestWildcard.java
/**
 * Tests if a WildcardQuery that has no wildcard in the term is rewritten to a single
 * TermQuery. The boost should be preserved, and the rewrite should return
 * a ConstantScoreQuery if the WildcardQuery had a ConstantScore rewriteMethod.
 */
public void testTermWithoutWildcard() throws IOException {
  Directory store = getIndexStore("field", new String[] { "nowildcard", "nowildcardx" });
  IndexReader ir = DirectoryReader.open(store);
  IndexSearcher is = newSearcher(ir);

  MultiTermQuery wildcard = new WildcardQuery(new Term("field", "nowildcard"));
  assertMatches(is, wildcard, 1);

  // Each rewrite method should degrade gracefully to the matching simple query type.
  wildcard.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
  Query rewritten = is.rewrite(wildcard);
  assertTrue(rewritten instanceof TermQuery);

  wildcard.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
  rewritten = is.rewrite(wildcard);
  assertTrue(rewritten instanceof MultiTermQueryConstantScoreWrapper);

  wildcard.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
  rewritten = is.rewrite(wildcard);
  assertTrue(rewritten instanceof ConstantScoreQuery);

  ir.close();
  store.close();
}
9. TestTotalHitCountCollector#testBasics()
Project: lucene-solr
File: TestTotalHitCountCollector.java
/** TotalHitCountCollector over a MatchAllDocsQuery must report every indexed document. */
public void testBasics() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  for (int docId = 0; docId < 5; docId++) {
    Document doc = new Document();
    doc.add(new StringField("string", "a" + docId, Field.Store.NO));
    doc.add(new StringField("string", "b" + docId, Field.Store.NO));
    iw.addDocument(doc);
  }
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  TotalHitCountCollector collector = new TotalHitCountCollector();
  is.search(new MatchAllDocsQuery(), collector);
  assertEquals(5, collector.getTotalHits());
  ir.close();
  store.close();
}
10. TestTermRangeQuery#testTopTermsRewrite()
Project: lucene-solr
File: TestTermRangeQuery.java
/** This test should not be here, but it tests the fuzzy query rewrite mode (TOP_TERMS_SCORING_BOOLEAN_REWRITE)
 * with constant score and checks, that only the lower end of terms is put into the range */
public void testTopTermsRewrite() throws Exception {
  initializeIndex(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K" });
  IndexReader ir = DirectoryReader.open(dir);
  IndexSearcher is = newSearcher(ir);
  TermRangeQuery rangeQuery = TermRangeQuery.newStringRange("content", "B", "J", true, true);
  checkBooleanTerms(is, rangeQuery, "B", "C", "D", "E", "F", "G", "H", "I", "J");

  // Shrink the clause budget and verify only the lowest terms survive;
  // always restore the global setting so other tests are unaffected.
  final int previousMax = BooleanQuery.getMaxClauseCount();
  try {
    BooleanQuery.setMaxClauseCount(3);
    checkBooleanTerms(is, rangeQuery, "B", "C", "D");
  } finally {
    BooleanQuery.setMaxClauseCount(previousMax);
  }
  ir.close();
}
11. TestTermRangeQuery#testAllDocs()
Project: lucene-solr
File: TestTermRangeQuery.java
public void testAllDocs() throws Exception { initializeIndex(new String[] { "A", "B", "C", "D" }); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); TermRangeQuery query = new TermRangeQuery("content", null, null, true, true); assertEquals(4, searcher.search(query, 1000).scoreDocs.length); query = TermRangeQuery.newStringRange("content", "", null, true, true); assertEquals(4, searcher.search(query, 1000).scoreDocs.length); query = TermRangeQuery.newStringRange("content", "", null, true, false); assertEquals(4, searcher.search(query, 1000).scoreDocs.length); // and now another one query = TermRangeQuery.newStringRange("content", "B", null, true, true); assertEquals(3, searcher.search(query, 1000).scoreDocs.length); reader.close(); }
12. TestSloppyPhraseQuery#testInfiniteFreq1()
Project: lucene-solr
File: TestSloppyPhraseQuery.java
// LUCENE-3215 public void testInfiniteFreq1() throws Exception { String document = "drug druggy drug drug drug"; Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_NOT_STORED))); iw.addDocument(doc); IndexReader ir = iw.getReader(); iw.close(); IndexSearcher is = newSearcher(ir); PhraseQuery.Builder builder = new PhraseQuery.Builder(); builder.add(new Term("lyrics", "drug"), 1); builder.add(new Term("lyrics", "drug"), 3); builder.setSlop(1); PhraseQuery pq = builder.build(); // "drug the drug"~1 assertSaneScoring(pq, is); ir.close(); dir.close(); }
13. TestSearcherManager#testEvilSearcherFactory()
Project: lucene-solr
File: TestSearcherManager.java
/**
 * A SearcherFactory that ignores the reader it is given and returns a searcher
 * over some unrelated reader must be rejected by SearcherManager with
 * IllegalStateException, for both directory- and writer-based constructors.
 */
public void testEvilSearcherFactory() throws Exception {
  final Random random = random();
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random, dir);
  w.commit();
  // Independent reader the "evil" factory will wrongly hand back.
  final IndexReader other = DirectoryReader.open(dir);
  final SearcherFactory theEvilOne = new SearcherFactory() {
    @Override
    public IndexSearcher newSearcher(IndexReader ignored, IndexReader previous) {
      return LuceneTestCase.newSearcher(other);
    }
  };
  expectThrows(IllegalStateException.class, () -> {
    new SearcherManager(dir, theEvilOne);
  });
  expectThrows(IllegalStateException.class, () -> {
    new SearcherManager(w.w, random.nextBoolean(), false, theEvilOne);
  });
  w.close();
  other.close();
  dir.close();
}
14. TestScoreCachingWrappingScorer#testGetScores()
Project: lucene-solr
File: TestScoreCachingWrappingScorer.java
public void testGetScores() throws Exception { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(ir); Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher, true, 1f); Scorer s = new SimpleScorer(fake); ScoreCachingCollector scc = new ScoreCachingCollector(scores.length); scc.setScorer(s); // We need to iterate on the scorer so that its doc() advances. int doc; while ((doc = s.iterator().nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { scc.collect(doc); } for (int i = 0; i < scores.length; i++) { assertEquals(scores[i], scc.mscores[i], 0f); } ir.close(); directory.close(); }
15. TestPrefixQuery#testMatchAll()
Project: lucene-solr
File: TestPrefixQuery.java
/** A PrefixQuery with an empty prefix behaves as match-all for the field. */
public void testMatchAll() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  doc.add(newStringField("field", "field", Field.Store.YES));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  PrefixQuery emptyPrefix = new PrefixQuery(new Term("field", ""));
  IndexSearcher is = newSearcher(ir);
  assertEquals(1, is.search(emptyPrefix, 1000).totalHits);
  iw.close();
  ir.close();
  store.close();
}
16. TestPointQueries#testBasicMultiDimPointInSetQuery()
Project: lucene-solr
File: TestPointQueries.java
/** Multi-dimensional point-in-set queries must match only the exact (17, 42) tuple. */
public void testBasicMultiDimPointInSetQuery() throws Exception {
  Directory store = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig();
  config.setCodec(getCodec());
  IndexWriter writer = new IndexWriter(store, config);

  Document doc = new Document();
  doc.add(new IntPoint("int", 17, 42));
  writer.addDocument(doc);

  IndexReader reader = DirectoryReader.open(writer);
  IndexSearcher searcher = newSearcher(reader, false);
  assertEquals(0, searcher.count(newMultiDimIntSetQuery("int", 2, 17, 41)));
  assertEquals(1, searcher.count(newMultiDimIntSetQuery("int", 2, 17, 42)));
  // Extra non-matching tuples in the set must not disturb the match.
  assertEquals(1, searcher.count(newMultiDimIntSetQuery("int", 2, -7, -7, 17, 42)));
  assertEquals(1, searcher.count(newMultiDimIntSetQuery("int", 2, 17, 42, -14, -14)));

  writer.close();
  reader.close();
  store.close();
}
17. TestPhraseQuery#testPhraseQueryWithStopAnalyzer()
Project: lucene-solr
File: TestPhraseQuery.java
public void testPhraseQueryWithStopAnalyzer() throws Exception { Directory directory = newDirectory(); Analyzer stopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(stopAnalyzer)); Document doc = new Document(); doc.add(newTextField("field", "the stop words are here", Field.Store.YES)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(reader); // valid exact phrase query PhraseQuery query = new PhraseQuery("field", "stop", "words"); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); QueryUtils.check(random(), query, searcher); reader.close(); directory.close(); }
18. TestNot#testNot()
Project: lucene-solr
File: TestNot.java
/** A MUST_NOT clause must veto a doc even when a SHOULD clause matches it. */
public void testNot() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document d1 = new Document();
  d1.add(newTextField("field", "a b", Field.Store.YES));
  iw.addDocument(d1);
  IndexReader ir = iw.getReader();
  IndexSearcher is = newSearcher(ir);

  BooleanQuery.Builder bq = new BooleanQuery.Builder();
  bq.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
  bq.add(new TermQuery(new Term("field", "b")), BooleanClause.Occur.MUST_NOT);
  ScoreDoc[] docs = is.search(bq.build(), 1000).scoreDocs;
  assertEquals(0, docs.length);

  iw.close();
  ir.close();
  store.close();
}
19. TestMultiPhraseQuery#testNoDocs()
Project: lucene-solr
File: TestMultiPhraseQuery.java
public void testNoDocs() throws Exception { Directory indexStore = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a note", "note", writer); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); MultiPhraseQuery.Builder qb = new MultiPhraseQuery.Builder(); qb.add(new Term("body", "a")); qb.add(new Term[] { new Term("body", "nope"), new Term("body", "nope") }); MultiPhraseQuery q = qb.build(); assertEquals("Wrong number of hits", 0, searcher.search(q, 1).totalHits); // just make sure no exc: searcher.explain(q, 0); writer.close(); reader.close(); indexStore.close(); }
20. TestMultiPhraseQuery#testMultiExactWithRepeats()
Project: lucene-solr
File: TestMultiPhraseQuery.java
public void testMultiExactWithRepeats() throws IOException { Directory indexStore = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a b c d e f g h i k", writer); IndexReader r = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(r); MultiPhraseQuery.Builder qb = new MultiPhraseQuery.Builder(); qb.add(new Term[] { new Term("body", "a"), new Term("body", "d") }, 0); qb.add(new Term[] { new Term("body", "a"), new Term("body", "f") }, 2); // should match on "a b" assertEquals(1, searcher.search(qb.build(), 1).totalHits); r.close(); indexStore.close(); }
21. TestMultiPhraseQuery#testMultiSloppyWithRepeats()
Project: lucene-solr
File: TestMultiPhraseQuery.java
//LUCENE-3821 fixes sloppy phrase scoring, except for this known problem @Ignore public void testMultiSloppyWithRepeats() throws IOException { Directory indexStore = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("a b c d e f g h i k", writer); IndexReader r = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(r); MultiPhraseQuery.Builder qb = new MultiPhraseQuery.Builder(); // this will fail, when the scorer would propagate [a] rather than [a,b], qb.add(new Term[] { new Term("body", "a"), new Term("body", "b") }); qb.add(new Term[] { new Term("body", "a") }); qb.setSlop(6); // should match on "a b" assertEquals(1, searcher.search(qb.build(), 1).totalHits); r.close(); indexStore.close(); }
22. TestMultiPhraseQuery#testTall()
Project: lucene-solr
File: TestMultiPhraseQuery.java
// LUCENE-2580 public void testTall() throws IOException { Directory indexStore = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); add("blueberry chocolate pie", writer); add("blueberry chocolate tart", writer); IndexReader r = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(r); MultiPhraseQuery.Builder qb = new MultiPhraseQuery.Builder(); qb.add(new Term("body", "blueberry")); qb.add(new Term("body", "chocolate")); qb.add(new Term[] { new Term("body", "pie"), new Term("body", "tart") }); assertEquals(2, searcher.search(qb.build(), 1).totalHits); r.close(); indexStore.close(); }
23. TestLRUQueryCache#testRefuseToCacheTooLargeEntries()
Project: lucene-solr
File: TestLRUQueryCache.java
public void testRefuseToCacheTooLargeEntries() throws IOException { Directory dir = newDirectory(); final RandomIndexWriter w = new RandomIndexWriter(random(), dir); for (int i = 0; i < 100; ++i) { w.addDocument(new Document()); } IndexReader reader = w.getReader(); // size of 1 byte final LRUQueryCache queryCache = new LRUQueryCache(1, 1, context -> random().nextBoolean()); final IndexSearcher searcher = newSearcher(reader); searcher.setQueryCache(queryCache); searcher.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); searcher.count(new MatchAllDocsQuery()); assertEquals(0, queryCache.getCacheCount()); assertEquals(0, queryCache.getEvictionCount()); reader.close(); w.close(); dir.close(); }
24. TestFuzzyQuery#testBoostOnlyRewrite()
Project: lucene-solr
File: TestFuzzyQuery.java
/** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. */ public void testBoostOnlyRewrite() throws Exception { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("Lucene", writer); addDoc("Lucene", writer); addDoc("Lucenne", writer); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); writer.close(); FuzzyQuery query = new FuzzyQuery(new Term("field", "lucene")); query.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(50)); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. assertEquals("Lucene", reader.document(hits[0].doc).get("field")); assertEquals("Lucene", reader.document(hits[1].doc).get("field")); assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); reader.close(); directory.close(); }
25. TestFuzzyQuery#testBasicPrefix()
Project: lucene-solr
File: TestFuzzyQuery.java
/** A fuzzy query with a required prefix of 1 still matches its exact term. */
public void testBasicPrefix() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  addDoc("abc", iw);
  IndexReader ir = iw.getReader();
  IndexSearcher is = newSearcher(ir);
  iw.close();

  FuzzyQuery fuzzy = new FuzzyQuery(new Term("field", "abc"), FuzzyQuery.defaultMaxEdits, 1);
  ScoreDoc[] docs = is.search(fuzzy, 1000).scoreDocs;
  assertEquals(1, docs.length);

  ir.close();
  store.close();
}
26. TestFieldValueQuery#testFieldExistsButNoDocsHaveField()
Project: lucene-solr
File: TestFieldValueQuery.java
public void testFieldExistsButNoDocsHaveField() throws IOException { Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir); // 1st segment has the field, but 2nd one does not Document doc = new Document(); doc.add(new NumericDocValuesField("f", 1)); iw.addDocument(doc); iw.commit(); iw.addDocument(new Document()); iw.commit(); final IndexReader reader = iw.getReader(); final IndexSearcher searcher = newSearcher(reader); iw.close(); assertEquals(1, searcher.search(new FieldValueQuery("f"), 1).totalHits); reader.close(); dir.close(); }
27. TestFieldValueQuery#testAllDocsHaveField()
Project: lucene-solr
File: TestFieldValueQuery.java
/** When every doc has the doc-values field, FieldValueQuery matches them all. */
public void testAllDocsHaveField() throws IOException {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  doc.add(new NumericDocValuesField("f", 1));
  iw.addDocument(doc);
  iw.commit();
  final IndexReader ir = iw.getReader();
  final IndexSearcher is = newSearcher(ir);
  iw.close();
  assertEquals(1, is.search(new FieldValueQuery("f"), 1).totalHits);
  ir.close();
  store.close();
}
28. TestFieldValueQuery#testMissingField()
Project: lucene-solr
File: TestFieldValueQuery.java
/** FieldValueQuery on a field no doc has must match nothing. */
public void testMissingField() throws IOException {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  iw.addDocument(new Document());
  iw.commit();
  final IndexReader ir = iw.getReader();
  final IndexSearcher is = newSearcher(ir);
  iw.close();
  assertEquals(0, is.search(new FieldValueQuery("f"), 1).totalHits);
  ir.close();
  store.close();
}
29. TestControlledRealTimeReopenThread#testEvilSearcherFactory()
Project: lucene-solr
File: TestControlledRealTimeReopenThread.java
/**
 * As with SearcherManager, a factory that returns a searcher over a reader other
 * than the one it was handed must be rejected with IllegalStateException.
 */
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();
  // Independent reader the "evil" factory will wrongly hand back.
  final IndexReader other = DirectoryReader.open(dir);
  final SearcherFactory theEvilOne = new SearcherFactory() {
    @Override
    public IndexSearcher newSearcher(IndexReader ignored, IndexReader previous) {
      return LuceneTestCase.newSearcher(other);
    }
  };
  expectThrows(IllegalStateException.class, () -> {
    new SearcherManager(w.w, false, false, theEvilOne);
  });
  w.close();
  other.close();
  dir.close();
}
30. TestBooleanScorer#testEmbeddedBooleanScorer()
Project: lucene-solr
File: TestBooleanScorer.java
/** Make sure BooleanScorer can embed another
 *  BooleanScorer. */
public void testEmbeddedBooleanScorer() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  doc.add(newTextField("field", "doctors are people who prescribe medicines of which they know little, to cure diseases of which they know less, in human beings of whom they know nothing", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher searcher = new IndexSearcher(ir);

  BooleanQuery.Builder inner = new BooleanQuery.Builder();
  inner.add(new TermQuery(new Term("field", "little")), BooleanClause.Occur.SHOULD);
  inner.add(new TermQuery(new Term("field", "diseases")), BooleanClause.Occur.SHOULD);

  // Wrap the inner disjunction next to a query that forces bulk scoring.
  BooleanQuery.Builder outer = new BooleanQuery.Builder();
  outer.add(inner.build(), BooleanClause.Occur.SHOULD);
  outer.add(new CrazyMustUseBulkScorerQuery(), BooleanClause.Occur.SHOULD);

  assertEquals(1, searcher.search(outer.build(), 10).totalHits);
  ir.close();
  store.close();
}
31. TestBooleanQuery#testMinShouldMatchLeniency()
Project: lucene-solr
File: TestBooleanQuery.java
public void testMinShouldMatchLeniency() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); Document doc = new Document(); doc.add(newTextField("field", "a b c d", Field.Store.NO)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); IndexSearcher s = newSearcher(r); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD); bq.add(new TermQuery(new Term("field", "b")), BooleanClause.Occur.SHOULD); // No doc can match: BQ has only 2 clauses and we are asking for minShouldMatch=4 bq.setMinimumNumberShouldMatch(4); assertEquals(0, s.search(bq.build(), 1).totalHits); r.close(); w.close(); dir.close(); }
32. TestSpanTermQuery#testNoPositions()
Project: lucene-solr
File: TestSpanTermQuery.java
/** Span queries over a field indexed without positions must fail with a clear error. */
public void testNoPositions() throws IOException {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  // StringField omits position data, which span queries require.
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = new IndexSearcher(ir);

  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(query, 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  store.close();
}
33. TestSpanNotQuery#testNoPositions()
Project: lucene-solr
File: TestSpanNotQuery.java
/** SpanNotQuery over a field indexed without positions must fail with a clear error. */
public void testNoPositions() throws IOException {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  // StringField omits position data, which span queries require.
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = new IndexSearcher(ir);

  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  SpanTermQuery query2 = new SpanTermQuery(new Term("foo", "baz"));
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(new SpanNotQuery(query, query2), 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  store.close();
}
34. TestSpanNearQuery#testNoPositions()
Project: lucene-solr
File: TestSpanNearQuery.java
/** SpanNearQuery over a field indexed without positions must fail with a clear error. */
public void testNoPositions() throws IOException {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  // StringField omits position data, which span queries require.
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = new IndexSearcher(ir);

  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  SpanTermQuery query2 = new SpanTermQuery(new Term("foo", "baz"));
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(new SpanNearQuery(new SpanQuery[] { query, query2 }, 10, true), 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  store.close();
}
35. TestSimilarity2#testOmitTFAndNorms()
Project: lucene-solr
File: TestSimilarity2.java
/** make sure all sims work if TF and norms is omitted */
public void testOmitTFAndNorms() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  // Index docs-only, no norms: the most stripped-down postings a sim can see.
  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
  fieldType.setIndexOptions(IndexOptions.DOCS);
  fieldType.setOmitNorms(true);
  fieldType.freeze();
  doc.add(newField("foo", "bar", fieldType));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  store.close();
}
36. TestSimilarity2#testOmitTF()
Project: lucene-solr
File: TestSimilarity2.java
/** make sure all sims work if TF is omitted */
public void testOmitTF() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  // Docs-only postings: term frequency is not recorded.
  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
  fieldType.setIndexOptions(IndexOptions.DOCS);
  fieldType.freeze();
  doc.add(newField("foo", "bar", fieldType));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  store.close();
}
37. TestSimilarity2#testNoNorms()
Project: lucene-solr
File: TestSimilarity2.java
/** make sure we can retrieve when norms are disabled */
public void testNoNorms() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
  fieldType.setOmitNorms(true);
  fieldType.freeze();
  doc.add(newField("foo", "bar", fieldType));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  store.close();
}
38. TestSimilarity2#testEmptyTerm()
Project: lucene-solr
File: TestSimilarity2.java
/** similar to the above, however the field exists, but we query with a term that doesnt exist too */
public void testEmptyTerm() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    // OR an existing term with a term absent from the index.
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("foo", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  store.close();
}
39. TestSimilarity2#testEmptyField()
Project: lucene-solr
File: TestSimilarity2.java
/** similar to the above, but ORs the query with a real field */
public void testEmptyField() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    // Second clause targets a field that no document contains.
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("bar", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits);
  }
  ir.close();
  store.close();
}
40. TestSimilarity2#testEmptyIndex()
Project: lucene-solr
File: TestSimilarity2.java
/** because of stupid things like querynorm, it's possible we computeStats on a field that doesnt exist at all
 *  test this against a totally empty index, to make sure sims handle it */
public void testEmptyIndex() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), store);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    assertEquals(0, is.search(new TermQuery(new Term("foo", "bar")), 10).totalHits);
  }
  ir.close();
  store.close();
}
41. TestField#testIndexedBinaryField()
Project: lucene-solr
File: TestField.java
public void testIndexedBinaryField() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); BytesRef br = new BytesRef(new byte[5]); Field field = new StringField("binary", br, Field.Store.YES); assertEquals(br, field.binaryValue()); doc.add(field); w.addDocument(doc); IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("binary", br)), 1); assertEquals(1, hits.totalHits); Document storedDoc = s.doc(hits.scoreDocs[0].doc); assertEquals(br, storedDoc.getField("binary").binaryValue()); r.close(); w.close(); dir.close(); }
42. TestDocument#testPositionIncrementMultiFields()
Project: lucene-solr
File: TestDocument.java
public void testPositionIncrementMultiFields() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(makeDocumentWithFields()); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); PhraseQuery query = new PhraseQuery("indexed_not_tokenized", "test1", "test2"); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); doAssert(searcher.doc(hits[0].doc), true); writer.close(); reader.close(); dir.close(); }
43. TestDocument#testGetValuesForIndexedDocument()
Project: lucene-solr
File: TestDocument.java
/** * Tests {@link Document#getValues(String)} method for a Document retrieved * from an index. * * @throws Exception on error */ public void testGetValuesForIndexedDocument() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); writer.addDocument(makeDocumentWithFields()); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); // search for something that does exist Query query = new TermQuery(new Term("keyword", "test1")); // ensure that queries return expected results without DateFilter first ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); doAssert(searcher.doc(hits[0].doc), true); writer.close(); reader.close(); dir.close(); }
44. TestPerFieldPostingsFormat2#assertQuery()
Project: lucene-solr
File: TestPerFieldPostingsFormat2.java
public void assertQuery(Term t, Directory dir, int num) throws IOException { if (VERBOSE) { System.out.println("\nTEST: assertQuery " + t); } IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); TopDocs search = searcher.search(new TermQuery(t), num + 10); assertEquals(num, search.totalHits); reader.close(); }
45. PrintReaderTask#doLogic()
Project: lucene-solr
File: PrintReaderTask.java
@Override public int doLogic() throws Exception { Directory dir = getRunData().getDirectory(); IndexReader r = null; if (userData == null) r = DirectoryReader.open(dir); else r = DirectoryReader.open(OpenReaderTask.findIndexCommit(dir, userData)); System.out.println("--> numDocs:" + r.numDocs() + " dels:" + r.numDeletedDocs()); r.close(); return 1; }
46. OLuceneIndexManagerAbstract#size()
Project: orientdb
File: OLuceneIndexManagerAbstract.java
public long size(final ValuesTransformer<V> transformer) { IndexReader reader = null; IndexSearcher searcher = null; try { reader = getSearcher().getIndexReader(); } catch (IOException e) { OLogManager.instance().error(this, "Error on getting size of Lucene index", e); } finally { if (searcher != null) { release(searcher); } } return reader.numDocs(); }
47. TestBlockJoin#testAdvanceSingleParentSingleChild()
Project: lucene-solr
File: TestBlockJoin.java
public void testAdvanceSingleParentSingleChild() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document childDoc = new Document(); childDoc.add(newStringField("child", "1", Field.Store.NO)); Document parentDoc = new Document(); parentDoc.add(newStringField("parent", "1", Field.Store.NO)); w.addDocuments(Arrays.asList(childDoc, parentDoc)); IndexReader r = w.getReader(); w.close(); IndexSearcher s = newSearcher(r); Query tq = new TermQuery(new Term("child", "1")); BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "1"))); CheckJoinIndex.check(s.getIndexReader(), parentFilter); ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg); Weight weight = s.createNormalizedWeight(q, true); Scorer sc = weight.scorer(s.getIndexReader().leaves().get(0)); assertEquals(1, sc.iterator().advance(1)); r.close(); dir.close(); }
48. TestBlockJoin#testBoostBug()
Project: lucene-solr
File: TestBlockJoin.java
public void testBoostBug() throws Exception { final Directory dir = newDirectory(); final RandomIndexWriter w = new RandomIndexWriter(random(), dir); IndexReader r = w.getReader(); w.close(); IndexSearcher s = newSearcher(r); ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(new MatchNoDocsQuery(), new QueryBitSetProducer(new MatchAllDocsQuery()), ScoreMode.Avg); QueryUtils.check(random(), q, s); s.search(q, 10); BooleanQuery.Builder bqB = new BooleanQuery.Builder(); bqB.add(q, BooleanClause.Occur.MUST); BooleanQuery bq = bqB.build(); s.search(new BoostQuery(bq, 2f), 10); r.close(); dir.close(); }
49. HighlighterTest#searchIndex()
Project: lucene-solr
File: HighlighterTest.java
private void searchIndex() throws IOException, InvalidTokenOffsetsException { Query query = new TermQuery(new Term("t_text1", "random")); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); // This scorer can return negative idf -> null fragment Scorer scorer = new QueryTermScorer(query, searcher.getIndexReader(), "t_text1"); // This scorer doesn't use idf (patch version) //Scorer scorer = new QueryTermScorer( query, "t_text1" ); Highlighter h = new Highlighter(scorer); TopDocs hits = searcher.search(query, 10); for (int i = 0; i < hits.totalHits; i++) { Document doc = searcher.doc(hits.scoreDocs[i].doc); String result = h.getBestFragment(a, "t_text1", doc.get("t_text1")); if (VERBOSE) System.out.println("result:" + result); assertEquals("more <B>random</B> words for second field", result); } reader.close(); }
50. TestDirectoryTaxonomyWriter#testCommit()
Project: lucene-solr
File: TestDirectoryTaxonomyWriter.java
@Test public void testCommit() throws Exception { // Verifies that nothing is committed to the underlying Directory, if // commit() wasn't called. Directory dir = newDirectory(); DirectoryTaxonomyWriter ltw = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE_OR_APPEND, NO_OP_CACHE); assertFalse(DirectoryReader.indexExists(dir)); // first commit, so that an index will be created ltw.commit(); ltw.addCategory(new FacetLabel("a")); IndexReader r = DirectoryReader.open(dir); assertEquals("No categories should have been committed to the underlying directory", 1, r.numDocs()); r.close(); ltw.close(); dir.close(); }
51. IndexVisualWordsTest#testIndexMissingFiles()
Project: LIRE
File: IndexVisualWordsTest.java
public void testIndexMissingFiles() throws IOException { // first delete some of the existing ones ... System.out.println("Deleting visual words from docs ..."); IndexReader ir = DirectoryReader.open(FSDirectory.open(new File(index))); IndexWriter iw = LuceneUtils.createIndexWriter(index, false); int maxDocs = ir.maxDoc(); for (int i = 0; i < maxDocs / 10; i++) { Document d = ir.document(i); // d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW); d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW); // d.removeFields(DocumentBuilder.FIELD_NAME_SURF_LOCAL_FEATURE_HISTOGRAM); d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW_VECTOR); // d.removeFields(DocumentBuilder.FIELD_NAME_SURF); iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d); } System.out.println("# of deleted docs: " + maxDocs / 10); System.out.println("Optimizing and closing ..."); iw.close(); ir.close(); System.out.println("Creating new visual words ..."); BOVWBuilder surfFeatureHistogramBuilder = new BOVWBuilder(DirectoryReader.open(FSDirectory.open(new File(index))), new SurfFeature(), numSamples, clusters); // surfFeatureHistogramBuilder.indexMissing(); // System.out.println("Finished."); }
52. MockEngineSupport#newSearcher()
Project: elasticsearch
File: MockEngineSupport.java
public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { IndexReader reader = searcher.getIndexReader(); IndexReader wrappedReader = reader; assert reader != null; if (reader instanceof DirectoryReader && mockContext.wrapReader) { wrappedReader = wrapReader((DirectoryReader) reader); } // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true)); assertingIndexSearcher.setQueryCache(filterCache); assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; }
53. QueryUtils#wrapUnderlyingReader()
Project: lucene-solr
File: QueryUtils.java
/** * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader * is a MultiReader containing the Reader of the original IndexSearcher, * as well as several "empty" IndexReaders -- some of which will have * deleted documents in them. This new IndexSearcher should * behave exactly the same as the original IndexSearcher. * @param s the searcher to wrap * @param edge if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub */ public static IndexSearcher wrapUnderlyingReader(Random random, final IndexSearcher s, final int edge) throws IOException { IndexReader r = s.getIndexReader(); // we can't put deleted docs before the nested reader, because // it will throw off the docIds IndexReader[] readers = new IndexReader[] { edge < 0 ? r : new MultiReader(), new MultiReader(), new FCInvisibleMultiReader(edge < 0 ? emptyReader(4) : new MultiReader(), new MultiReader(), 0 == edge ? r : new MultiReader()), 0 < edge ? new MultiReader() : emptyReader(7), new MultiReader(), new FCInvisibleMultiReader(0 < edge ? new MultiReader() : emptyReader(5), new MultiReader(), 0 < edge ? r : new MultiReader()) }; IndexSearcher out = LuceneTestCase.newSearcher(new FCInvisibleMultiReader(readers)); out.setSimilarity(s.getSimilarity(true)); return out; }
54. BaseGeoPointTestCase#testMultiPolygonBasics()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** test we can search for a multi-polygon */ public void testMultiPolygonBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); Polygon a = new Polygon(new double[] { 28, 28, 29, 29, 28 }, new double[] { -56, -55, -55, -56, -56 }); Polygon b = new Polygon(new double[] { 18, 18, 19, 19, 18 }, new double[] { -66, -65, -65, -66, -66 }); assertEquals(1, searcher.count(newPolygonQuery("field", a, b))); reader.close(); writer.close(); dir.close(); }
55. BaseGeoPointTestCase#testPolygonHoleExcludes()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** test we can search for a polygon with a hole (that excludes the doc) */ public void testPolygonHoleExcludes() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); Polygon inner = new Polygon(new double[] { 18.2, 18.2, 18.4, 18.4, 18.2 }, new double[] { -65.3, -65.2, -65.2, -65.3, -65.3 }); Polygon outer = new Polygon(new double[] { 18, 18, 19, 19, 18 }, new double[] { -66, -65, -65, -66, -66 }, inner); assertEquals(0, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); }
56. BaseGeoPointTestCase#testPolygonHole()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** test we can search for a polygon with a hole (but still includes the doc) */ public void testPolygonHole() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); Polygon inner = new Polygon(new double[] { 18.5, 18.5, 18.7, 18.7, 18.5 }, new double[] { -65.7, -65.4, -65.4, -65.7, -65.7 }); Polygon outer = new Polygon(new double[] { 18, 18, 19, 19, 18 }, new double[] { -66, -65, -65, -66, -66 }, inner); assertEquals(1, searcher.count(newPolygonQuery("field", outer))); reader.close(); writer.close(); dir.close(); }
57. BaseGeoPointTestCase#testPolygonBasics()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** test we can search for a polygon */ public void testPolygonBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(newPolygonQuery("field", new Polygon(new double[] { 18, 18, 19, 19, 18 }, new double[] { -66, -65, -65, -66, -66 })))); reader.close(); writer.close(); dir.close(); }
58. BaseGeoPointTestCase#testDistanceBasics()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** test we can search for a point */ public void testDistanceBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a location Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search within 50km and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(newDistanceQuery("field", 18, -65, 50_000))); reader.close(); writer.close(); dir.close(); }
59. BaseGeoPointTestCase#testBoxBasics()
Project: lucene-solr
File: BaseGeoPointTestCase.java
/** Add a single point and search for it in a box */ // NOTE: we don't currently supply an exact search, only ranges, because of the lossiness... public void testBoxBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 18.313694, -65.227444); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(newRectQuery("field", 18, 19, -66, -65))); reader.close(); writer.close(); dir.close(); }
60. TestSpellChecker#testBogusField()
Project: lucene-solr
File: TestSpellChecker.java
public void testBogusField() throws Exception { IndexReader r = DirectoryReader.open(userindex); Directory compIdx = newDirectory(); SpellChecker compareSP = new SpellCheckerMock(compIdx, new LevensteinDistance(), new SuggestWordFrequencyComparator()); addwords(r, compareSP, "field3"); String[] similar = compareSP.suggestSimilar("fvie", 2, r, "bogusFieldBogusField", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertEquals(0, similar.length); r.close(); if (!compareSP.isClosed()) compareSP.close(); compIdx.close(); }
61. TestSpellChecker#testComparator()
Project: lucene-solr
File: TestSpellChecker.java
public void testComparator() throws Exception { IndexReader r = DirectoryReader.open(userindex); Directory compIdx = newDirectory(); SpellChecker compareSP = new SpellCheckerMock(compIdx, new LevensteinDistance(), new SuggestWordFrequencyComparator()); addwords(r, compareSP, "field3"); String[] similar = compareSP.suggestSimilar("fvie", 2, r, "field3", SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); assertTrue(similar.length == 2); //five and fvei have the same score, but different frequencies. assertEquals("fvei", similar[0]); assertEquals("five", similar[1]); r.close(); if (!compareSP.isClosed()) compareSP.close(); compIdx.close(); }
62. TestGeo3DPoint#testBasic()
Project: lucene-solr
File: TestGeo3DPoint.java
public void testBasic() throws Exception { Directory dir = getDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); iwc.setCodec(getCodec()); IndexWriter w = new IndexWriter(dir, iwc); Document doc = new Document(); doc.add(new Geo3DPoint("field", 50.7345267, -97.5303555)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); // We can't wrap with "exotic" readers because the query must see the BKD3DDVFormat: IndexSearcher s = newSearcher(r, false); assertEquals(1, s.search(Geo3DPoint.newShapeQuery("field", GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, toRadians(50), toRadians(-97), Math.PI / 180.)), 1).totalHits); w.close(); r.close(); dir.close(); }
63. TestGeoPointQuery#testInvalidShift()
Project: lucene-solr
File: TestGeoPointQuery.java
/** explicit test failure for LUCENE-7325 */ public void testInvalidShift() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a point Document document = new Document(); addPointToDoc("field", document, 80, -65); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(0, searcher.count(newRectQuery("field", 90, 90, -180, 0))); reader.close(); writer.close(); dir.close(); }
64. TestTermAutomatonQuery#testOneTermDoesNotExist()
Project: lucene-solr
File: TestTermAutomatonQuery.java
public void testOneTermDoesNotExist() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newTextField("field", "x y z", Field.Store.NO)); w.addDocument(doc); IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r); TokenStream ts = new CannedTokenStream(new Token[] { token("a", 1, 1), token("x", 1, 1) }); TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts); // System.out.println("DOT: " + q.toDot()); assertEquals(0, s.search(q, 1).totalHits); w.close(); r.close(); dir.close(); }
65. TestTermAutomatonQuery#testTermDoesNotExist()
Project: lucene-solr
File: TestTermAutomatonQuery.java
public void testTermDoesNotExist() throws Exception { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newTextField("field", "x y z", Field.Store.NO)); w.addDocument(doc); IndexReader r = w.getReader(); IndexSearcher s = newSearcher(r); TokenStream ts = new CannedTokenStream(new Token[] { token("a", 1, 1) }); TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts); // System.out.println("DOT: " + q.toDot()); assertEquals(0, s.search(q, 1).totalHits); w.close(); r.close(); dir.close(); }
66. TestSlowFuzzyQuery#testBoostOnlyRewrite()
Project: lucene-solr
File: TestSlowFuzzyQuery.java
/** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. */ public void testBoostOnlyRewrite() throws Exception { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("Lucene", writer); addDoc("Lucene", writer); addDoc("Lucenne", writer); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); writer.close(); SlowFuzzyQuery query = new SlowFuzzyQuery(new Term("field", "lucene")); query.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(50)); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. assertEquals("Lucene", reader.document(hits[0].doc).get("field")); assertEquals("Lucene", reader.document(hits[1].doc).get("field")); assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); reader.close(); directory.close(); }
67. TestSlowFuzzyQuery#testFuzzinessLong2()
Project: lucene-solr
File: TestSlowFuzzyQuery.java
public void testFuzzinessLong2() throws Exception { //Lucene-5033 Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory); addDoc("abcdef", writer); addDoc("segment", writer); IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); writer.close(); SlowFuzzyQuery query; query = new SlowFuzzyQuery(new Term("field", "abcxxxx"), 3f, 0); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(0, hits.length); query = new SlowFuzzyQuery(new Term("field", "abcxxxx"), 4f, 0); hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); reader.close(); directory.close(); }
68. TestPayloadSpanUtil#testPayloadSpanUtil()
Project: lucene-solr
File: TestPayloadSpanUtil.java
public void testPayloadSpanUtil() throws Exception { Directory directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(new ClassicSimilarity())); Document doc = new Document(); doc.add(newTextField(FIELD, "xx rr yy mm pp", Field.Store.YES)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(reader); PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext()); Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(FIELD, "rr"))); if (VERBOSE) { System.out.println("Num payloads:" + payloads.size()); for (final byte[] bytes : payloads) { System.out.println(new String(bytes, StandardCharsets.UTF_8)); } } reader.close(); directory.close(); }
69. TestInetAddressPoint#testBasicsV6()
Project: lucene-solr
File: TestInetAddressPoint.java
/** Add a single address and search for it */ public void testBasicsV6() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with an address Document document = new Document(); InetAddress address = InetAddress.getByName("fec0::f66d"); document.add(new InetAddressPoint("field", address)); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(InetAddressPoint.newExactQuery("field", address))); assertEquals(1, searcher.count(InetAddressPoint.newPrefixQuery("field", address, 64))); assertEquals(1, searcher.count(InetAddressPoint.newRangeQuery("field", InetAddress.getByName("fec0::f66c"), InetAddress.getByName("fec0::f66e")))); reader.close(); writer.close(); dir.close(); }
70. TestInetAddressPoint#testBasics()
Project: lucene-solr
File: TestInetAddressPoint.java
/** Add a single address and search for it */ public void testBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with an address Document document = new Document(); InetAddress address = InetAddress.getByName("1.2.3.4"); document.add(new InetAddressPoint("field", address)); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(InetAddressPoint.newExactQuery("field", address))); assertEquals(1, searcher.count(InetAddressPoint.newPrefixQuery("field", address, 24))); assertEquals(1, searcher.count(InetAddressPoint.newRangeQuery("field", InetAddress.getByName("1.2.3.3"), InetAddress.getByName("1.2.3.5")))); assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4")))); assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.5")))); assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.3")))); assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field"))); reader.close(); writer.close(); dir.close(); }
71. TestHalfFloatPoint#testBasicsMultiDims()
Project: lucene-solr
File: TestHalfFloatPoint.java
/** Add a single multi-dimensional value and search for it */ public void testBasicsMultiDims() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with two dimensions Document document = new Document(); document.add(new HalfFloatPoint("field", 1.25f, -2f)); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(HalfFloatPoint.newRangeQuery("field", new float[] { 0, -5 }, new float[] { 1.25f, -1 }))); assertEquals(0, searcher.count(HalfFloatPoint.newRangeQuery("field", new float[] { 0, 0 }, new float[] { 2, 2 }))); assertEquals(0, searcher.count(HalfFloatPoint.newRangeQuery("field", new float[] { -10, -10 }, new float[] { 1, 2 }))); reader.close(); writer.close(); dir.close(); }
72. TestBigIntegerPoint#testNegative()
Project: lucene-solr
File: TestBigIntegerPoint.java
/** Add a negative 1D point and search for it */ public void testNegative() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a large biginteger value Document document = new Document(); BigInteger negative = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64)).negate(); document.add(new BigIntegerPoint("field", negative)); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", negative))); assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", negative.subtract(BigInteger.ONE), negative.add(BigInteger.ONE)))); reader.close(); writer.close(); dir.close(); }
73. TestBigIntegerPoint#testBasics()
Project: lucene-solr
File: TestBigIntegerPoint.java
/** Add a single 1D point and search for it */ public void testBasics() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); // add a doc with a large biginteger value Document document = new Document(); BigInteger large = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64)); document.add(new BigIntegerPoint("field", large)); writer.addDocument(document); // search and verify we found our doc IndexReader reader = writer.getReader(); IndexSearcher searcher = newSearcher(reader); assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", large))); assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", large.subtract(BigInteger.ONE), large.add(BigInteger.ONE)))); assertEquals(1, searcher.count(BigIntegerPoint.newSetQuery("field", large))); assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field", large.subtract(BigInteger.ONE)))); assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field"))); reader.close(); writer.close(); dir.close(); }
74. TestIDVersionPostingsFormat#testBasic()
Project: lucene-solr
File: TestIDVersionPostingsFormat.java
public void testBasic() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat())); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); Document doc = new Document(); doc.add(makeIDField("id0", 100)); w.addDocument(doc); doc = new Document(); doc.add(makeIDField("id1", 110)); w.addDocument(doc); IndexReader r = w.getReader(); IDVersionSegmentTermsEnum termsEnum = (IDVersionSegmentTermsEnum) r.leaves().get(0).reader().fields().terms("id").iterator(); assertTrue(termsEnum.seekExact(new BytesRef("id0"), 50)); assertTrue(termsEnum.seekExact(new BytesRef("id0"), 100)); assertFalse(termsEnum.seekExact(new BytesRef("id0"), 101)); assertTrue(termsEnum.seekExact(new BytesRef("id1"), 50)); assertTrue(termsEnum.seekExact(new BytesRef("id1"), 110)); assertFalse(termsEnum.seekExact(new BytesRef("id1"), 111)); r.close(); w.close(); dir.close(); }
75. QueryParserTestBase#testPositionIncrements()
Project: lucene-solr
File: QueryParserTestBase.java
// LUCENE-2002: make sure defaults for StandardAnalyzer's // enableStopPositionIncr & QueryParser's enablePosIncr // "match" public void testPositionIncrements() throws Exception { Directory dir = newDirectory(); Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a)); Document doc = new Document(); doc.add(newTextField("field", "the wizard of ozzy", Field.Store.NO)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); w.close(); IndexSearcher s = newSearcher(r); Query q = getQuery("\"wizard of ozzy\"", a); assertEquals(1, s.search(q, 1).totalHits); r.close(); dir.close(); }
76. TestQPHelper#testMultiPhraseQuery()
Project: lucene-solr
File: TestQPHelper.java
public void testMultiPhraseQuery() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new CannedAnalyzer())); Document doc = new Document(); doc.add(newTextField("field", "", Field.Store.NO)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); IndexSearcher s = newSearcher(r); Query q = new StandardQueryParser(new CannedAnalyzer()).parse("\"a\"", "field"); assertTrue(q instanceof MultiPhraseQuery); assertEquals(1, s.search(q, 10).totalHits); r.close(); w.close(); dir.close(); }
77. TestMultiFieldQPHelper#testStopWordSearching()
Project: lucene-solr
File: TestMultiFieldQPHelper.java
/** With the AND operator, a stop word in the query must not prevent the remaining term from matching. */
public void testStopWordSearching() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
  Document doc = new Document();
  doc.add(newTextField("body", "blah the footest blah", Field.Store.NO));
  writer.addDocument(doc);
  writer.close();

  StandardQueryParser parser = new StandardQueryParser();
  parser.setMultiFields(new String[] { "body" });
  parser.setAnalyzer(analyzer);
  parser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
  Query query = parser.parse("the footest", null);

  IndexReader reader = DirectoryReader.open(ramDir);
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.search(query, 1000).scoreDocs.length);
  reader.close();
  ramDir.close();
}
78. TestMultiFieldQueryParser#testStopWordSearching()
Project: lucene-solr
File: TestMultiFieldQueryParser.java
/** With AND as the default operator, the stop word "the" must not block the match on "footest". */
public void testStopWordSearching() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
  Document doc = new Document();
  doc.add(newTextField("body", "blah the footest blah", Field.Store.NO));
  writer.addDocument(doc);
  writer.close();

  MultiFieldQueryParser parser = new MultiFieldQueryParser(new String[] { "body" }, analyzer);
  parser.setDefaultOperator(QueryParser.Operator.AND);
  Query query = parser.parse("the footest");

  IndexReader reader = DirectoryReader.open(ramDir);
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.search(query, 1000).scoreDocs.length);
  reader.close();
  ramDir.close();
}
79. TestCustomScoreQuery#testRewrite()
Project: lucene-solr
File: TestCustomScoreQuery.java
@Test public void testRewrite() throws Exception { IndexReader r = DirectoryReader.open(dir); final IndexSearcher s = newSearcher(r); Query q = new TermQuery(new Term(TEXT_FIELD, "first")); CustomScoreQuery original = new CustomScoreQuery(q); CustomScoreQuery rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader()); assertTrue("rewritten query should be identical, as TermQuery does not rewrite", original == rewritten); assertTrue("no hits for query", s.search(rewritten, 1).totalHits > 0); assertEquals(s.search(q, 1).totalHits, s.search(rewritten, 1).totalHits); // everything q = new TermRangeQuery(TEXT_FIELD, null, null, true, true); original = new CustomScoreQuery(q); rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader()); assertTrue("rewritten query should not be identical, as TermRangeQuery rewrites", original != rewritten); assertTrue("no hits for query", s.search(rewritten, 1).totalHits > 0); assertEquals(s.search(q, 1).totalHits, s.search(original, 1).totalHits); assertEquals(s.search(q, 1).totalHits, s.search(rewritten, 1).totalHits); r.close(); }
80. TestCustomScoreQuery#testCustomExternalQuery()
Project: lucene-solr
File: TestCustomScoreQuery.java
/** Every doc must match the external query, and each score must follow the 1 + (4*doc) % N_DOCS formula. */
@Test
public void testCustomExternalQuery() throws Exception {
  BooleanQuery.Builder builder = new BooleanQuery.Builder();
  builder.add(new TermQuery(new Term(TEXT_FIELD, "first")), BooleanClause.Occur.SHOULD);
  builder.add(new TermQuery(new Term(TEXT_FIELD, "aid")), BooleanClause.Occur.SHOULD);
  builder.add(new TermQuery(new Term(TEXT_FIELD, "text")), BooleanClause.Occur.SHOULD);
  final Query q = new CustomExternalQuery(builder.build());
  log(q);

  IndexReader reader = DirectoryReader.open(dir);
  IndexSearcher searcher = newSearcher(reader);
  TopDocs hits = searcher.search(q, 1000);
  assertEquals(N_DOCS, hits.totalHits);
  for (int i = 0; i < N_DOCS; i++) {
    ScoreDoc sd = hits.scoreDocs[i];
    assertEquals("doc=" + sd.doc, (float) 1 + (4 * sd.doc) % N_DOCS, sd.score, 0.0001);
  }
  reader.close();
}
81. TestPayloadSpans#testSpanNot()
Project: lucene-solr
File: TestPayloadSpans.java
/** Payload check for a SpanNotQuery: "one ... three" near-spans that do not contain "two". */
public void testSpanNot() throws Exception {
  SpanQuery[] nearClauses = new SpanQuery[] {
      new SpanTermQuery(new Term(PayloadHelper.FIELD, "one")),
      new SpanTermQuery(new Term(PayloadHelper.FIELD, "three")) };
  SpanQuery near = new SpanNearQuery(nearClauses, 5, true);
  SpanNotQuery spanNot = new SpanNotQuery(near, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));

  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));
  Document doc = new Document();
  doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = getOnlyLeafReader(writer.getReader());
  writer.close();

  // expect exactly one surviving span carrying 2 payloads
  checkSpans(
      spanNot.createWeight(newSearcher(reader, false), false, 1f)
          .getSpans(reader.leaves().get(0), SpanWeight.Postings.PAYLOADS),
      1, new int[] { 2 });
  reader.close();
  directory.close();
}
82. TestFieldScoreQuery#doTestExactScore()
Project: lucene-solr
File: TestFieldScoreQuery.java
// Test that FieldScoreQuery returns docs with expected score. private void doTestExactScore(ValueSource valueSource) throws Exception { Query functionQuery = getFunctionQuery(valueSource); IndexReader r = DirectoryReader.open(dir); IndexSearcher s = newSearcher(r); TopDocs td = s.search(functionQuery, 1000); assertEquals("All docs should be matched!", N_DOCS, td.totalHits); ScoreDoc sd[] = td.scoreDocs; for (ScoreDoc aSd : sd) { float score = aSd.score; log(s.explain(functionQuery, aSd.doc)); String id = s.getIndexReader().document(aSd.doc).get(ID_FIELD); // "ID7" --> 7.0 float expectedScore = expectedFieldScore(id); assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA); } r.close(); }
83. TestFieldScoreQuery#doTestRank()
Project: lucene-solr
File: TestFieldScoreQuery.java
// Test that FieldScoreQuery returns docs in expected order. private void doTestRank(ValueSource valueSource) throws Exception { Query functionQuery = getFunctionQuery(valueSource); IndexReader r = DirectoryReader.open(dir); IndexSearcher s = newSearcher(r); log("test: " + functionQuery); QueryUtils.check(random(), functionQuery, s); ScoreDoc[] h = s.search(functionQuery, 1000).scoreDocs; assertEquals("All docs should be matched!", N_DOCS, h.length); // greater than all ids of docs in this test String prevID = "ID" + (N_DOCS + 1); for (int i = 0; i < h.length; i++) { String resID = s.doc(h[i].doc).get(ID_FIELD); log(i + ". score=" + h[i].score + " - " + resID); log(s.explain(functionQuery, h[i].doc)); assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.compareTo(prevID) < 0); prevID = resID; } r.close(); }
84. TestMemoryIndexAgainstRAMDir#assertAllQueries()
Project: lucene-solr
File: TestMemoryIndexAgainstRAMDir.java
/**
 * Run all queries against both the RAMDirectory and MemoryIndex, ensuring they are the same.
 */
public void assertAllQueries(MemoryIndex memory, Directory ramdir, Analyzer analyzer) throws Exception {
  IndexReader reader = DirectoryReader.open(ramdir);
  IndexSearcher ramSearcher = newSearcher(reader);
  IndexSearcher memSearcher = memory.createSearcher();
  QueryParser parser = new QueryParser("foo", analyzer);
  for (String query : queries) {
    TopDocs fromRam = ramSearcher.search(parser.parse(query), 1);
    TopDocs fromMem = memSearcher.search(parser.parse(query), 1);
    assertEquals(query, fromRam.totalHits, fromMem.totalHits);
  }
  reader.close();
}
85. TestJoinUtil#testRewrite()
Project: lucene-solr
File: TestJoinUtil.java
public void testRewrite() throws IOException { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(new SortedDocValuesField("join_field", new BytesRef("abc"))); w.addDocument(doc); doc = new Document(); doc.add(new SortedDocValuesField("join_field", new BytesRef("abd"))); w.addDocument(doc); IndexReader reader = w.getReader(); IndexSearcher searcher = newSearcher(reader); OrdinalMap ordMap = OrdinalMap.build(null, new SortedDocValues[0], 0f); Query joinQuery = JoinUtil.createJoinQuery("join_field", new MatchNoDocsQuery(), new MatchNoDocsQuery(), searcher, RandomPicks.randomFrom(random(), ScoreMode.values()), ordMap, 0, Integer.MAX_VALUE); // no exception due to missing rewrites searcher.search(joinQuery, 1); reader.close(); w.close(); dir.close(); }
86. TestLegacyFieldCache#testEmptyIndex()
Project: lucene-solr
File: TestLegacyFieldCache.java
/** FieldCache lookups against an empty index must succeed, and the entries must be purgeable. */
public void testEmptyIndex() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(500));
  writer.close();

  IndexReader topReader = DirectoryReader.open(dir);
  LeafReader leaf = SlowCompositeReaderWrapper.wrap(topReader);
  TestUtil.checkReader(leaf);
  FieldCache.DEFAULT.getTerms(leaf, "foobar", true);
  FieldCache.DEFAULT.getTermsIndex(leaf, "foobar");
  FieldCache.DEFAULT.purgeByCacheKey(leaf.getCoreCacheKey());
  topReader.close();
  dir.close();
}
87. TestFieldCacheSort#testSortOneDocumentWithScores()
Project: lucene-solr
File: TestFieldCacheSort.java
/** Tests sorting a single document with scores */
public void testSortOneDocumentWithScores() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newStringField("value", "foo", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = UninvertingReader.wrap(writer.getReader(),
      Collections.singletonMap("value", Type.SORTED));
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
  TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
  assertEquals(1, expected.totalHits);
  // doDocScores=true: the sorted result must carry the same score as the unsorted search.
  TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort, true, true);
  assertEquals(expected.totalHits, actual.totalHits);
  assertEquals(expected.scoreDocs[0].score, actual.scoreDocs[0].score, 0F);
  TestUtil.checkReader(reader);
  reader.close();
  dir.close();
}
88. TestFieldCacheSort#testSortOneDocument()
Project: lucene-solr
File: TestFieldCacheSort.java
/** Tests sorting a single document */
public void testSortOneDocument() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newStringField("value", "foo", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = UninvertingReader.wrap(writer.getReader(),
      Collections.singletonMap("value", Type.SORTED));
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(1, td.totalHits);
  assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
  TestUtil.checkReader(reader);
  reader.close();
  dir.close();
}
89. TestFieldCacheSort#testMultiValuedField()
Project: lucene-solr
File: TestFieldCacheSort.java
/** test that we throw exception on multi-valued field, creates corrupt reader, use SORTED_SET instead */ public void testMultiValuedField() throws IOException { Directory indexStore = newDirectory(); IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random()))); for (int i = 0; i < 5; i++) { Document doc = new Document(); doc.add(new StringField("string", "a" + i, Field.Store.NO)); doc.add(new StringField("string", "b" + i, Field.Store.NO)); writer.addDocument(doc); } // enforce one segment to have a higher unique term count in all cases writer.forceMerge(1); writer.close(); Sort sort = new Sort(new SortField("string", SortField.Type.STRING), SortField.FIELD_DOC); IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore), Collections.singletonMap("string", Type.SORTED)); IndexSearcher searcher = new IndexSearcher(reader); expectThrows(IllegalStateException.class, () -> { searcher.search(new MatchAllDocsQuery(), 500, sort); }); reader.close(); indexStore.close(); }
90. TestFieldCacheSort#testEmptyStringVsNullStringSort()
Project: lucene-solr
File: TestFieldCacheSort.java
public void testEmptyStringVsNullStringSort() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); Document doc = new Document(); doc.add(newStringField("f", "", Field.Store.NO)); doc.add(newStringField("t", "1", Field.Store.NO)); w.addDocument(doc); w.commit(); doc = new Document(); doc.add(newStringField("t", "1", Field.Store.NO)); w.addDocument(doc); IndexReader r = UninvertingReader.wrap(DirectoryReader.open(w), Collections.singletonMap("f", Type.SORTED)); w.close(); IndexSearcher s = newSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("t", "1")), 10, new Sort(new SortField("f", SortField.Type.STRING))); assertEquals(2, hits.totalHits); // null sorts first assertEquals(1, hits.scoreDocs[0].doc); assertEquals(0, hits.scoreDocs[1].doc); TestUtil.checkReader(r); r.close(); dir.close(); }
91. TestFieldCacheSort#testFieldDocReverse()
Project: lucene-solr
File: TestFieldCacheSort.java
/** Tests sorting on reverse internal docid order */ public void testFieldDocReverse() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newStringField("value", "foo", Field.Store.NO)); writer.addDocument(doc); doc = new Document(); doc.add(newStringField("value", "bar", Field.Store.NO)); writer.addDocument(doc); IndexReader ir = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(new SortField(null, SortField.Type.DOC, true)); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits); // docid 1, then docid 0 assertEquals(1, td.scoreDocs[0].doc); assertEquals(0, td.scoreDocs[1].doc); TestUtil.checkReader(ir); ir.close(); dir.close(); }
92. TestFieldCacheSort#testFieldDoc()
Project: lucene-solr
File: TestFieldCacheSort.java
/** Tests sorting on internal docid order */ public void testFieldDoc() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(newStringField("value", "foo", Field.Store.NO)); writer.addDocument(doc); doc = new Document(); doc.add(newStringField("value", "bar", Field.Store.NO)); writer.addDocument(doc); IndexReader ir = writer.getReader(); writer.close(); IndexSearcher searcher = newSearcher(ir); Sort sort = new Sort(SortField.FIELD_DOC); TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); assertEquals(2, td.totalHits); // docid 0, then docid 1 assertEquals(0, td.scoreDocs[0].doc); assertEquals(1, td.scoreDocs[1].doc); TestUtil.checkReader(ir); ir.close(); dir.close(); }
93. TestFieldCache#testEmptyIndex()
Project: lucene-solr
File: TestFieldCache.java
/** An empty index must still serve (empty) FieldCache entries without error. */
public void testEmptyIndex() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(500));
  writer.close();

  IndexReader topReader = DirectoryReader.open(dir);
  LeafReader leafReader = SlowCompositeReaderWrapper.wrap(topReader);
  TestUtil.checkReader(leafReader);
  FieldCache.DEFAULT.getTerms(leafReader, "foobar", true);
  FieldCache.DEFAULT.getTermsIndex(leafReader, "foobar");
  // drop the cache entries created above
  FieldCache.DEFAULT.purgeByCacheKey(leafReader.getCoreCacheKey());
  topReader.close();
  dir.close();
}
94. TestQueryWrapperFilter#testQueryWrapperFilterPropagatesApproximations()
Project: lucene-solr
File: TestQueryWrapperFilter.java
public void testQueryWrapperFilterPropagatesApproximations() throws IOException { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir); Document doc = new Document(); doc.add(new StringField("foo", "bar", Store.NO)); writer.addDocument(doc); writer.commit(); final IndexReader reader = writer.getReader(); writer.close(); final IndexSearcher searcher = new IndexSearcher(reader); // to still have approximations searcher.setQueryCache(null); final Query query = new QueryWrapperFilter(new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random())); final Weight weight = searcher.createNormalizedWeight(query, random().nextBoolean()); final Scorer scorer = weight.scorer(reader.leaves().get(0)); assertNotNull(scorer.twoPhaseIterator()); reader.close(); dir.close(); }
95. TestQueryWrapperFilter#testScore()
Project: lucene-solr
File: TestQueryWrapperFilter.java
/** A filter-wrapped query matches but scores every hit as 0. */
public void testScore() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Store.NO));
  writer.addDocument(doc);
  writer.commit();
  final IndexReader reader = writer.getReader();
  writer.close();

  final IndexSearcher searcher = new IndexSearcher(reader);
  final Query query = new QueryWrapperFilter(new TermQuery(new Term("foo", "bar")));
  final TopDocs topDocs = searcher.search(query, 1);
  assertEquals(1, topDocs.totalHits);
  assertEquals(0f, topDocs.scoreDocs[0].score, 0f);
  reader.close();
  dir.close();
}
96. TestQueryWrapperFilter#testThousandDocuments()
Project: lucene-solr
File: TestQueryWrapperFilter.java
/** Each of 1000 distinct terms must match exactly one document through the filter. */
public void testThousandDocuments() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  for (int i = 0; i < 1000; i++) {
    Document doc = new Document();
    doc.add(newStringField("field", English.intToEnglish(i), Field.Store.NO));
    writer.addDocument(doc);
  }
  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  for (int i = 0; i < 1000; i++) {
    QueryWrapperFilter filter = new QueryWrapperFilter(
        new TermQuery(new Term("field", English.intToEnglish(i))));
    assertEquals(1, searcher.search(filter, 10).totalHits);
  }
  reader.close();
  dir.close();
}
97. TestMultiThreadTermVectors#test()
Project: lucene-solr
File: TestMultiThreadTermVectors.java
/**
 * Runs the term-position-vector check with 1..numThreads threads against a
 * shared reader.
 *
 * Improvement over the original: IOExceptions are no longer caught and
 * converted to fail(ioe.getMessage()) — that discarded the stack trace and
 * the cause. The method already declares "throws Exception", so letting the
 * exception propagate reports the failure with full context. The reader is
 * closed via try-with-resources instead of a finally block that swallowed
 * close() errors with printStackTrace().
 */
public void test() throws Exception {
  try (IndexReader reader = DirectoryReader.open(directory)) {
    for (int i = 1; i <= numThreads; i++) {
      testTermPositionVectors(reader, i);
    }
  }
}
98. TestIndexSearcher#testSearchAfterPassedMaxDoc()
Project: lucene-solr
File: TestIndexSearcher.java
@Test public void testSearchAfterPassedMaxDoc() throws Exception { // LUCENE-5128: ensure we get a meaningful message if searchAfter exceeds maxDoc Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); w.addDocument(new Document()); IndexReader r = w.getReader(); w.close(); IndexSearcher s = new IndexSearcher(r); expectThrows(IllegalArgumentException.class, () -> { s.searchAfter(new ScoreDoc(r.maxDoc(), 0.54f), new MatchAllDocsQuery(), 10); }); IOUtils.close(r, dir); }
99. IndexManager#getIndexReader()
Project: spacewalk
File: IndexManager.java
/**
 * Opens an IndexReader over the on-disk index identified by indexName.
 * Doc-type indexes are resolved per-locale; any other index lives directly
 * under the index work directory.
 */
private IndexReader getIndexReader(String indexName, String locale)
        throws CorruptIndexException, IOException {
    final String path;
    if (indexName.compareTo(BuilderFactory.DOCS_TYPE) == 0) {
        path = indexWorkDir + File.separator + getDocIndexPath(locale);
    } else {
        path = indexWorkDir + indexName;
    }
    log.info("IndexManager::getIndexReader(" + indexName + ", " + locale
            + ") path = " + path);
    return IndexReader.open(FSDirectory.getDirectory(new File(path)));
}
100. IndexManager#removeFromIndex()
Project: spacewalk
File: IndexManager.java
/**
 * Remove a document from an index.
 *
 * @param indexName index to use
 * @param uniqueField field name which represents this data's unique id
 * @param objectId unique document id
 * @throws IndexingException something went wrong removing the document
 */
public void removeFromIndex(String indexName, String uniqueField, String objectId)
        throws IndexingException {
    log.info("Removing <" + indexName + "> " + uniqueField + ":" + objectId);
    Term t = new Term(uniqueField, objectId);
    try {
        // getIndexReader either returns a reader or throws, so no null
        // check is needed before close() below.
        IndexReader reader = getIndexReader(indexName, IndexHandler.DEFAULT_LANG);
        try {
            reader.deleteDocuments(t);
            reader.flush();
        } finally {
            reader.close();
        }
    } catch (IOException e) {
        // CorruptIndexException is a subclass of IOException, so this single
        // handler replaces the original two duplicate catch blocks.
        throw new IndexingException(e);
    }
}