Here are examples of the Java API class org.apache.lucene.store.MockDirectoryWrapper, taken from open source projects.
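Before diving in, here is a minimal sketch of the pattern nearly all of these examples share: wrap a test directory in MockDirectoryWrapper, dial up failure injection, exercise IndexWriter, then let the wrapper's close-time checks catch leaked file handles and unreferenced files. This sketch is illustrative only (not from any of the projects below) and assumes it runs inside a subclass of Lucene's LuceneTestCase, which supplies newMockDirectory(), random(), and newIndexWriterConfig():

// Illustrative sketch; assumes a LuceneTestCase subclass and the usual
// org.apache.lucene.index / org.apache.lucene.analysis / org.apache.lucene.util imports.
public void testWithInjectedIOExceptions() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();   // wraps a randomly chosen delegate Directory
  dir.setRandomIOExceptionRate(0.01);              // ~1% of I/O operations throw a fake IOException
  dir.setRandomIOExceptionRateOnOpen(0.01);        // also fail openInput/createOutput calls
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  try {
    w.addDocument(new Document());
    w.commit();
  } catch (IOException expected) {
    // an injected failure; the writer may now be closed (tragic exception)
  } finally {
    IOUtils.closeWhileHandlingException(w);
  }
  // disarm injection so close-time checks (CheckIndex, unreferenced files,
  // open handles) can run cleanly:
  dir.setRandomIOExceptionRate(0.0);
  dir.setRandomIOExceptionRateOnOpen(0.0);
  dir.close();
}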
1. SimpleReplicaNode#getDirectory()
Project: lucene-solr
File: SimpleReplicaNode.java
static Directory getDirectory(Random random, int id, Path path, boolean doCheckIndexOnClose) throws IOException {
  MockDirectoryWrapper dir = LuceneTestCase.newMockFSDirectory(path);
  dir.setAssertNoUnrefencedFilesOnClose(true);
  dir.setCheckIndexOnClose(doCheckIndexOnClose);
  // Corrupt any index files not referenced by current commit point; this is important (increases test evilness) because we may have done
  // a hard crash of the previous JVM writing to this directory and so MDW's corrupt-unknown-files-on-close never ran:
  Node.nodeMessage(System.out, id, "top: corrupt unknown files");
  dir.corruptUnknownFiles();
  return dir;
}
2. TestIndexWriterOnDiskFull#testImmediateDiskFull()
Project: lucene-solr
File: TestIndexWriterOnDiskFull.java
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setMergeScheduler(new ConcurrentMergeScheduler())
      .setCommitOnClose(false));
  // empty commit, to not create confusing situation with first commit
  writer.commit();
  dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
  final Document doc = new Document();
  FieldType customType = new FieldType(TextField.TYPE_STORED);
  doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
  expectThrows(IOException.class, () -> {
    writer.addDocument(doc);
  });
  assertTrue(writer.deleter.isClosed());
  assertTrue(writer.isClosed());
  dir.close();
}
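The disk-full simulation in the example above comes down to one call: cap the wrapper at (roughly) its current size, so the very next write that grows the index overflows. A hedged sketch of just that mechanism, assuming the same LuceneTestCase context:

// Sketch of the disk-full mechanism used above:
MockDirectoryWrapper dir = newMockDirectory();
// Cap the directory at its current recomputed size (at least 1 byte), so any
// subsequent write that grows the index throws a fake "disk full" IOException:
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));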
3. TestIndexFileDeleter#testTrashyFile()
Project: lucene-solr
File: TestIndexFileDeleter.java
public void testTrashyFile() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  // TODO: maybe handle such trash better elsewhere...
  dir.setCheckIndexOnClose(false);
  // empty commit
  new IndexWriter(dir, new IndexWriterConfig(null)).close();
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.getGeneration());
  // add trash file
  dir.createOutput(IndexFileNames.SEGMENTS + "_", IOContext.DEFAULT).close();
  // no inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(1, sis.getGeneration());
  dir.close();
}
4. TestIndexFileDeleter#testSegmentsInflation()
Project: lucene-solr
File: TestIndexFileDeleter.java
public void testSegmentsInflation() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  // TODO: allow falling back more than one commit
  dir.setCheckIndexOnClose(false);
  // empty commit
  new IndexWriter(dir, new IndexWriterConfig(null)).close();
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.getGeneration());
  // add trash commit
  dir.createOutput(IndexFileNames.SEGMENTS + "_2", IOContext.DEFAULT).close();
  // ensure inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(2, sis.getGeneration());
  // add another trash commit
  dir.createOutput(IndexFileNames.SEGMENTS + "_4", IOContext.DEFAULT).close();
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(4, sis.getGeneration());
  dir.close();
}
5. TestCrash#testCrashAfterCloseNoWait()
Project: lucene-solr
File: TestCrash.java
public void testCrashAfterCloseNoWait() throws IOException {
  Random random = random();
  MockDirectoryWrapper dir = newMockDirectory(random, NoLockFactory.INSTANCE);
  IndexWriter writer = initIndex(random, dir, false, false);
  try {
    writer.commit();
  } finally {
    writer.close();
  }
  dir.crash();
  /*
  String[] l = dir.list();
  Arrays.sort(l);
  for(int i=0;i<l.length;i++)
    System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
  */
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(157, reader.numDocs());
  reader.close();
  dir.close();
}
6. TestCrash#testCrashAfterClose()
Project: lucene-solr
File: TestCrash.java
public void testCrashAfterClose() throws IOException {
  IndexWriter writer = initIndex(random(), false);
  MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
  writer.close();
  dir.crash();
  /*
  String[] l = dir.list();
  Arrays.sort(l);
  for(int i=0;i<l.length;i++)
    System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
  */
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(157, reader.numDocs());
  reader.close();
  dir.close();
}
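Both crash tests above rely on MockDirectoryWrapper.crash(), which simulates a machine or JVM crash by corrupting any file data that was never explicitly synced; only committed (fsynced) state should survive. A minimal sketch of the pattern, again assuming a LuceneTestCase context:

// Sketch: committed data must survive a simulated crash.
MockDirectoryWrapper dir = newMockDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
w.addDocument(new Document());
w.commit();  // commit fsyncs; this state must survive the crash
w.close();
dir.crash(); // corrupt/drop everything that was never synced
DirectoryReader r = DirectoryReader.open(dir); // the committed index still opens
assertEquals(1, r.numDocs());
r.close();
dir.close();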
7. ShadowEngineTests#testFailEngineOnCorruption()
Project: elasticsearch
File: ShadowEngineTests.java
public void testFailEngineOnCorruption() {
  ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
  primaryEngine.index(new Engine.Index(newUid("1"), doc));
  primaryEngine.flush();
  MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class);
  leaf.setRandomIOExceptionRate(1.0);
  leaf.setRandomIOExceptionRateOnOpen(1.0);
  try {
    replicaEngine.refresh("foo");
    fail("exception expected");
  } catch (Exception ex) {
  }
  try {
    Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
    MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
    MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
    searchResult.close();
    fail("exception expected");
  } catch (EngineClosedException ex) {
  }
}
8. TestIndexWriter#testDeleteAllNRTLeftoverFiles()
Project: lucene-solr
File: TestIndexWriter.java
public void testDeleteAllNRTLeftoverFiles() throws Exception {
  MockDirectoryWrapper d = new MockDirectoryWrapper(random(), new RAMDirectory());
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  for (int i = 0; i < 20; i++) {
    for (int j = 0; j < 100; ++j) {
      w.addDocument(doc);
    }
    w.commit();
    DirectoryReader.open(w).close();
    w.deleteAll();
    w.commit();
    // Make sure we accumulate no files except for empty
    // segments_N and segments.gen:
    assertTrue(d.listAll().length <= 2);
  }
  w.close();
  d.close();
}
9. TestIndexFileDeleter#testTrashyGenFile()
Project: lucene-solr
File: TestIndexFileDeleter.java
public void testTrashyGenFile() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  // initial commit
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
  iw.addDocument(new Document());
  iw.commit();
  iw.close();
  // no deletes: start at 1
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.info(0).getNextDelGen());
  // add trash file
  dir.createOutput("_1_A", IOContext.DEFAULT).close();
  // no inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(1, sis.info(0).getNextDelGen());
  dir.close();
}
10. InternalEngineTests#testSycnedFlushVanishesOnReplay()
Project: elasticsearch
File: InternalEngineTests.java
public void testSycnedFlushVanishesOnReplay() throws IOException {
  final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
  ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
  engine.index(new Engine.Index(newUid("1"), doc));
  final Engine.CommitId commitID = engine.flush();
  assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS);
  assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
  assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
  doc = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), new BytesArray("{}"), null);
  engine.index(new Engine.Index(newUid("2"), doc));
  EngineConfig config = engine.config();
  engine.close();
  final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (directory != null) {
    // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
    // this so we have to disable the check explicitly
    directory.setPreventDoubleWrite(false);
  }
  engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
  engine.recoverFromTranslog();
  assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
}
11. TestTransactions#testTransactions()
Project: lucene-solr
File: TestTransactions.java
public void testTransactions() throws Throwable {
  // we can't use a non-ramdir on windows, because this test needs to double-write.
  MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random(), new RAMDirectory());
  MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random(), new RAMDirectory());
  dir1.failOn(new RandomFailure());
  dir2.failOn(new RandomFailure());
  dir1.setFailOnOpenInput(false);
  dir2.setFailOnOpenInput(false);
  // We throw exceptions in deleteFile, which creates
  // leftover files:
  dir1.setAssertNoUnrefencedFilesOnClose(false);
  dir2.setAssertNoUnrefencedFilesOnClose(false);
  initIndex(dir1);
  initIndex(dir2);
  TimedThread[] threads = new TimedThread[3];
  int numThread = 0;
  IndexerThread indexerThread = new IndexerThread(this, dir1, dir2, threads);
  threads[numThread++] = indexerThread;
  indexerThread.start();
  SearcherThread searcherThread1 = new SearcherThread(this, dir1, dir2, threads);
  threads[numThread++] = searcherThread1;
  searcherThread1.start();
  SearcherThread searcherThread2 = new SearcherThread(this, dir1, dir2, threads);
  threads[numThread++] = searcherThread2;
  searcherThread2.start();
  for (int i = 0; i < numThread; i++) threads[i].join();
  for (int i = 0; i < numThread; i++) assertTrue(!threads[i].failed);
  dir1.close();
  dir2.close();
}
12. TestIndexWriterForceMerge#testForceMergeTempSpaceUsage()
Project: lucene-solr
File: TestIndexWriterForceMerge.java
/**
 * Make sure forceMerge doesn't use any more than 1X
 * starting index size as its temporary free space
 * required.
 */
public void testForceMergeTempSpaceUsage() throws IOException {
  final MockDirectoryWrapper dir = newMockDirectory();
  // don't use MockAnalyzer, variable length payloads can cause merge to make things bigger,
  // since things are optimized for fixed length case. this is a problem for MemoryPF's encoding.
  // (it might have other problems too)
  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true));
    }
  };
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
      .setMaxBufferedDocs(10)
      .setMergePolicy(newLogMergePolicy()));
  if (VERBOSE) {
    System.out.println("TEST: config1=" + writer.getConfig());
  }
  for (int j = 0; j < 500; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  // force one extra segment w/ different doc store so
  // we see the doc stores get merged
  writer.commit();
  TestIndexWriter.addDocWithIndex(writer, 500);
  writer.close();
  long startDiskUsage = 0;
  for (String f : dir.listAll()) {
    startDiskUsage += dir.fileLength(f);
    if (VERBOSE) {
      System.out.println(f + ": " + dir.fileLength(f));
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: start disk usage = " + startDiskUsage);
  }
  String startListing = listFiles(dir);
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setOpenMode(OpenMode.APPEND)
      .setMergePolicy(newLogMergePolicy()));
  if (VERBOSE) {
    System.out.println("TEST: config2=" + writer.getConfig());
  }
  writer.forceMerge(1);
  writer.close();
  long finalDiskUsage = 0;
  for (String f : dir.listAll()) {
    finalDiskUsage += dir.fileLength(f);
    if (VERBOSE) {
      System.out.println(f + ": " + dir.fileLength(f));
    }
  }
  if (VERBOSE) {
    System.out.println("TEST: final disk usage = " + finalDiskUsage);
  }
  // The result of the merged index is often smaller, but sometimes it could
  // be bigger (compression slightly changes, Codec changes etc.). Therefore
  // we compare the temp space used to the max of the initial and final index
  // size
  long maxStartFinalDiskUsage = Math.max(startDiskUsage, finalDiskUsage);
  long maxDiskUsage = dir.getMaxUsedSizeInBytes();
  assertTrue("forceMerge used too much temporary space: starting usage was " + startDiskUsage + " bytes; final usage was " + finalDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been at most " + (4 * maxStartFinalDiskUsage) + " (= 4X starting usage), BEFORE=" + startListing + "AFTER=" + listFiles(dir), maxDiskUsage <= 4 * maxStartFinalDiskUsage);
  dir.close();
}
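The example above (and the next one) bounds transient disk space via the wrapper's usage bookkeeping. The core of that pattern as a hedged sketch; the operation being measured and the bound you assert against are up to the test:

// Sketch of transient-disk-usage measurement:
MockDirectoryWrapper dir = newMockDirectory();
// ... build an initial index here ...
dir.resetMaxUsedSizeInBytes(); // restart the high-water mark from the current size
dir.setTrackDiskUsage(true);   // record peak bytes used from now on
// ... run the operation under test, e.g. writer.forceMerge(1) ...
long peak = dir.getMaxUsedSizeInBytes(); // peak usage since the reset
// assert peak against whatever bound the test allows, e.g.:
// assertTrue(peak <= 4 * startDiskUsage);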
13. TestIndexWriterCommit#testCommitOnCloseDiskUsage()
Project: lucene-solr
File: TestIndexWriterCommit.java
/*
 * Verify that a writer with "commit on close" indeed
 * cleans up the temp segments created after opening
 * that are not referenced by the starting segments
 * file. We check this by using MockDirectoryWrapper to
 * measure max temp disk space used.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
  // MemoryCodec, since it uses FST, is not necessarily
  // "additive", ie if you add up N small FSTs, then merge
  // them, the merged result can easily be larger than the
  // sum because the merged FST may use array encoding for
  // some arcs (which uses more space):
  final String idFormat = TestUtil.getPostingsFormat("id");
  final String contentFormat = TestUtil.getPostingsFormat("content");
  assumeFalse("This test cannot run with Memory codec", idFormat.equals("Memory") || contentFormat.equals("Memory"));
  MockDirectoryWrapper dir = newMockDirectory();
  Analyzer analyzer;
  if (random().nextBoolean()) {
    // no payloads
    analyzer = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName) {
        return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true));
      }
    };
  } else {
    // fixed length payloads
    final int length = random().nextInt(200);
    analyzer = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
        return new TokenStreamComponents(tokenizer, new MockFixedLengthPayloadFilter(random(), tokenizer, length));
      }
    };
  }
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
      .setMaxBufferedDocs(10)
      .setReaderPooling(false)
      .setMergePolicy(newLogMergePolicy(10)));
  for (int j = 0; j < 30; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  writer.close();
  dir.resetMaxUsedSizeInBytes();
  dir.setTrackDiskUsage(true);
  long startDiskUsage = dir.getMaxUsedSizeInBytes();
  writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
      .setOpenMode(OpenMode.APPEND)
      .setMaxBufferedDocs(10)
      .setMergeScheduler(new SerialMergeScheduler())
      .setReaderPooling(false)
      .setMergePolicy(newLogMergePolicy(10)));
  for (int j = 0; j < 1470; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  long midDiskUsage = dir.getMaxUsedSizeInBytes();
  dir.resetMaxUsedSizeInBytes();
  writer.forceMerge(1);
  writer.close();
  DirectoryReader.open(dir).close();
  long endDiskUsage = dir.getMaxUsedSizeInBytes();
  // Ending index is 50X as large as starting index; due
  // to 3X disk usage normally we allow 150X max
  // transient usage. If something is wrong w/ deleter
  // and it doesn't delete intermediate segments then it
  // will exceed this 150X:
  // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + "; end " + endDiskUsage);
  assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage * 150), midDiskUsage < 150 * startDiskUsage);
  assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage * 150), endDiskUsage < 150 * startDiskUsage);
  dir.close();
}
14. TestIndexFileDeleter#testExcInDecRef()
Project: lucene-solr
File: TestIndexFileDeleter.java
// LUCENE-5919
public void testExcInDecRef() throws Throwable {
  MockDirectoryWrapper dir = newMockDirectory();
  // disable slow things: we don't rely upon sleeps here.
  dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  dir.setUseSlowOpenClosers(false);
  final AtomicBoolean doFailExc = new AtomicBoolean();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFailExc.get() && random().nextInt(4) == 1) {
        Exception e = new Exception();
        StackTraceElement stack[] = e.getStackTrace();
        for (int i = 0; i < stack.length; i++) {
          if (stack[i].getClassName().equals(IndexFileDeleter.class.getName()) && stack[i].getMethodName().equals("decRef")) {
            throw new RuntimeException("fake fail");
          }
        }
      }
    }
  });
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  //iwc.setMergeScheduler(new SerialMergeScheduler());
  MergeScheduler ms = iwc.getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    final ConcurrentMergeScheduler suppressFakeFail = new ConcurrentMergeScheduler() {
      @Override
      protected void handleMergeException(Directory dir, Throwable exc) {
        // suppress only FakeIOException:
        if (exc instanceof RuntimeException && exc.getMessage().equals("fake fail")) {
          // ok to ignore
        } else if ((exc instanceof AlreadyClosedException || exc instanceof IllegalStateException) && exc.getCause() != null && "fake fail".equals(exc.getCause().getMessage())) {
          // also ok to ignore
        } else {
          super.handleMergeException(dir, exc);
        }
      }
    };
    final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
    suppressFakeFail.setMaxMergesAndThreads(cms.getMaxMergeCount(), cms.getMaxThreadCount());
    iwc.setMergeScheduler(suppressFakeFail);
  }
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  // Since we hit exc during merging, a partial
  // forceMerge can easily return when there are still
  // too many segments in the index:
  w.setDoRandomForceMergeAssert(false);
  doFailExc.set(true);
  int ITERS = atLeast(1000);
  for (int iter = 0; iter < ITERS; iter++) {
    try {
      if (random().nextInt(10) == 5) {
        w.commit();
      } else if (random().nextInt(10) == 7) {
        w.getReader().close();
      } else {
        Document doc = new Document();
        doc.add(newTextField("field", "some text", Field.Store.NO));
        w.addDocument(doc);
      }
    } catch (Throwable t) {
      if (t.toString().contains("fake fail") || (t.getCause() != null && t.getCause().toString().contains("fake fail"))) {
      } else {
        throw t;
      }
    }
  }
  doFailExc.set(false);
  w.close();
  dir.close();
}
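The example above, and many of the examples that follow, share one template: subclass MockDirectoryWrapper.Failure, inspect the current stack trace in eval(), and throw only when the operation originates from the method under attack. A generic hedged sketch of that template; the method name matched here is hypothetical:

// Generic Failure template; "targetMethod" is a hypothetical name to match on.
MockDirectoryWrapper dir = newMockDirectory();
MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    if (!doFail) { // inherited flag, toggled via setDoFail()/clearDoFail()
      return;
    }
    for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
      if ("targetMethod".equals(e.getMethodName())) {
        throw new IOException("injected failure in targetMethod");
      }
    }
  }
};
dir.failOn(failure);   // eval() now runs on every operation against dir
failure.setDoFail();   // arm the failure
// ... exercise the code under test, expecting the injected IOException ...
failure.clearDoFail(); // disarm so cleanup and dir.close() checks can pass
dir.close();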
15. TestPersistentSnapshotDeletionPolicy#testExistingSnapshots()
Project: lucene-solr
File: TestPersistentSnapshotDeletionPolicy.java
@Test
public void testExistingSnapshots() throws Exception {
  int numSnapshots = 3;
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy(dir)));
  PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  assertNull(psdp.getLastSaveFile());
  prepareIndexAndSnapshots(psdp, writer, numSnapshots);
  assertNotNull(psdp.getLastSaveFile());
  writer.close();
  // Make sure only 1 save file exists:
  int count = 0;
  for (String file : dir.listAll()) {
    if (file.startsWith(PersistentSnapshotDeletionPolicy.SNAPSHOTS_PREFIX)) {
      count++;
    }
  }
  assertEquals(1, count);
  // Make sure we fsync:
  dir.crash();
  dir.clearCrash();
  // Re-initialize and verify snapshots were persisted
  psdp = new PersistentSnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy(), dir, OpenMode.APPEND);
  writer = new IndexWriter(dir, getConfig(random(), psdp));
  psdp = (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  assertEquals(numSnapshots, psdp.getSnapshots().size());
  assertEquals(numSnapshots, psdp.getSnapshotCount());
  assertSnapshotExists(dir, psdp, numSnapshots, false);
  writer.addDocument(new Document());
  writer.commit();
  snapshots.add(psdp.snapshot());
  assertEquals(numSnapshots + 1, psdp.getSnapshots().size());
  assertEquals(numSnapshots + 1, psdp.getSnapshotCount());
  assertSnapshotExists(dir, psdp, numSnapshots + 1, false);
  writer.close();
  dir.close();
}
16. BaseIndexFileFormatTestCase#testRandomExceptions()
Project: lucene-solr
File: BaseIndexFileFormatTestCase.java
/** Tests exception handling on write and openInput/createOutput */
// TODO: this is really not ideal. each BaseXXXTestCase should have unit tests doing this.
// but we use this shotgun approach to prevent bugs in the meantime: it just ensures the
// codec does not corrupt the index or leak file handles.
public void testRandomExceptions() throws Exception {
  // disable slow things: we don't rely upon sleeps here.
  MockDirectoryWrapper dir = newMockDirectory();
  dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  dir.setUseSlowOpenClosers(false);
  // more rare
  dir.setRandomIOExceptionRate(0.001);
  // log all exceptions we hit, in case we fail (for debugging)
  ByteArrayOutputStream exceptionLog = new ByteArrayOutputStream();
  PrintStream exceptionStream = new PrintStream(exceptionLog, true, "UTF-8");
  //PrintStream exceptionStream = System.out;
  Analyzer analyzer = new MockAnalyzer(random());
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  // just for now, try to keep this test reproducible
  conf.setMergeScheduler(new SerialMergeScheduler());
  conf.setCodec(getCodec());
  int numDocs = atLeast(500);
  IndexWriter iw = new IndexWriter(dir, conf);
  try {
    boolean allowAlreadyClosed = false;
    for (int i = 0; i < numDocs; i++) {
      // turn on exceptions for openInput/createOutput
      dir.setRandomIOExceptionRateOnOpen(0.02);
      Document doc = new Document();
      doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
      addRandomFields(doc);
      // single doc
      try {
        iw.addDocument(doc);
        // we made it, sometimes delete our doc
        iw.deleteDocuments(new Term("id", Integer.toString(i)));
      } catch (AlreadyClosedException ace) {
        dir.setRandomIOExceptionRateOnOpen(0.0);
        assertTrue(iw.deleter.isClosed());
        assertTrue(allowAlreadyClosed);
        allowAlreadyClosed = false;
        conf = newIndexWriterConfig(analyzer);
        conf.setMergeScheduler(new SerialMergeScheduler());
        conf.setCodec(getCodec());
        iw = new IndexWriter(dir, conf);
      } catch (Exception e) {
        if (e.getMessage() != null && e.getMessage().startsWith("a random IOException")) {
          exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
          e.printStackTrace(exceptionStream);
          allowAlreadyClosed = true;
        } else {
          Rethrow.rethrow(e);
        }
      }
      if (random().nextInt(10) == 0) {
        // trigger flush:
        try {
          if (random().nextBoolean()) {
            DirectoryReader ir = null;
            try {
              ir = DirectoryReader.open(iw, random().nextBoolean(), false);
              // disable exceptions on openInput until next iteration
              dir.setRandomIOExceptionRateOnOpen(0.0);
              TestUtil.checkReader(ir);
            } finally {
              IOUtils.closeWhileHandlingException(ir);
            }
          } else {
            // disable exceptions on openInput until next iteration:
            // or we make slowExists angry and trip a scarier assert!
            dir.setRandomIOExceptionRateOnOpen(0.0);
            iw.commit();
          }
          if (DirectoryReader.indexExists(dir)) {
            TestUtil.checkIndex(dir);
          }
        } catch (AlreadyClosedException ace) {
          dir.setRandomIOExceptionRateOnOpen(0.0);
          assertTrue(iw.deleter.isClosed());
          assertTrue(allowAlreadyClosed);
          allowAlreadyClosed = false;
          conf = newIndexWriterConfig(analyzer);
          conf.setMergeScheduler(new SerialMergeScheduler());
          conf.setCodec(getCodec());
          iw = new IndexWriter(dir, conf);
        } catch (Exception e) {
          if (e.getMessage() != null && e.getMessage().startsWith("a random IOException")) {
            exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
            e.printStackTrace(exceptionStream);
            allowAlreadyClosed = true;
          } else {
            Rethrow.rethrow(e);
          }
        }
      }
    }
    try {
      // disable exceptions on openInput until next iteration:
      // or we make slowExists angry and trip a scarier assert!
      dir.setRandomIOExceptionRateOnOpen(0.0);
      iw.close();
    } catch (Exception e) {
      if (e.getMessage() != null && e.getMessage().startsWith("a random IOException")) {
        exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
        e.printStackTrace(exceptionStream);
        try {
          iw.rollback();
        } catch (Throwable t) {
        }
      } else {
        Rethrow.rethrow(e);
      }
    }
    dir.close();
  } catch (Throwable t) {
    System.out.println("Unexpected exception: dumping fake-exception-log:...");
    exceptionStream.flush();
    System.out.println(exceptionLog.toString("UTF-8"));
    System.out.flush();
    Rethrow.rethrow(t);
  }
  if (VERBOSE) {
    System.out.println("TEST PASSED: dumping fake-exception-log:...");
    System.out.println(exceptionLog.toString("UTF-8"));
  }
}
17. TestTragicIndexWriterDeadlock#testDeadlockExcNRTReaderCommit()
Project: lucene-solr
File: TestTragicIndexWriterDeadlock.java
public void testDeadlockExcNRTReaderCommit() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  if (iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
    iwc.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
      @Override
      protected boolean isOK(Throwable th) {
        return true;
      }
    });
  }
  final IndexWriter w = new IndexWriter(dir, iwc);
  final CountDownLatch startingGun = new CountDownLatch(1);
  final AtomicBoolean done = new AtomicBoolean();
  Thread commitThread = new Thread() {
    @Override
    public void run() {
      try {
        startingGun.await();
        while (done.get() == false) {
          w.addDocument(new Document());
          w.commit();
        }
      } catch (Throwable t) {
        done.set(true);
      }
    }
  };
  commitThread.start();
  final DirectoryReader r0 = DirectoryReader.open(w);
  Thread nrtThread = new Thread() {
    @Override
    public void run() {
      DirectoryReader r = r0;
      try {
        try {
          startingGun.await();
          while (done.get() == false) {
            DirectoryReader oldReader = r;
            DirectoryReader r2 = DirectoryReader.openIfChanged(oldReader);
            if (r2 != null) {
              r = r2;
              oldReader.decRef();
            }
          }
        } finally {
          r.close();
        }
      } catch (Throwable t) {
        done.set(true);
      }
    }
  };
  nrtThread.start();
  dir.setRandomIOExceptionRate(.1);
  startingGun.countDown();
  commitThread.join();
  nrtThread.join();
  dir.setRandomIOExceptionRate(0.0);
  w.close();
  dir.close();
}
18. TestIndexWriterOutOfFileDescriptors#test()
Project: lucene-solr
File: TestIndexWriterOutOfFileDescriptors.java
public void test() throws Exception {
  MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("TestIndexWriterOutOfFileDescriptors"));
  double rate = random().nextDouble() * 0.01;
  //System.out.println("rate=" + rate);
  dir.setRandomIOExceptionRateOnOpen(rate);
  int iters = atLeast(20);
  LineFileDocs docs = new LineFileDocs(random());
  DirectoryReader r = null;
  DirectoryReader r2 = null;
  boolean any = false;
  MockDirectoryWrapper dirCopy = null;
  int lastNumDocs = 0;
  for (int iter = 0; iter < iters; iter++) {
    IndexWriter w = null;
    if (VERBOSE) {
      System.out.println("TEST: iter=" + iter);
    }
    try {
      MockAnalyzer analyzer = new MockAnalyzer(random());
      analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
      IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
      if (VERBOSE) {
        // Do this ourselves instead of relying on LTC so
        // we see incrementing messageID:
        iwc.setInfoStream(new PrintStreamInfoStream(System.out));
      }
      MergeScheduler ms = iwc.getMergeScheduler();
      if (ms instanceof ConcurrentMergeScheduler) {
        ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
      }
      w = new IndexWriter(dir, iwc);
      if (r != null && random().nextInt(5) == 3) {
        if (random().nextBoolean()) {
          if (VERBOSE) {
            System.out.println("TEST: addIndexes LR[]");
          }
          TestUtil.addIndexesSlowly(w, r);
        } else {
          if (VERBOSE) {
            System.out.println("TEST: addIndexes Directory[]");
          }
          w.addIndexes(new Directory[] { dirCopy });
        }
      } else {
        if (VERBOSE) {
          System.out.println("TEST: addDocument");
        }
        w.addDocument(docs.nextDoc());
      }
      dir.setRandomIOExceptionRateOnOpen(0.0);
      if (ms instanceof ConcurrentMergeScheduler) {
        ((ConcurrentMergeScheduler) ms).sync();
      }
      // If exc hit CMS then writer will be tragically closed:
      if (w.getTragicException() == null) {
        w.close();
      }
      w = null;
      // NOTE: This is O(N^2)! Only enable for temporary debugging:
      //dir.setRandomIOExceptionRateOnOpen(0.0);
      //_TestUtil.checkIndex(dir);
      //dir.setRandomIOExceptionRateOnOpen(rate);
      // Verify numDocs only increases, to catch IndexWriter
      // accidentally deleting the index:
      dir.setRandomIOExceptionRateOnOpen(0.0);
      assertTrue(DirectoryReader.indexExists(dir));
      if (r2 == null) {
        r2 = DirectoryReader.open(dir);
      } else {
        DirectoryReader r3 = DirectoryReader.openIfChanged(r2);
        if (r3 != null) {
          r2.close();
          r2 = r3;
        }
      }
      assertTrue("before=" + lastNumDocs + " after=" + r2.numDocs(), r2.numDocs() >= lastNumDocs);
      lastNumDocs = r2.numDocs();
      //System.out.println("numDocs=" + lastNumDocs);
      dir.setRandomIOExceptionRateOnOpen(rate);
      any = true;
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter + ": success");
      }
    } catch (AssertionError | IOException ioe) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter + ": exception");
        ioe.printStackTrace();
      }
      if (w != null) {
        w.rollback();
      }
    }
    if (any && r == null && random().nextBoolean()) {
      // Make a copy of a non-empty index so we can use
      // it to addIndexes later:
      dir.setRandomIOExceptionRateOnOpen(0.0);
      r = DirectoryReader.open(dir);
      dirCopy = newMockFSDirectory(createTempDir("TestIndexWriterOutOfFileDescriptors.copy"));
      Set<String> files = new HashSet<>();
      for (String file : dir.listAll()) {
        if (file.startsWith(IndexFileNames.SEGMENTS) || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) {
          dirCopy.copyFrom(dir, file, file, IOContext.DEFAULT);
          files.add(file);
        }
      }
      dirCopy.sync(files);
      // Have IW kiss the dir so we remove any leftover
      // files ... we can easily have leftover files at
      // the time we take a copy because we are holding
      // open a reader:
      new IndexWriter(dirCopy, newIndexWriterConfig(new MockAnalyzer(random()))).close();
      dirCopy.setRandomIOExceptionRate(rate);
      dir.setRandomIOExceptionRateOnOpen(rate);
    }
  }
  if (r2 != null) {
    r2.close();
  }
  if (r != null) {
    r.close();
    dirCopy.close();
  }
  dir.close();
}
19. TestIndexWriterExceptions#testTooManyFileException()
Project: lucene-solr
File: TestIndexWriterExceptions.java
// See LUCENE-4870 TooManyOpenFiles errors are thrown as
// FNFExceptions which can trigger data loss.
public void testTooManyFileException() throws Exception {
  // Create failure that throws Too many open files exception randomly
  MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
    @Override
    public MockDirectoryWrapper.Failure reset() {
      doFail = false;
      return this;
    }

    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail) {
        if (random().nextBoolean()) {
          throw new FileNotFoundException("some/file/name.ext (Too many open files)");
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  // The exception is only thrown on open input
  dir.setFailOnOpenInput(true);
  dir.failOn(failure);
  // Create an index with one document
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter iw = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  // add a document
  iw.addDocument(doc);
  iw.commit();
  DirectoryReader ir = DirectoryReader.open(dir);
  assertEquals(1, ir.numDocs());
  ir.close();
  iw.close();
  // Open and close the index a few times
  for (int i = 0; i < 10; i++) {
    failure.setDoFail();
    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
    try {
      iw = new IndexWriter(dir, iwc);
    } catch (AssertionError ex) {
      assertTrue(ex.getMessage().matches("file .* does not exist; files=\\[.*\\]"));
    } catch (CorruptIndexException ex) {
      continue;
    } catch (FileNotFoundException | NoSuchFileException ex) {
      continue;
    }
    failure.clearDoFail();
    iw.close();
    ir = DirectoryReader.open(dir);
    assertEquals("lost document after iteration: " + i, 1, ir.numDocs());
    ir.close();
  }
  // Check if document is still there
  failure.clearDoFail();
  ir = DirectoryReader.open(dir);
  assertEquals(1, ir.numDocs());
  ir.close();
  dir.close();
}
20. BaseSegmentInfoFormatTestCase#testExceptionOnCloseInput()
Project: lucene-solr
File: BaseSegmentInfoFormatTestCase.java
/**
 * Test segment infos read that hits exception on close.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCloseInput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "close".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte id[] = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec, Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
21. BaseSegmentInfoFormatTestCase#testExceptionOnOpenInput()
Project: lucene-solr
File: BaseSegmentInfoFormatTestCase.java
/**
 * Test segment infos read that hits exception immediately on open.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnOpenInput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "openInput".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte id[] = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec, Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
22. BaseSegmentInfoFormatTestCase#testExceptionOnCloseOutput()
Project: lucene-solr
File: BaseSegmentInfoFormatTestCase.java
/**
 * Test segment infos write that hits exception on close.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCloseOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "close".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte id[] = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec, Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
23. BaseSegmentInfoFormatTestCase#testExceptionOnCreateOutput()
Project: lucene-solr
File: BaseSegmentInfoFormatTestCase.java
/**
 * Test segment infos write that hits exception immediately on open.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCreateOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "createOutput".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte id[] = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec, Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
24. BaseFieldInfoFormatTestCase#testExceptionOnCloseInput()
Project: lucene-solr
File: BaseFieldInfoFormatTestCase.java
/**
 * Test field infos read that hits exception on close.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCloseInput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "close".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
  FieldInfos.Builder builder = new FieldInfos.Builder();
  FieldInfo fi = builder.getOrAdd("field");
  fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
  addAttributes(fi);
  FieldInfos infos = builder.finish();
  codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
25. BaseFieldInfoFormatTestCase#testExceptionOnOpenInput()
Project: lucene-solr
File: BaseFieldInfoFormatTestCase.java
/**
 * Test field infos read that hits exception immediately on open.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnOpenInput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "openInput".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
  FieldInfos.Builder builder = new FieldInfos.Builder();
  FieldInfo fi = builder.getOrAdd("field");
  fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
  addAttributes(fi);
  FieldInfos infos = builder.finish();
  codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
26. BaseFieldInfoFormatTestCase#testExceptionOnCloseOutput()
Project: lucene-solr
File: BaseFieldInfoFormatTestCase.java
/**
 * Test field infos write that hits exception on close.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCloseOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "close".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
  FieldInfos.Builder builder = new FieldInfos.Builder();
  FieldInfo fi = builder.getOrAdd("field");
  fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
  addAttributes(fi);
  FieldInfos infos = builder.finish();
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
27. BaseFieldInfoFormatTestCase#testExceptionOnCreateOutput()
Project: lucene-solr
File: BaseFieldInfoFormatTestCase.java
/**
 * Test field infos write that hits exception immediately on open.
 * make sure we get our exception back, no file handle leaks, etc.
 */
public void testExceptionOnCreateOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "createOutput".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
  FieldInfos.Builder builder = new FieldInfos.Builder();
  FieldInfo fi = builder.getOrAdd("field");
  fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
  addAttributes(fi);
  FieldInfos infos = builder.finish();
  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
  });
  fail.clearDoFail();
  dir.close();
}
28. TestMergeSchedulerExternal#testSubclassConcurrentMergeScheduler()
Project: lucene-solr
File: TestMergeSchedulerExternal.java
public void testSubclassConcurrentMergeScheduler() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(new FailOnlyOnMerge());
  Document doc = new Document();
  Field idField = newStringField("id", "", Field.Store.YES);
  doc.add(idField);
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergeScheduler(new MyMergeScheduler())
      .setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergePolicy(newLogMergePolicy());
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  infoStream = new PrintStreamInfoStream(new PrintStream(baos, true, IOUtils.UTF_8));
  iwc.setInfoStream(infoStream);
  IndexWriter writer = new IndexWriter(dir, iwc);
  LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
  logMP.setMergeFactor(10);
  for (int i = 0; i < 20; i++) {
    writer.addDocument(doc);
  }
  try {
    ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  } catch (IllegalStateException ise) {
  }
  writer.rollback();
  try {
    assertTrue(mergeThreadCreated);
    assertTrue(mergeCalled);
    assertTrue(excCalled);
  } catch (AssertionError ae) {
    System.out.println("TEST FAILED; IW infoStream output:");
    System.out.println(baos.toString(IOUtils.UTF_8));
    throw ae;
  }
  dir.close();
}
29. TestPersistentSnapshotDeletionPolicy#testExceptionDuringSave()
Project: lucene-solr
File: TestPersistentSnapshotDeletionPolicy.java
public void testExceptionDuringSave() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      StackTraceElement[] trace = Thread.currentThread().getStackTrace();
      for (int i = 0; i < trace.length; i++) {
        if (PersistentSnapshotDeletionPolicy.class.getName().equals(trace[i].getClassName()) && "persist".equals(trace[i].getMethodName())) {
          throw new IOException("now fail on purpose");
        }
      }
    }
  });
  IndexWriter writer = new IndexWriter(dir, getConfig(random(), new PersistentSnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy(), dir, OpenMode.CREATE_OR_APPEND)));
  writer.addDocument(new Document());
  writer.commit();
  PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
  try {
    psdp.snapshot();
  } catch (IOException ioe) {
    if (ioe.getMessage().equals("now fail on purpose")) {
    } else {
      throw ioe;
    }
  }
  assertEquals(0, psdp.getSnapshotCount());
  writer.close();
  assertEquals(1, DirectoryReader.listCommits(dir).size());
  dir.close();
}
30. TestNRTReaderCleanup#testClosingNRTReaderDoesNotCorruptYourIndex()
Project: lucene-solr
File: TestNRTReaderCleanup.java
public void testClosingNRTReaderDoesNotCorruptYourIndex() throws IOException {
  // Windows disallows deleting & overwriting files still
  // open for reading:
  assumeFalse("this test can't run on Windows", Constants.WINDOWS);
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMergeFactor(2);
  iwc.setMergePolicy(lmp);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("a", "foo", Field.Store.NO));
  w.addDocument(doc);
  w.commit();
  w.addDocument(doc);
  // Get a new reader, but this also sets off a merge:
  IndexReader r = w.getReader();
  w.close();
  // Blow away index and make a new writer:
  for (String name : dir.listAll()) {
    dir.deleteFile(name);
  }
  w = new RandomIndexWriter(random(), dir);
  w.addDocument(doc);
  w.close();
  r.close();
  dir.close();
}
31. TestIndexWriterWithThreads#_testSingleThreadFailure()
Project: lucene-solr
File: TestIndexWriterWithThreads.java
// Runs test, with one thread, using the specific failure
// to trigger an IOException
public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setMergeScheduler(new ConcurrentMergeScheduler())
      .setCommitOnClose(false);
  if (iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
    iwc.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
      @Override
      protected boolean isOK(Throwable th) {
        return th instanceof AlreadyClosedException || (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
      }
    });
  }
  IndexWriter writer = new IndexWriter(dir, iwc);
  final Document doc = new Document();
  FieldType customType = new FieldType(TextField.TYPE_STORED);
  customType.setStoreTermVectors(true);
  customType.setStoreTermVectorPositions(true);
  customType.setStoreTermVectorOffsets(true);
  doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
  for (int i = 0; i < 6; i++) {
    writer.addDocument(doc);
  }
  dir.failOn(failure);
  failure.setDoFail();
  expectThrows(IOException.class, () -> {
    writer.addDocument(doc);
    writer.addDocument(doc);
    writer.commit();
  });
  failure.clearDoFail();
  expectThrows(AlreadyClosedException.class, () -> {
    writer.addDocument(doc);
    writer.commit();
    writer.close();
  });
  assertTrue(writer.deleter.isClosed());
  dir.close();
}
32. TestIndexWriterReader#testNRTOpenExceptions()
Project: lucene-solr
File: TestIndexWriterReader.java
@Test
public void testNRTOpenExceptions() throws Exception {
  // LUCENE-5262: test that several failed attempts to obtain an NRT reader
  // don't leak file handles.
  MockDirectoryWrapper dir = (MockDirectoryWrapper) getAssertNoDeletesDirectory(newMockDirectory());
  final AtomicBoolean shouldFail = new AtomicBoolean();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      StackTraceElement[] trace = new Exception().getStackTrace();
      if (shouldFail.get()) {
        for (int i = 0; i < trace.length; i++) {
          if ("getReadOnlyClone".equals(trace[i].getMethodName())) {
            if (VERBOSE) {
              System.out.println("TEST: now fail; exc:");
              new Throwable().printStackTrace(System.out);
            }
            shouldFail.set(false);
            throw new FakeIOException();
          }
        }
      }
    }
  });
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // prevent merges from getting in the way
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  IndexWriter writer = new IndexWriter(dir, conf);
  // create a segment and open an NRT reader
  writer.addDocument(new Document());
  writer.getReader().close();
  // add a new document so a new NRT reader is required
  writer.addDocument(new Document());
  // other NRT reader, since it is already marked closed!
  for (int i = 0; i < 2; i++) {
    shouldFail.set(true);
    expectThrows(FakeIOException.class, () -> {
      writer.getReader().close();
    });
  }
  writer.close();
  dir.close();
}
33. TestIndexWriterOnDiskFull#testCorruptionAfterDiskFullDuringMerge()
Project: lucene-solr
File: TestIndexWriterOnDiskFull.java
// LUCENE-2593
public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  //IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)).setReaderPooling(true));
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergeScheduler(new SerialMergeScheduler())
      .setReaderPooling(true)
      .setMergePolicy(newLogMergePolicy(2)));
  // we can do this because we add/delete/add (and dont merge to "nothing")
  w.setKeepFullyDeletedSegments(true);
  Document doc = new Document();
  doc.add(newTextField("f", "doctor who", Field.Store.NO));
  w.addDocument(doc);
  w.commit();
  w.deleteDocuments(new Term("f", "who"));
  w.addDocument(doc);
  // disk fills up!
  FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
  ftdm.setDoFail();
  dir.failOn(ftdm);
  expectThrows(IOException.class, () -> {
    w.commit();
  });
  assertTrue(ftdm.didFail1 || ftdm.didFail2);
  TestUtil.checkIndex(dir);
  ftdm.clearDoFail();
  expectThrows(AlreadyClosedException.class, () -> {
    w.addDocument(doc);
  });
  dir.close();
}
34. TestIndexWriterMaxDocs#testAddTooManyIndexesCodecReader()
Project: lucene-solr
File: TestIndexWriterMaxDocs.java
/**
 * LUCENE-6299: Test if addindexes(CodecReader[]) prevents exceeding max docs.
 */
public void testAddTooManyIndexesCodecReader() throws Exception {
  // we cheat and add the same one over again... IW wants a write lock on each
  Directory dir = newDirectory(random(), NoLockFactory.INSTANCE);
  Document doc = new Document();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  for (int i = 0; i < 100000; i++) {
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.commit();
  w.close();
  // wrap this with disk full, so test fails faster and doesn't fill up real disks.
  MockDirectoryWrapper dir2 = newMockDirectory();
  w = new IndexWriter(dir2, new IndexWriterConfig(null));
  // don't confuse checkindex
  w.commit();
  // 64KB
  dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536);
  IndexReader r = DirectoryReader.open(dir);
  CodecReader segReader = (CodecReader) r.leaves().get(0).reader();
  CodecReader readers[] = new CodecReader[1 + (IndexWriter.MAX_DOCS / 100000)];
  for (int i = 0; i < readers.length; i++) {
    readers[i] = segReader;
  }
  try {
    w.addIndexes(readers);
    fail("didn't get expected exception");
  } catch (IllegalArgumentException expected) {
  } catch (IOException fakeDiskFull) {
    final Exception e;
    if (fakeDiskFull.getMessage() != null && fakeDiskFull.getMessage().startsWith("fake disk full")) {
      e = new RuntimeException("test failed: IW checks aren't working and we are executing addIndexes");
      e.addSuppressed(fakeDiskFull);
    } else {
      e = fakeDiskFull;
    }
    throw e;
  }
  r.close();
  w.close();
  dir.close();
  dir2.close();
}
35. TestIndexWriterMaxDocs#testAddTooManyIndexesDir()
Project: lucene-solr
File: TestIndexWriterMaxDocs.java
/**
 * LUCENE-6299: Test if addindexes(Dir[]) prevents exceeding max docs.
 */
public void testAddTooManyIndexesDir() throws Exception {
  // we cheat and add the same one over again... IW wants a write lock on each
  Directory dir = newDirectory(random(), NoLockFactory.INSTANCE);
  Document doc = new Document();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  for (int i = 0; i < 100000; i++) {
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.commit();
  w.close();
  // wrap this with disk full, so test fails faster and doesn't fill up real disks.
  MockDirectoryWrapper dir2 = newMockDirectory();
  w = new IndexWriter(dir2, new IndexWriterConfig(null));
  // don't confuse checkindex
  w.commit();
  // 64KB
  dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536);
  Directory dirs[] = new Directory[1 + (IndexWriter.MAX_DOCS / 100000)];
  for (int i = 0; i < dirs.length; i++) {
    // bypass iw check for duplicate dirs
    dirs[i] = new FilterDirectory(dir) {
    };
  }
  try {
    w.addIndexes(dirs);
    fail("didn't get expected exception");
  } catch (IllegalArgumentException expected) {
  } catch (IOException fakeDiskFull) {
    final Exception e;
    if (fakeDiskFull.getMessage() != null && fakeDiskFull.getMessage().startsWith("fake disk full")) {
      e = new RuntimeException("test failed: IW checks aren't working and we are executing addIndexes");
      e.addSuppressed(fakeDiskFull);
    } else {
      e = fakeDiskFull;
    }
    throw e;
  }
  w.close();
  dir.close();
  dir2.close();
}
36. TestIndexWriterExceptions#testMergeExceptionIsTragic()
Project: lucene-solr
File: TestIndexWriterExceptions.java
public void testMergeExceptionIsTragic() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  final AtomicBoolean didFail = new AtomicBoolean();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (random().nextInt(10) != 0) {
        return;
      }
      if (didFail.get()) {
        // Already failed
        return;
      }
      StackTraceElement[] trace = Thread.currentThread().getStackTrace();
      for (int i = 0; i < trace.length; i++) {
        if ("merge".equals(trace[i].getMethodName())) {
          if (VERBOSE) {
            System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
            new Throwable().printStackTrace(System.out);
          }
          didFail.set(true);
          throw new FakeIOException();
        }
      }
    }
  });
  IndexWriterConfig iwc = newIndexWriterConfig();
  MergePolicy mp = iwc.getMergePolicy();
  if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    if (tmp.getMaxMergedSegmentMB() < 0.2) {
      tmp.setMaxMergedSegmentMB(0.2);
    }
  }
  MergeScheduler ms = iwc.getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
  }
  IndexWriter w = new IndexWriter(dir, iwc);
  while (true) {
    try {
      Document doc = new Document();
      doc.add(newStringField("field", "string", Field.Store.NO));
      w.addDocument(doc);
      if (random().nextInt(10) == 7) {
        // Flush new segment:
        DirectoryReader.open(w).close();
      }
    } catch (AlreadyClosedException ace) {
      break;
    } catch (FakeIOException fioe) {
      break;
    }
  }
  assertNotNull(w.getTragicException());
  assertFalse(w.isOpen());
  assertTrue(didFail.get());
  if (ms instanceof ConcurrentMergeScheduler) {
    // Sneaky: CMS's merge thread will be concurrently rolling back IW due
    // to the tragedy, with this main thread, so we have to wait here
    // to ensure the rollback has finished, else MDW still sees open files:
    ((ConcurrentMergeScheduler) ms).sync();
  }
  dir.close();
}
37. TestIndexWriterExceptions#testExceptionDuringSync()
Project: lucene-solr
File: TestIndexWriterExceptions.java
// LUCENE-1044: test exception during sync
public void testExceptionDuringSync() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  FailOnlyInSync failure = new FailOnlyInSync();
  dir.failOn(failure);
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setMergeScheduler(new ConcurrentMergeScheduler())
      .setMergePolicy(newLogMergePolicy(5)));
  failure.setDoFail();
  for (int i = 0; i < 23; i++) {
    addDoc(writer);
    if ((i - 1) % 2 == 0) {
      try {
        writer.commit();
      } catch (IOException ioe) {
      }
    }
  }
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  assertTrue(failure.didFail);
  failure.clearDoFail();
  writer.close();
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(23, reader.numDocs());
  reader.close();
  dir.close();
}
38. TestIndexWriterExceptions#testDocumentsWriterAbort()
Project: lucene-solr
File: TestIndexWriterExceptions.java
// make sure an aborting exception closes the writer:
public void testDocumentsWriterAbort() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  FailOnlyOnFlush failure = new FailOnlyOnFlush();
  failure.setDoFail();
  dir.failOn(failure);
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2));
  Document doc = new Document();
  String contents = "aa bb cc dd ee ff gg hh ii jj kk";
  doc.add(newTextField("content", contents, Field.Store.NO));
  boolean hitError = false;
  writer.addDocument(doc);
  expectThrows(IOException.class, () -> {
    writer.addDocument(doc);
  });
  // only one flush should fail:
  assertFalse(hitError);
  hitError = true;
  assertTrue(writer.deleter.isClosed());
  assertTrue(writer.isClosed());
  assertFalse(DirectoryReader.indexExists(dir));
  dir.close();
}
39. TestIndexWriterDelete#testErrorInDocsWriterAdd()
Project: lucene-solr
File: TestIndexWriterDelete.java
// This test tests that the files created by the docs writer before
// a segment is written are cleaned up if there's an i/o error
public void testErrorInDocsWriterAdd() throws IOException {
  MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
    boolean failed = false;

    @Override
    public MockDirectoryWrapper.Failure reset() {
      failed = false;
      return this;
    }

    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (!failed) {
        failed = true;
        throw new IOException("fail in add doc");
      }
    }
  };
  // create a couple of files
  String[] keywords = { "1", "2" };
  String[] unindexed = { "Netherlands", "Italy" };
  String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" };
  String[] text = { "Amsterdam", "Venice" };
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
  modifier.commit();
  dir.failOn(failure.reset());
  FieldType custom1 = new FieldType();
  custom1.setStored(true);
  for (int i = 0; i < keywords.length; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", keywords[i], Field.Store.YES));
    doc.add(newField("country", unindexed[i], custom1));
    doc.add(newTextField("contents", unstored[i], Field.Store.NO));
    doc.add(newTextField("city", text[i], Field.Store.YES));
    try {
      modifier.addDocument(doc);
    } catch (IOException io) {
      if (VERBOSE) {
        System.out.println("TEST: got expected exc:");
        io.printStackTrace(System.out);
      }
      break;
    }
  }
  assertTrue(modifier.deleter.isClosed());
  TestIndexWriter.assertNoUnreferencedFiles(dir, "docsWriter.abort() failed to delete unreferenced files");
  dir.close();
}
40. TestIndexWriterDelete#testErrorAfterApplyDeletes()
Project: lucene-solr
File: TestIndexWriterDelete.java
// This test tests that buffered deletes are cleared when
// an Exception is hit during flush.
public void testErrorAfterApplyDeletes() throws IOException {
  MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
    boolean sawMaybe = false;
    boolean failed = false;
    Thread thread;

    @Override
    public MockDirectoryWrapper.Failure reset() {
      thread = Thread.currentThread();
      sawMaybe = false;
      failed = false;
      return this;
    }

    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (Thread.currentThread() != thread) {
        // don't fail during merging
        return;
      }
      if (sawMaybe && !failed) {
        boolean seen = false;
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletesAndUpdates".equals(trace[i].getMethodName()) || "slowFileExists".equals(trace[i].getMethodName())) {
            seen = true;
            break;
          }
        }
        if (!seen) {
          // Only fail once we are no longer in applyDeletes
          failed = true;
          if (VERBOSE) {
            System.out.println("TEST: mock failure: now fail");
            new Throwable().printStackTrace(System.out);
          }
          throw new RuntimeException("fail after applyDeletes");
        }
      }
      if (!failed) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletesAndUpdates".equals(trace[i].getMethodName())) {
            if (VERBOSE) {
              System.out.println("TEST: mock failure: saw applyDeletes");
              new Throwable().printStackTrace(System.out);
            }
            sawMaybe = true;
            break;
          }
        }
      }
    }
  };
  // create a couple of files
  String[] keywords = { "1", "2" };
  String[] unindexed = { "Netherlands", "Italy" };
  String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" };
  String[] text = { "Amsterdam", "Venice" };
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
      .setMaxBufferedDeleteTerms(2)
      .setReaderPooling(false)
      .setMergePolicy(newLogMergePolicy()));
  MergePolicy lmp = modifier.getConfig().getMergePolicy();
  lmp.setNoCFSRatio(1.0);
  dir.failOn(failure.reset());
  FieldType custom1 = new FieldType();
  custom1.setStored(true);
  for (int i = 0; i < keywords.length; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", keywords[i], Field.Store.YES));
    doc.add(newField("country", unindexed[i], custom1));
    doc.add(newTextField("contents", unstored[i], Field.Store.NO));
    doc.add(newTextField("city", text[i], Field.Store.YES));
    modifier.addDocument(doc);
  }
  if (VERBOSE) {
    System.out.println("TEST: now full merge");
  }
  modifier.forceMerge(1);
  if (VERBOSE) {
    System.out.println("TEST: now commit");
  }
  modifier.commit();
  // one of the two files hits
  Term term = new Term("city", "Amsterdam");
  int hitCount = getHitCount(dir, term);
  assertEquals(1, hitCount);
  if (VERBOSE) {
    System.out.println("TEST: delete term=" + term);
  }
  modifier.deleteDocuments(term);
  if (VERBOSE) {
    System.out.println("TEST: add empty doc");
  }
  Document doc = new Document();
  modifier.addDocument(doc);
  if (VERBOSE) {
    System.out.println("TEST: now commit for failure");
  }
  RuntimeException expected = expectThrows(RuntimeException.class, () -> {
    modifier.commit();
  });
  if (VERBOSE) {
    System.out.println("TEST: hit exc:");
    expected.printStackTrace(System.out);
  }
  // The commit above failed, so we need to retry it (which will
  // succeed, because the failure is a one-shot)
  boolean writerClosed;
  try {
    modifier.commit();
    writerClosed = false;
  } catch (IllegalStateException ise) {
    writerClosed = true;
  }
  if (writerClosed == false) {
    hitCount = getHitCount(dir, term);
    // Make sure the delete was successfully flushed:
    assertEquals(0, hitCount);
    modifier.close();
  }
  dir.close();
}
41. TestDirectoryReaderReopen#testOverDecRefDuringReopen()
Project: lucene-solr
File: TestDirectoryReaderReopen.java
public void testOverDecRefDuringReopen() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  iwc.setCodec(TestUtil.getDefaultCodec());
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(newStringField("id", "id", Field.Store.NO));
  w.addDocument(doc);
  doc = new Document();
  doc.add(newStringField("id", "id2", Field.Store.NO));
  w.addDocument(doc);
  w.commit();
  // Open reader w/ one segment w/ 2 docs:
  DirectoryReader r = DirectoryReader.open(dir);
  // Delete 1 doc from the segment:
  w.deleteDocuments(new Term("id", "id"));
  w.commit();
  // Fail when reopen tries to open the live docs file:
  dir.failOn(new MockDirectoryWrapper.Failure() {
    boolean failed;

    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (failed) {
        return;
      }
      StackTraceElement[] trace = new Exception().getStackTrace();
      for (int i = 0; i < trace.length; i++) {
        if ("readLiveDocs".equals(trace[i].getMethodName())) {
          if (VERBOSE) {
            System.out.println("TEST: now fail; exc:");
            new Throwable().printStackTrace(System.out);
          }
          failed = true;
          throw new FakeIOException();
        }
      }
    }
  });
  // Now reopen:
  expectThrows(FakeIOException.class, () -> {
    DirectoryReader.openIfChanged(r);
  });
  IndexSearcher s = newSearcher(r);
  assertEquals(1, s.search(new TermQuery(new Term("id", "id")), 1).totalHits);
  r.close();
  w.close();
  dir.close();
}
42. TestCrash#testCrashAfterReopen()
Project: lucene-solr
File: TestCrash.java
public void testCrashAfterReopen() throws IOException {
  IndexWriter writer = initIndex(random(), false);
  MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
  // We create leftover files because merging could be
  // running when we crash:
  dir.setAssertNoUnrefencedFilesOnClose(false);
  writer.close();
  writer = initIndex(random(), dir, false, true);
  assertEquals(314, writer.maxDoc());
  crash(writer);
  /*
  System.out.println("\n\nTEST: open reader");
  String[] l = dir.list();
  Arrays.sort(l);
  for(int i=0;i<l.length;i++)
    System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
  */
  IndexReader reader = DirectoryReader.open(dir);
  assertTrue(reader.numDocs() >= 157);
  reader.close();
  // Make a new dir, copying from the crashed dir, and
  // open IW on it, to confirm IW "recovers" after a
  // crash:
  Directory dir2 = newDirectory(dir);
  dir.close();
  new RandomIndexWriter(random(), dir2).close();
  dir2.close();
}
43. TestCrash#testWriterAfterCrash()
Project: lucene-solr
File: TestCrash.java
public void testWriterAfterCrash() throws IOException {
  // This test relies on being able to open a reader before any commit
  // happened, so we must create an initial commit just to allow that, but
  // before any documents were added.
  if (VERBOSE) {
    System.out.println("TEST: initIndex");
  }
  IndexWriter writer = initIndex(random(), true);
  if (VERBOSE) {
    System.out.println("TEST: done initIndex");
  }
  MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
  // We create leftover files because merging could be
  // running / store files could be open when we crash:
  dir.setAssertNoUnrefencedFilesOnClose(false);
  if (VERBOSE) {
    System.out.println("TEST: now crash");
  }
  crash(writer);
  writer = initIndex(random(), dir, false, true);
  writer.close();
  IndexReader reader = DirectoryReader.open(dir);
  assertTrue(reader.numDocs() < 314);
  reader.close();
  // Make a new dir, copying from the crashed dir, and
  // open IW on it, to confirm IW "recovers" after a
  // crash:
  Directory dir2 = newDirectory(dir);
  dir.close();
  new RandomIndexWriter(random(), dir2).close();
  dir2.close();
}
44. TestCrash#testCrashWhileIndexing()
Project: lucene-solr
File: TestCrash.java
public void testCrashWhileIndexing() throws IOException {
  // This test relies on being able to open a reader before any commit
  // happened, so we must create an initial commit just to allow that, but
  // before any documents were added.
  IndexWriter writer = initIndex(random(), true);
  MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
  // We create leftover files because merging could be
  // running when we crash:
  dir.setAssertNoUnrefencedFilesOnClose(false);
  crash(writer);
  IndexReader reader = DirectoryReader.open(dir);
  assertTrue(reader.numDocs() < 157);
  reader.close();
  // Make a new dir, copying from the crashed dir, and
  // open IW on it, to confirm IW "recovers" after a
  // crash:
  Directory dir2 = newDirectory(dir);
  dir.close();
  new RandomIndexWriter(random(), dir2).close();
  dir2.close();
}
45. TestCrash#crash()
Project: lucene-solr
File: TestCrash.java
private void crash(final IndexWriter writer) throws IOException {
  final MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
  ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler();
  cms.sync();
  dir.crash();
  cms.sync();
  dir.clearCrash();
}
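The crash() helper shows the standard recipe: quiesce merges, simulate the crash, then clear the crash flag so the directory can be used again. A standalone sketch of the same sequence, using only calls that already appear in this listing; the final assertion is the expected outcome under these assumptions, not a verified test:

MockDirectoryWrapper dir = newMockDirectory();
dir.setAssertNoUnrefencedFilesOnClose(false); // crash simulation can leave stray files, as the tests above note
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
w.addDocument(new Document());
w.commit();                    // synced state; should survive the simulated crash
w.addDocument(new Document()); // buffered only; lost in the crash
dir.crash();                   // drop/corrupt everything not yet synced, like a power loss
dir.clearCrash();              // re-enable normal use of the directory
w.rollback();                  // abandon the writer after the "crash"
DirectoryReader r = DirectoryReader.open(dir);
assertEquals(1, r.numDocs());  // only the committed document remains visible
r.close();
dir.close();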
46. TestConcurrentMergeScheduler#testFlushExceptions()
Project: lucene-solr
File: TestConcurrentMergeScheduler.java
// Make sure running BG merges still work fine even when
// we are hitting exceptions during flushing.
public void testFlushExceptions() throws IOException {
  MockDirectoryWrapper directory = newMockDirectory();
  FailOnlyOnFlush failure = new FailOnlyOnFlush();
  directory.failOn(failure);
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2);
  if (iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
    iwc.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
      @Override
      protected boolean isOK(Throwable th) {
        return th instanceof AlreadyClosedException
            || (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
      }
    });
  }
  IndexWriter writer = new IndexWriter(directory, iwc);
  Document doc = new Document();
  Field idField = newStringField("id", "", Field.Store.YES);
  doc.add(idField);
  outer:
  for (int i = 0; i < 10; i++) {
    if (VERBOSE) {
      System.out.println("TEST: iter=" + i);
    }
    for (int j = 0; j < 20; j++) {
      idField.setStringValue(Integer.toString(i * 20 + j));
      writer.addDocument(doc);
    }
    // flush, and we don't hit the exception
    while (true) {
      writer.addDocument(doc);
      failure.setDoFail();
      try {
        writer.flush(true, true);
        if (failure.hitExc) {
          fail("failed to hit IOException");
        }
      } catch (IOException ioe) {
        if (VERBOSE) {
          ioe.printStackTrace(System.out);
        }
        failure.clearDoFail();
        assertTrue(writer.isClosed());
        assertTrue(writer.deleter.isClosed());
        break outer;
      }
    }
  }
  assertFalse(DirectoryReader.indexExists(directory));
  directory.close();
}
47. Test4GBStoredFields#test()
Project: lucene-solr
File: Test4GBStoredFields.java
@Nightly
public void test() throws Exception {
  MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(createTempDir("4GBStoredFields")));
  dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  iwc.setRAMBufferSizeMB(256.0);
  iwc.setMergeScheduler(new ConcurrentMergeScheduler());
  iwc.setMergePolicy(newLogMergePolicy(false, 10));
  iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
  // maybe we should factor out crazy cases to ExtremeCompressing? then annotations can handle this stuff...
  if (random().nextBoolean()) {
    iwc.setCodec(CompressingCodec.reasonableInstance(random()));
  }
  IndexWriter w = new IndexWriter(dir, iwc);
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogByteSizeMergePolicy) {
    // 1 petabyte:
    ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024 * 1024 * 1024);
  }
  final Document doc = new Document();
  final FieldType ft = new FieldType();
  ft.setStored(true);
  ft.freeze();
  final int valueLength = RandomInts.randomIntBetween(random(), 1 << 13, 1 << 20);
  final byte[] value = new byte[valueLength];
  for (int i = 0; i < valueLength; ++i) {
    // random so that even compressing codecs can't compress it
    value[i] = (byte) random().nextInt(256);
  }
  final Field f = new Field("fld", value, ft);
  doc.add(f);
  final int numDocs = (int) ((1L << 32) / valueLength + 100);
  for (int i = 0; i < numDocs; ++i) {
    w.addDocument(doc);
    if (VERBOSE && i % (numDocs / 10) == 0) {
      System.out.println(i + " of " + numDocs + "...");
    }
  }
  w.forceMerge(1);
  w.close();
  if (VERBOSE) {
    boolean found = false;
    for (String file : dir.listAll()) {
      if (file.endsWith(".fdt")) {
        final long fileLength = dir.fileLength(file);
        if (fileLength >= 1L << 32) {
          found = true;
        }
        System.out.println("File length of " + file + " : " + fileLength);
      }
    }
    if (!found) {
      System.out.println("No .fdt file larger than 4GB, test bug?");
    }
  }
  DirectoryReader rd = DirectoryReader.open(dir);
  Document sd = rd.document(numDocs - 1);
  assertNotNull(sd);
  assertEquals(1, sd.getFields().size());
  BytesRef valueRef = sd.getBinaryValue("fld");
  assertNotNull(valueRef);
  assertEquals(new BytesRef(value), valueRef);
  rd.close();
  dir.close();
}
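The throttling call is worth a note: MockDirectoryWrapper can artificially slow writes, which would make a 4GB stored-fields test unbearably slow. A short sketch of the three modes; the enum constants are real, the comments are this listing's gloss:

MockDirectoryWrapper dir = newMockDirectory();
dir.setThrottling(MockDirectoryWrapper.Throttling.ALWAYS);    // throttle every file write
dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES); // randomly throttle some writes
dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);     // no artificial slowdown, as the 4GB test above selects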
48. LuceneTests#testPruneUnreferencedFiles()
Project: elasticsearch
File: LuceneTests.java
public void testPruneUnreferencedFiles() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  IndexWriter writer = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  doc = new Document();
  doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
  doc = new Document();
  doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.deleteDocuments(new Term("id", "2"));
  writer.commit();
  DirectoryReader open = DirectoryReader.open(writer);
  assertEquals(3, open.numDocs());
  assertEquals(1, open.numDeletedDocs());
  assertEquals(4, open.maxDoc());
  open.close();
  writer.close();
  SegmentInfos si = Lucene.pruneUnreferencedFiles(segmentCommitInfos.getSegmentsFileName(), dir);
  assertEquals(si.getSegmentsFileName(), segmentCommitInfos.getSegmentsFileName());
  open = DirectoryReader.open(dir);
  assertEquals(3, open.numDocs());
  assertEquals(0, open.numDeletedDocs());
  assertEquals(3, open.maxDoc());
  IndexSearcher s = new IndexSearcher(open);
  assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits, 1);
  assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits, 1);
  assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits, 1);
  assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0);
  for (String file : dir.listAll()) {
    assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2"));
  }
  open.close();
  dir.close();
}
49. TestIndexFileDeleter#testGenerationInflation()
Project: lucene-solr
File: TestIndexFileDeleter.java
public void testGenerationInflation() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  // initial commit
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
  iw.addDocument(new Document());
  iw.commit();
  iw.close();
  // no deletes: start at 1
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.info(0).getNextDelGen());
  // no inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(1, sis.info(0).getNextDelGen());
  // add trash per-segment deletes file
  dir.createOutput(IndexFileNames.fileNameFromGeneration("_0", "del", 2), IOContext.DEFAULT).close();
  // ensure inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(3, sis.info(0).getNextDelGen());
  dir.close();
}
50. TestIndexFileDeleter#testSegmentNameInflation()
Project: lucene-solr
File: TestIndexFileDeleter.java
public void testSegmentNameInflation() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  // empty commit
  new IndexWriter(dir, new IndexWriterConfig(null)).close();
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(0, sis.counter);
  // no inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(0, sis.counter);
  // add trash per-segment file
  dir.createOutput(IndexFileNames.segmentFileName("_0", "", "foo"), IOContext.DEFAULT).close();
  // ensure inflation
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(1, sis.counter);
  // add trash per-segment file
  dir.createOutput(IndexFileNames.segmentFileName("_3", "", "foo"), IOContext.DEFAULT).close();
  inflateGens(sis, Arrays.asList(dir.listAll()), InfoStream.getDefault());
  assertEquals(4, sis.counter);
  // ensure we write _4 segment next
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
  iw.addDocument(new Document());
  iw.commit();
  iw.close();
  sis = SegmentInfos.readLatestCommit(dir);
  assertEquals("_4", sis.info(0).info.name);
  assertEquals(5, sis.counter);
  dir.close();
}
51. TestForTooMuchCloning#test()
Project: lucene-solr
File: TestForTooMuchCloning.java
// Make sure we don't clone IndexInputs too frequently
// during merging and searching:
public void test() throws Exception {
  final MockDirectoryWrapper dir = newMockDirectory();
  final TieredMergePolicy tmp = new TieredMergePolicy();
  tmp.setMaxMergeAtOnce(2);
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(tmp));
  final int numDocs = 20;
  for (int docs = 0; docs < numDocs; docs++) {
    StringBuilder sb = new StringBuilder();
    for (int terms = 0; terms < 100; terms++) {
      sb.append(TestUtil.randomRealisticUnicodeString(random()));
      sb.append(' ');
    }
    final Document doc = new Document();
    doc.add(new TextField("field", sb.toString(), Field.Store.NO));
    w.addDocument(doc);
  }
  final IndexReader r = w.getReader();
  w.close();
  //System.out.println("merge clone count=" + cloneCount);
  assertTrue("too many calls to IndexInput.clone during merging: " + dir.getInputCloneCount(),
      dir.getInputCloneCount() < 500);
  final IndexSearcher s = newSearcher(r);
  // important: set this after newSearcher, it might have run checkindex
  final int cloneCount = dir.getInputCloneCount();
  // dir.setVerboseClone(true);
  // MTQ that matches all terms so the AUTO_REWRITE should
  // cutover to filter rewrite and reuse a single DocsEnum
  // across all terms;
  final TopDocs hits = s.search(new TermRangeQuery("field", new BytesRef(), new BytesRef("\uFFFF"), true, true), 10);
  assertTrue(hits.totalHits > 0);
  final int queryCloneCount = dir.getInputCloneCount() - cloneCount;
  //System.out.println("query clone count=" + queryCloneCount);
  assertTrue("too many calls to IndexInput.clone during TermRangeQuery: " + queryCloneCount,
      queryCloneCount < 50);
  r.close();
  dir.close();
}
52. TestConcurrentMergeScheduler#testNoStallMergeThreads()
Project: lucene-solr
File: TestConcurrentMergeScheduler.java
// LUCENE-6197
public void testNoStallMergeThreads() throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int i = 0; i < 1000; i++) {
    Document doc = new Document();
    doc.add(newStringField("field", "" + i, Field.Store.YES));
    w.addDocument(doc);
  }
  w.close();
  iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  AtomicBoolean failed = new AtomicBoolean();
  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
    @Override
    protected void doStall() {
      if (Thread.currentThread().getName().startsWith("Lucene Merge Thread")) {
        failed.set(true);
      }
      super.doStall();
    }
  };
  cms.setMaxMergesAndThreads(2, 1);
  iwc.setMergeScheduler(cms);
  iwc.setMaxBufferedDocs(2);
  w = new IndexWriter(dir, iwc);
  w.forceMerge(1);
  w.close();
  dir.close();
  assertFalse(failed.get());
}
53. TestAddIndexes#testNonCFSLeftovers()
Project: lucene-solr
File: TestAddIndexes.java
// LUCENE-2790: tests that the non CFS files were deleted by addIndexes
public void testNonCFSLeftovers() throws Exception {
  Directory[] dirs = new Directory[2];
  for (int i = 0; i < dirs.length; i++) {
    dirs[i] = new RAMDirectory();
    IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(new MockAnalyzer(random())));
    Document d = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.setStoreTermVectors(true);
    d.add(new Field("c", "v", customType));
    w.addDocument(d);
    w.close();
  }
  DirectoryReader[] readers = new DirectoryReader[] { DirectoryReader.open(dirs[0]), DirectoryReader.open(dirs[1]) };
  MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory());
  IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(true));
  MergePolicy lmp = conf.getMergePolicy();
  // Force creation of CFS:
  lmp.setNoCFSRatio(1.0);
  lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
  IndexWriter w3 = new IndexWriter(dir, conf);
  TestUtil.addIndexesSlowly(w3, readers);
  w3.close();
  // we should now see segments_X,
  // _Y.cfs, _Y.cfe, _Z.si
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals("Only one compound segment should exist", 1, sis.size());
  assertTrue(sis.info(0).info.getUseCompoundFile());
  dir.close();
}
54. LuceneTests#testNumDocs()
Project: elasticsearch
File: LuceneTests.java
public void testNumDocs() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  IndexWriter writer = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
  assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
  doc = new Document();
  doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  segmentCommitInfos = Lucene.readSegmentInfos(dir);
  assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
  writer.commit();
  segmentCommitInfos = Lucene.readSegmentInfos(dir);
  assertEquals(3, Lucene.getNumDocs(segmentCommitInfos));
  writer.deleteDocuments(new Term("id", "2"));
  writer.commit();
  segmentCommitInfos = Lucene.readSegmentInfos(dir);
  assertEquals(2, Lucene.getNumDocs(segmentCommitInfos));
  int numDocsToIndex = randomIntBetween(10, 50);
  List<Term> deleteTerms = new ArrayList<>();
  for (int i = 0; i < numDocsToIndex; i++) {
    doc = new Document();
    doc.add(new TextField("id", "extra_" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    deleteTerms.add(new Term("id", "extra_" + i));
    writer.addDocument(doc);
  }
  int numDocsToDelete = randomIntBetween(0, numDocsToIndex);
  Collections.shuffle(deleteTerms, random());
  for (int i = 0; i < numDocsToDelete; i++) {
    Term remove = deleteTerms.remove(0);
    writer.deleteDocuments(remove);
  }
  writer.commit();
  segmentCommitInfos = Lucene.readSegmentInfos(dir);
  assertEquals(2 + deleteTerms.size(), Lucene.getNumDocs(segmentCommitInfos));
  writer.close();
  dir.close();
}
55. LuceneTests#testFiles()
Project: elasticsearch
File: LuceneTests.java
public void testFiles() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  iwc.setUseCompoundFile(true);
  IndexWriter writer = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  Set<String> files = new HashSet<>();
  for (String f : Lucene.files(Lucene.readSegmentInfos(dir))) {
    files.add(f);
  }
  final boolean simpleTextCFS = files.contains("_0.scf");
  assertTrue(files.toString(), files.contains("segments_1"));
  if (simpleTextCFS) {
    assertFalse(files.toString(), files.contains("_0.cfs"));
    assertFalse(files.toString(), files.contains("_0.cfe"));
  } else {
    assertTrue(files.toString(), files.contains("_0.cfs"));
    assertTrue(files.toString(), files.contains("_0.cfe"));
  }
  assertTrue(files.toString(), files.contains("_0.si"));
  doc = new Document();
  doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  files.clear();
  for (String f : Lucene.files(Lucene.readSegmentInfos(dir))) {
    files.add(f);
  }
  assertFalse(files.toString(), files.contains("segments_1"));
  assertTrue(files.toString(), files.contains("segments_2"));
  if (simpleTextCFS) {
    assertFalse(files.toString(), files.contains("_0.cfs"));
    assertFalse(files.toString(), files.contains("_0.cfe"));
  } else {
    assertTrue(files.toString(), files.contains("_0.cfs"));
    assertTrue(files.toString(), files.contains("_0.cfe"));
  }
  assertTrue(files.toString(), files.contains("_0.si"));
  if (simpleTextCFS) {
    assertFalse(files.toString(), files.contains("_1.cfs"));
    assertFalse(files.toString(), files.contains("_1.cfe"));
  } else {
    assertTrue(files.toString(), files.contains("_1.cfs"));
    assertTrue(files.toString(), files.contains("_1.cfe"));
  }
  assertTrue(files.toString(), files.contains("_1.si"));
  writer.close();
  dir.close();
}
56. LuceneTests#testCleanIndex()
Project: elasticsearch
File: LuceneTests.java
public void testCleanIndex() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  IndexWriter writer = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  doc = new Document();
  doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  doc = new Document();
  doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.deleteDocuments(new Term("id", "2"));
  writer.commit();
  try (DirectoryReader open = DirectoryReader.open(writer)) {
    assertEquals(3, open.numDocs());
    assertEquals(1, open.numDeletedDocs());
    assertEquals(4, open.maxDoc());
  }
  writer.close();
  if (random().nextBoolean()) {
    for (String file : dir.listAll()) {
      if (file.startsWith("_1")) {
        // delete a random file
        dir.deleteFile(file);
        break;
      }
    }
  }
  Lucene.cleanLuceneIndex(dir);
  if (dir.listAll().length > 0) {
    for (String file : dir.listAll()) {
      if (file.startsWith("extra") == false) {
        assertEquals(file, "write.lock");
      }
    }
  }
  dir.close();
}
57. LuceneTests#testWaitForIndex()
Project: elasticsearch
File: LuceneTests.java
public void testWaitForIndex() throws Exception {
  final MockDirectoryWrapper dir = newMockDirectory();
  final AtomicBoolean succeeded = new AtomicBoolean(false);
  final CountDownLatch latch = new CountDownLatch(1);
  // Create a shadow Engine, which will freak out because there is no
  // index yet
  Thread t = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        latch.await();
        if (Lucene.waitForIndex(dir, 5000)) {
          succeeded.set(true);
        } else {
          fail("index should have eventually existed!");
        }
      } catch (InterruptedException e) {
      } catch (Exception e) {
        fail("should have been able to create the engine! " + e.getMessage());
      }
    }
  });
  t.start();
  // count down latch
  // now shadow engine should try to be created
  latch.countDown();
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  iwc.setMaxBufferedDocs(2);
  IndexWriter writer = new IndexWriter(dir, iwc);
  Document doc = new Document();
  doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  t.join();
  writer.close();
  dir.close();
  assertTrue("index should have eventually existed", succeeded.get());
}
58. BaseCompoundFormatTestCase#testManySubFiles()
Project: lucene-solr
File: BaseCompoundFormatTestCase.java
// Make sure we don't somehow use more than 1 descriptor
// when reading a CFS with many subs:
public void testManySubFiles() throws IOException {
  final MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("CFSManySubFiles"));
  final int FILE_COUNT = atLeast(500);
  List<String> files = new ArrayList<>();
  SegmentInfo si = newSegmentInfo(dir, "_123");
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    String file = "_123." + fileIdx;
    files.add(file);
    try (IndexOutput out = dir.createOutput(file, newIOContext(random()))) {
      CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
      out.writeByte((byte) fileIdx);
      CodecUtil.writeFooter(out);
    }
  }
  assertEquals(0, dir.getFileHandleCount());
  si.setFiles(files);
  si.getCodec().compoundFormat().write(dir, si, IOContext.DEFAULT);
  Directory cfs = si.getCodec().compoundFormat().getCompoundReader(dir, si, IOContext.DEFAULT);
  final IndexInput[] ins = new IndexInput[FILE_COUNT];
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx] = cfs.openInput("_123." + fileIdx, newIOContext(random()));
    CodecUtil.checkIndexHeader(ins[fileIdx], "Foo", 0, 0, si.getId(), "suffix");
  }
  assertEquals(1, dir.getFileHandleCount());
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    assertEquals((byte) fileIdx, ins[fileIdx].readByte());
  }
  assertEquals(1, dir.getFileHandleCount());
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx].close();
  }
  cfs.close();
  dir.close();
}
59. TestSloppyPhraseQuery#checkPhraseQuery()
Project: lucene-solr
File: TestSloppyPhraseQuery.java
private float checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults) throws Exception {
  PhraseQuery.Builder builder = new PhraseQuery.Builder();
  Term[] terms = query.getTerms();
  int[] positions = query.getPositions();
  for (int i = 0; i < terms.length; ++i) {
    builder.add(terms[i], positions[i]);
  }
  builder.setSlop(slop);
  query = builder.build();
  MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random(), new RAMDirectory());
  RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  MaxFreqCollector c = new MaxFreqCollector();
  searcher.search(query, c);
  assertEquals("slop: " + slop + " query: " + query + " doc: " + doc + " Wrong number of hits", expectedNumResults, c.totalHits);
  //QueryUtils.check(query,searcher);
  writer.close();
  reader.close();
  ramDir.close();
  // with these different tokens/distributions/lengths.. otherwise this test is very fragile.
  return c.max;
}
60. StoreTests#testMarkCorruptedOnTruncatedSegmentsFile()
Project: elasticsearch
File: StoreTests.java
public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
  IndexWriterConfig iwc = newIndexWriterConfig();
  final ShardId shardId = new ShardId("index", "_na_", 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
  IndexWriter writer = new IndexWriter(store.directory(), iwc);
  int numDocs = 1 + random().nextInt(10);
  List<Document> docs = new ArrayList<>();
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
    docs.add(doc);
  }
  for (Document d : docs) {
    writer.addDocument(d);
  }
  writer.commit();
  writer.close();
  MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (leaf != null) {
    // I do this on purpose
    leaf.setPreventDoubleWrite(false);
  }
  SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo();
  try (IndexOutput out = store.directory().createOutput(segmentCommitInfos.getSegmentsFileName(), IOContext.DEFAULT)) {
    // truncate the segments_N file by overwriting it with an empty file
  }
  try {
    if (randomBoolean()) {
      store.getMetadata();
    } else {
      store.readLastCommittedSegmentsInfo();
    }
    fail("corrupted segments_N file");
  } catch (CorruptIndexException ex) {
  }
  assertTrue(store.isMarkedCorrupted());
  // we have to remove the index since it's corrupted and might fail the MockDirWrapper checkindex call
  Lucene.cleanLuceneIndex(store.directory());
  store.close();
}
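DirectoryUtils.getLeaf, used here and in the following InternalEngineTests examples, unwraps Elasticsearch's directory wrappers to find the MockDirectoryWrapper underneath. A hypothetical reimplementation for orientation only; the real helper's behavior may differ in details:

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;

static <T extends Directory> T getLeaf(Directory dir, Class<T> target) {
  // Walk down the FilterDirectory delegation chain until we either
  // find an instance of the target class or run out of wrappers.
  while (true) {
    if (target.isInstance(dir)) {
      return target.cast(dir);
    }
    if (dir instanceof FilterDirectory) {
      dir = ((FilterDirectory) dir).getDelegate();
    } else {
      return null; // no MockDirectoryWrapper in the chain (e.g. a plain FS directory)
    }
  }
}

This is why the callers null-check the result: in non-mock runs there is nothing to unwrap.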
61. InternalEngineTests#testRecoverFromForeignTranslog()
Project: elasticsearch
File: InternalEngineTests.java
public void testRecoverFromForeignTranslog() throws IOException {
  final int numDocs = randomIntBetween(1, 10);
  for (int i = 0; i < numDocs; i++) {
    ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
    engine.index(firstIndexRequest);
    assertThat(firstIndexRequest.version(), equalTo(1L));
  }
  engine.refresh("test");
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (directory != null) {
    // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
    // this so we have to disable the check explicitly
    directory.setPreventDoubleWrite(false);
  }
  Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
  engine.close();
  Translog translog = new Translog(new TranslogConfig(shardId, createTempDir(), INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), null);
  translog.add(new Translog.Index("test", "SomeBogusId", "{}".getBytes(Charset.forName("UTF-8"))));
  assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
  translog.close();
  EngineConfig config = engine.config();
  /* create a TranslogConfig that has been created with a different UUID */
  TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE);
  EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, threadPool, config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getRefreshListeners());
  try {
    InternalEngine internalEngine = new InternalEngine(brokenConfig);
    fail("translog belongs to a different engine");
  } catch (EngineCreationFailureException ex) {
  }
  // and recover again!
  engine = createEngine(store, primaryTranslogDir);
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
}
62. InternalEngineTests#testTranslogReplay()
Project: elasticsearch
File: InternalEngineTests.java
public void testTranslogReplay() throws IOException {
  final int numDocs = randomIntBetween(1, 10);
  for (int i = 0; i < numDocs; i++) {
    ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
    engine.index(firstIndexRequest);
    assertThat(firstIndexRequest.version(), equalTo(1L));
  }
  engine.refresh("test");
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (directory != null) {
    // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
    // this so we have to disable the check explicitly
    directory.setPreventDoubleWrite(false);
  }
  TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
  parser.mappingUpdate = dynamicUpdate();
  engine.close();
  // we need to reuse the engine config unless the parser.mappingModified won't work
  engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
  engine.recoverFromTranslog();
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
  assertEquals(numDocs, parser.recoveredOps.get());
  if (parser.mappingUpdate != null) {
    assertEquals(1, parser.getRecoveredTypes().size());
    assertTrue(parser.getRecoveredTypes().containsKey("test"));
  } else {
    assertEquals(0, parser.getRecoveredTypes().size());
  }
  engine.close();
  engine = createEngine(store, primaryTranslogDir);
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
  assertEquals(0, parser.recoveredOps.get());
  final boolean flush = randomBoolean();
  int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
  String uuidValue = "test#" + Integer.toString(randomId);
  ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
  Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime());
  engine.index(firstIndexRequest);
  assertThat(firstIndexRequest.version(), equalTo(1L));
  if (flush) {
    engine.flush();
  }
  doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
  Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime());
  engine.index(idxRequest);
  engine.refresh("test");
  assertThat(idxRequest.version(), equalTo(2L));
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
    assertThat(topDocs.totalHits, equalTo(numDocs + 1));
  }
  engine.close();
  engine = createEngine(store, primaryTranslogDir);
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
    assertThat(topDocs.totalHits, equalTo(numDocs + 1));
  }
  parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
  assertEquals(flush ? 1 : 2, parser.recoveredOps.get());
  engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(uuidValue)));
  if (randomBoolean()) {
    engine.refresh("test");
  } else {
    engine.close();
    engine = createEngine(store, primaryTranslogDir);
  }
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs);
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
}
63. InternalEngineTests#testSkipTranslogReplay()
Project: elasticsearch
File: InternalEngineTests.java
public void testSkipTranslogReplay() throws IOException {
  final int numDocs = randomIntBetween(1, 10);
  for (int i = 0; i < numDocs; i++) {
    ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
    engine.index(firstIndexRequest);
    assertThat(firstIndexRequest.version(), equalTo(1L));
  }
  engine.refresh("test");
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (directory != null) {
    // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
    // this so we have to disable the check explicitly
    directory.setPreventDoubleWrite(false);
  }
  engine.close();
  engine = new InternalEngine(engine.config());
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(0));
  }
}
64. InternalEngineTests#testTranslogReplayWithFailure()
Project: elasticsearch
File: InternalEngineTests.java
public void testTranslogReplayWithFailure() throws IOException {
  final int numDocs = randomIntBetween(1, 10);
  for (int i = 0; i < numDocs; i++) {
    ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime());
    engine.index(firstIndexRequest);
    assertThat(firstIndexRequest.version(), equalTo(1L));
  }
  engine.refresh("test");
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
  engine.close();
  final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
  if (directory != null) {
    // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
    // this so we have to disable the check explicitly
    directory.setPreventDoubleWrite(false);
    boolean started = false;
    final int numIters = randomIntBetween(10, 20);
    for (int i = 0; i < numIters; i++) {
      directory.setRandomIOExceptionRateOnOpen(randomDouble());
      directory.setRandomIOExceptionRate(randomDouble());
      directory.setFailOnOpenInput(randomBoolean());
      directory.setAllowRandomFileNotFoundException(randomBoolean());
      try {
        engine = createEngine(store, primaryTranslogDir);
        started = true;
        break;
      } catch (EngineException | IOException e) {
        // retry with different random failure settings
      }
    }
    directory.setRandomIOExceptionRateOnOpen(0.0);
    directory.setRandomIOExceptionRate(0.0);
    directory.setFailOnOpenInput(false);
    directory.setAllowRandomFileNotFoundException(false);
    if (started == false) {
      engine = createEngine(store, primaryTranslogDir);
    }
  } else {
    // no mock directory, no fun.
    engine = createEngine(store, primaryTranslogDir);
  }
  try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
    assertThat(topDocs.totalHits, equalTo(numDocs));
  }
}
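The pattern above (crank the failure knobs, loop until the engine starts, then reset everything) is the standard way to exercise MockDirectoryWrapper's random fault injection. A minimal sketch of just the knob handling, using only setters that appear in this example:

MockDirectoryWrapper dir = newMockDirectory();
dir.setRandomIOExceptionRate(0.1);        // roughly 10% of I/O operations throw IOException
dir.setRandomIOExceptionRateOnOpen(0.1);  // roughly 10% of file opens throw
dir.setFailOnOpenInput(true);
dir.setAllowRandomFileNotFoundException(true);
try {
  // ... run the code under test, expecting intermittent IOExceptions ...
} finally {
  // always reset, or later unrelated operations (even dir.close()) may keep failing:
  dir.setRandomIOExceptionRate(0.0);
  dir.setRandomIOExceptionRateOnOpen(0.0);
  dir.setFailOnOpenInput(false);
  dir.setAllowRandomFileNotFoundException(false);
}
dir.close();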
65. TestBoolean2#beforeClass()
Project: lucene-solr
File: TestBoolean2.java
@BeforeClass
public static void beforeClass() throws Exception {
  // in some runs, test immediate adjacency of matches - in others, force a full bucket gap between docs
  NUM_FILLER_DOCS = random().nextBoolean() ? 0 : BooleanScorer.SIZE;
  PRE_FILLER_DOCS = TestUtil.nextInt(random(), 0, (NUM_FILLER_DOCS / 2));
  if (NUM_FILLER_DOCS * PRE_FILLER_DOCS > 100000) {
    directory = newFSDirectory(createTempDir());
  } else {
    directory = newDirectory();
  }
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
  Document doc = new Document();
  for (int filler = 0; filler < PRE_FILLER_DOCS; filler++) {
    writer.addDocument(doc);
  }
  for (int i = 0; i < docFields.length; i++) {
    doc.add(newTextField(field, docFields[i], Field.Store.NO));
    writer.addDocument(doc);
    doc = new Document();
    for (int filler = 0; filler < NUM_FILLER_DOCS; filler++) {
      writer.addDocument(doc);
    }
  }
  writer.close();
  littleReader = DirectoryReader.open(directory);
  searcher = newSearcher(littleReader);
  // this is intentionally using the baseline sim, because it compares against bigSearcher (which uses a random one)
  searcher.setSimilarity(new ClassicSimilarity());
  // make a copy of our index using a single segment
  if (NUM_FILLER_DOCS * PRE_FILLER_DOCS > 100000) {
    singleSegmentDirectory = newFSDirectory(createTempDir());
  } else {
    singleSegmentDirectory = newDirectory();
  }
  // TODO: this test does not need to be doing this crazy stuff. please improve it!
  for (String fileName : directory.listAll()) {
    if (fileName.startsWith("extra")) {
      continue;
    }
    singleSegmentDirectory.copyFrom(directory, fileName, fileName, IOContext.DEFAULT);
    singleSegmentDirectory.sync(Collections.singleton(fileName));
  }
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  // we need docID order to be preserved:
  iwc.setMergePolicy(newLogMergePolicy());
  try (IndexWriter w = new IndexWriter(singleSegmentDirectory, iwc)) {
    w.forceMerge(1, true);
  }
  singleSegmentReader = DirectoryReader.open(singleSegmentDirectory);
  singleSegmentSearcher = newSearcher(singleSegmentReader);
  singleSegmentSearcher.setSimilarity(searcher.getSimilarity(true));
  // Make big index
  dir2 = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(directory));
  // First multiply small test index:
  mulFactor = 1;
  int docCount = 0;
  if (VERBOSE) {
    System.out.println("\nTEST: now copy index...");
  }
  do {
    if (VERBOSE) {
      System.out.println("\nTEST: cycle...");
    }
    final Directory copy = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(dir2));
    RandomIndexWriter w = new RandomIndexWriter(random(), dir2);
    w.addIndexes(copy);
    docCount = w.maxDoc();
    w.close();
    mulFactor *= 2;
  } while (docCount < 3000 * NUM_FILLER_DOCS);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir2,
      newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
  doc = new Document();
  doc.add(newTextField("field2", "xxx", Field.Store.NO));
  for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++) {
    w.addDocument(doc);
  }
  doc = new Document();
  doc.add(newTextField("field2", "big bad bug", Field.Store.NO));
  for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++) {
    w.addDocument(doc);
  }
  reader = w.getReader();
  bigSearcher = newSearcher(reader);
  w.close();
}