Here are examples of the Java API class org.apache.lucene.store.IOContext, taken from open source projects.
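Before the project examples, here is a minimal standalone sketch of the usual ways an IOContext is constructed in this era of the API: the ready-made singletons, and the MergeInfo / FlushInfo constructors that the examples below use. The class name IOContextSketch and the literal numbers are ours, purely for illustration.

import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.MergeInfo;

public class IOContextSketch {
  public static void main(String[] args) {
    // Ready-made singletons for common cases:
    IOContext defaults = IOContext.DEFAULT;   // generic default context
    IOContext read = IOContext.READ;          // ordinary read
    IOContext readOnce = IOContext.READONCE;  // file is read once, then closed

    // Merge context: (totalMaxDoc, estimatedMergeBytes, isExternal, mergeMaxNumSegments)
    IOContext merge = new IOContext(new MergeInfo(1000, 1024L * 1024L, false, 1));

    // Flush context: (numDocs, estimatedSegmentSize in bytes)
    IOContext flush = new IOContext(new FlushInfo(1000, 64L * 1024L));

    // The public context field tells a Directory what kind of I/O to expect.
    System.out.println(merge.context + " / " + flush.context); // MERGE / FLUSH
  }
}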
1. BlockDirectoryTest#ensureCacheConfigurable()
Project: lucene-solr
File: BlockDirectoryTest.java
/**
 * Verify the configuration options for the block cache are handled
 * appropriately.
 */
@Test
public void ensureCacheConfigurable() throws Exception {
  IOContext mergeContext = new IOContext(new MergeInfo(1, 1, false, 1));
  // default configuration: both read and write cache enabled
  BlockDirectory d = directory;
  assertTrue(d.useReadCache("", IOContext.DEFAULT));
  assertTrue(d.useWriteCache("", IOContext.DEFAULT));
  assertFalse(d.useWriteCache("", mergeContext)); // merge output is never write-cached
  // read cache enabled, write cache disabled
  d = new BlockDirectory("test", directory, mapperCache, null, true, false);
  assertTrue(d.useReadCache("", IOContext.DEFAULT));
  assertFalse(d.useWriteCache("", IOContext.DEFAULT));
  assertFalse(d.useWriteCache("", mergeContext));
  // read cache disabled, write cache enabled
  d = new BlockDirectory("test", directory, mapperCache, null, false, true);
  assertFalse(d.useReadCache("", IOContext.DEFAULT));
  assertTrue(d.useWriteCache("", IOContext.DEFAULT));
  assertFalse(d.useWriteCache("", mergeContext));
}
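The test above shows the pattern this page keeps returning to: a Directory implementation inspects the IOContext it is handed to pick a caching strategy, and merge traffic in particular is kept out of the write cache. As a hedged sketch of that kind of check (the class and method names are ours; IOContext.Context and the public context field are the real API):

import org.apache.lucene.store.IOContext;

public class CachePolicySketch {
  // Keep bulk merge output out of the write cache so it cannot evict hot blocks.
  public static boolean shouldCacheWrite(IOContext context) {
    return context.context != IOContext.Context.MERGE;
  }
}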
2. BaseCompoundFormatTestCase#testLargeCFS()
Project: lucene-solr
File: BaseCompoundFormatTestCase.java
// LUCENE-5724: actually test we play nice with NRTCachingDir and massive file
public void testLargeCFS() throws IOException {
  final String testfile = "_123.test";
  IOContext context = new IOContext(new FlushInfo(0, 512 * 1024 * 1024));
  Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir()), 2.0, 25.0);
  SegmentInfo si = newSegmentInfo(dir, "_123");
  try (IndexOutput out = dir.createOutput(testfile, context)) {
    CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
    byte[] bytes = new byte[512];
    for (int i = 0; i < 1024 * 1024; i++) {
      out.writeBytes(bytes, 0, bytes.length);
    }
    CodecUtil.writeFooter(out);
  }
  si.setFiles(Collections.singleton(testfile));
  si.getCodec().compoundFormat().write(dir, si, context);
  dir.close();
}
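The FlushInfo here advertises an estimated segment size of 512 MB, which is exactly what NRTCachingDirectory needs: it caches small, short-lived files in RAM and sends anything over its size thresholds straight to the delegate. A minimal sketch of how a directory can read the size hint back out of a context (the helper class is ours; flushInfo, mergeInfo, and their size fields are the real public API, and either field may be null):

import org.apache.lucene.store.IOContext;

public class SizeHintSketch {
  /** Returns the estimated size in bytes carried by the context, or -1 if there is no hint. */
  public static long estimatedBytes(IOContext context) {
    if (context.flushInfo != null) {
      return context.flushInfo.estimatedSegmentSize;
    }
    if (context.mergeInfo != null) {
      return context.mergeInfo.estimatedMergeBytes;
    }
    return -1; // DEFAULT/READ/READONCE carry no size hint
  }
}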
3. BaseCompoundFormatTestCase#testPassIOContext()
Project: lucene-solr
File: BaseCompoundFormatTestCase.java
// LUCENE-5724: things like NRTCachingDir rely upon IOContext being properly passed down
public void testPassIOContext() throws IOException {
  final String testfile = "_123.test";
  final IOContext myContext = new IOContext();
  Directory dir = new FilterDirectory(newDirectory()) {
    @Override
    public IndexOutput createOutput(String name, IOContext context) throws IOException {
      assertSame(myContext, context);
      return super.createOutput(name, context);
    }
  };
  SegmentInfo si = newSegmentInfo(dir, "_123");
  try (IndexOutput out = dir.createOutput(testfile, myContext)) {
    CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
    out.writeInt(3);
    CodecUtil.writeFooter(out);
  }
  si.setFiles(Collections.singleton(testfile));
  si.getCodec().compoundFormat().write(dir, si, myContext);
  dir.close();
}
4. ReadersAndUpdates#writeFieldInfosGen()
Project: lucene-solr
File: ReadersAndUpdates.java
private Set<String> writeFieldInfosGen(FieldInfos fieldInfos, Directory dir,
    DocValuesFormat dvFormat, FieldInfosFormat infosFormat) throws IOException {
  final long nextFieldInfosGen = info.getNextFieldInfosGen();
  final String segmentSuffix = Long.toString(nextFieldInfosGen, Character.MAX_RADIX);
  // we write approximately that many bytes (based on Lucene46DVF):
  // HEADER + FOOTER: 40
  // 90 bytes per-field (over estimating long name and attributes map)
  final long estInfosSize = 40 + 90 * fieldInfos.size();
  final IOContext infosContext = new IOContext(new FlushInfo(info.info.maxDoc(), estInfosSize));
  // separately also track which files were created for this gen
  final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
  infosFormat.write(trackingDir, info.info, segmentSuffix, fieldInfos, infosContext);
  info.advanceFieldInfosGen();
  return trackingDir.getCreatedFiles();
}
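To make the estimate concrete: for a segment with 20 fields, the context advertises 40 + 90 * 20 = 1840 bytes. A self-contained sketch of the same computation (the class and method names are ours):

import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class FieldInfosEstimateSketch {
  // 40 bytes of header + footer, plus ~90 bytes per field, as in writeFieldInfosGen above.
  public static IOContext contextFor(int maxDoc, int numFields) {
    long estInfosSize = 40 + 90L * numFields;
    return new IOContext(new FlushInfo(maxDoc, estInfosSize));
  }

  public static void main(String[] args) {
    System.out.println(contextFor(1000, 20).flushInfo.estimatedSegmentSize); // prints 1840
  }
}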
5. TestDoc#merge()
Project: lucene-solr
File: TestDoc.java
private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2,
    String merged, boolean useCompoundFile) throws Exception {
  IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
  SegmentReader r1 = new SegmentReader(si1, context);
  SegmentReader r2 = new SegmentReader(si2, context);
  final Codec codec = Codec.getDefault();
  TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
  final SegmentInfo si = new SegmentInfo(si1.info.dir, Version.LATEST, merged, -1, false, codec,
      Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
  SegmentMerger merger = new SegmentMerger(Arrays.<CodecReader>asList(r1, r2), si,
      InfoStream.getDefault(), trackingDir, new FieldInfos.FieldNumbers(), context);
  MergeState mergeState = merger.merge();
  r1.close();
  r2.close();
  si.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
  if (useCompoundFile) {
    Collection<String> filesToDelete = si.files();
    codec.compoundFormat().write(dir, si, context);
    si.setUseCompoundFile(true);
    for (String name : filesToDelete) {
      si1.info.dir.deleteFile(name);
    }
  }
  return new SegmentCommitInfo(si, 0, -1L, -1L, -1L);
}
6. DocumentsWriterPerThread#sealFlushedSegment()
Project: lucene-solr
File: DocumentsWriterPerThread.java
/**
 * Seals the {@link SegmentInfo} for the new flushed segment and persists
 * the deleted documents {@link MutableBits}.
 */
void sealFlushedSegment(FlushedSegment flushedSegment) throws IOException {
  assert flushedSegment != null;
  SegmentCommitInfo newSegment = flushedSegment.segmentInfo;
  IndexWriter.setDiagnostics(newSegment.info, IndexWriter.SOURCE_FLUSH);
  IOContext context = new IOContext(new FlushInfo(newSegment.info.maxDoc(), newSegment.sizeInBytes()));
  boolean success = false;
  try {
    if (indexWriterConfig.getUseCompoundFile()) {
      Set<String> originalFiles = newSegment.info.files();
      // TODO: like addIndexes, we are relying on createCompoundFile to successfully cleanup...
      indexWriter.createCompoundFile(infoStream, new TrackingDirectoryWrapper(directory), newSegment.info, context);
      filesToDelete.addAll(originalFiles);
      newSegment.info.setUseCompoundFile(true);
    }
    // Have codec write SegmentInfo. Must do this after
    // creating CFS so that 1) .si isn't slurped into CFS,
    // and 2) .si reflects useCompoundFile=true change above:
    codec.segmentInfoFormat().write(directory, newSegment.info, context);
    // slurp the del file into CFS:
    if (flushedSegment.liveDocs != null) {
      final int delCount = flushedSegment.delCount;
      assert delCount > 0;
      if (infoStream.isEnabled("DWPT")) {
        infoStream.message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.getDelGen());
      }
      // TODO: we should prune the segment if it's 100%
      // deleted... but merge will also catch it.
      // TODO: in the NRT case it'd be better to hand
      // this del vector over to the
      // shortly-to-be-opened SegmentReader and let it
      // carry the changes; there's no reason to use
      // filesystem as intermediary here.
      SegmentCommitInfo info = flushedSegment.segmentInfo;
      Codec codec = info.info.getCodec();
      codec.liveDocsFormat().writeLiveDocs(flushedSegment.liveDocs, directory, info, delCount, context);
      newSegment.setDelCount(delCount);
      newSegment.advanceDelGen();
    }
    success = true;
  } finally {
    if (!success) {
      if (infoStream.isEnabled("DWPT")) {
        infoStream.message("DWPT", "hit exception creating compound file for newly flushed segment " + newSegment.info.name);
      }
    }
  }
}