Here are examples of the Java API org.apache.distributedlog.api.DistributedLogManager, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
146 Examples
19
Source : DlogBenchmarkDriver.java
with Apache License 2.0
from confluentinc
/**
 * Asynchronously opens the DL stream backing {@code topic} and wraps it in a
 * {@link DlogBenchmarkConsumer}. The {@code partition} argument is accepted for
 * interface compatibility but unused — a DL stream is opened per topic here.
 */
@Override
public CompletableFuture<BenchmarkConsumer> createConsumer(String topic, String subscriptionName, Optional<Integer> partition, ConsumerCallback consumerCallback) {
    return CompletableFuture.supplyAsync(() -> {
        try {
            DistributedLogManager dlm = namespace.openLog(topic);
            log.info("Open stream {} for consumer", topic);
            return dlm;
        } catch (IOException ioe) {
            // openLog throws checked IOException; surface it to the future as unchecked
            throw new RuntimeException(ioe);
        }
    }).thenApply(dlm -> new DlogBenchmarkConsumer(dlm, consumerCallback));
}
19
Source : DlogBenchmarkDriver.java
with Apache License 2.0
from confluentinc
/**
 * Asynchronously opens the DL stream backing {@code topic}, then builds a
 * producer over the stream's async log writer.
 */
@Override
public CompletableFuture<BenchmarkProducer> createProducer(String topic) {
    CompletableFuture<DistributedLogManager> openFuture = CompletableFuture.supplyAsync(() -> {
        final DistributedLogManager manager;
        try {
            manager = namespace.openLog(topic);
        } catch (IOException ioe) {
            // surface the checked IOException as unchecked to fail the future
            throw new RuntimeException(ioe);
        }
        log.info("Open stream {} for producer", topic);
        return manager;
    });
    return openFuture
            .thenCompose(DistributedLogManager::openAsyncLogWriter)
            .thenApply(DlogBenchmarkProducer::new);
}
19
Source : MultiReader.java
with Apache License 2.0
from apache
// Opens an async reader on the stream starting at the given DLSN and hands it to
// the record-reading loop (the AsyncLogReader overload of readLoop). If the
// reader cannot be opened, the error is reported and keepAliveLatch is released
// so the caller can shut down.
private static void readLoop(final DistributedLogManager dlm, final DLSN dlsn, final CountDownLatch keepAliveLatch) {
System.out.println("Wait for records from " + dlm.getStreamName() + " starting from " + dlsn);
dlm.openAsyncLogReader(dlsn).whenComplete(new FutureEventListener<AsyncLogReader>() {
@Override
public void onFailure(Throwable cause) {
System.err.println("Encountered error on reading records from stream " + dlm.getStreamName());
cause.printStackTrace(System.err);
// release the latch so the process does not hang on an unopenable stream
keepAliveLatch.countDown();
}
@Override
public void onSuccess(AsyncLogReader reader) {
System.out.println("Open reader to read records from stream " + reader.getStreamName());
// hand off to the reader-based loop; the latch is released there on completion
readLoop(reader, keepAliveLatch);
}
});
}
19
Source : DLFileSystem.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Opens the log backing `path` for appending and returns an output stream whose
// initial position is the writer's last transaction id (0 for a fresh log).
// A missing log is reported as FileNotFoundException, matching FileSystem semantics.
// NOTE(review): if openAsyncLogWriter fails, `dlm` is not closed — looks like a
// resource leak on the error path; confirm against DLM close semantics.
private FSDataOutputStream append(Path path, int bufferSize, Optional<DistributedLogConfiguration> confLocal) throws IOException {
try {
DistributedLogManager dlm = namespace.openLog(getStreamName(path), confLocal, Optional.empty(), Optional.empty());
AsyncLogWriter writer = Utils.ioResult(dlm.openAsyncLogWriter());
return new FSDataOutputStream(new BufferedOutputStream(new DLOutputStream(dlm, writer), bufferSize), statistics, writer.getLastTxId() < 0L ? 0L : writer.getLastTxId());
} catch (LogNotFoundException le) {
throw new FileNotFoundException(path.toString());
}
}
19
Source : DLFileSystem.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Opens the log backing {@code path} for reading from the first DLSN.
 * Missing or empty logs are reported as {@link FileNotFoundException},
 * matching Hadoop FileSystem semantics.
 */
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
    try {
        final DistributedLogManager manager = namespace.openLog(getStreamName(path));
        final LogReader logReader;
        try {
            logReader = manager.openLogReader(DLSN.InitialDLSN);
        } catch (LogNotFoundException | LogEmptyException e) {
            // nothing readable behind this path
            throw new FileNotFoundException(path.toString());
        }
        DLInputStream dlIn = new DLInputStream(manager, logReader, 0L);
        return new FSDataInputStream(new BufferedFSInputStream(dlIn, bufferSize));
    } catch (LogNotFoundException e) {
        // the log itself does not exist
        throw new FileNotFoundException(path.toString());
    }
}
19
Source : DLFileSystem.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Returns the file status for {@code path}. The log's last transaction id is
 * reported as the file length; an existing-but-empty log reports length 0.
 *
 * @throws FileNotFoundException if the log does not exist
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    String logName = getStreamName(path);
    boolean exists = namespace.logExists(logName);
    if (!exists) {
        throw new FileNotFoundException(path.toString());
    }
    long endPos;
    try {
        DistributedLogManager dlm = namespace.openLog(logName);
        try {
            endPos = dlm.getLastTxId();
        } finally {
            // fix: the manager was previously opened and never closed (leak)
            dlm.close();
        }
    } catch (LogNotFoundException e) {
        throw new FileNotFoundException(path.toString());
    } catch (LogEmptyException e) {
        // the log exists but holds no records yet
        endPos = 0L;
    }
    // we need to store more metadata information on logs for supporting filesystem-like use cases
    return new FileStatus(endPos, false, 3, dlConf.getMaxLogSegmentBytes(), 0L, makeAbsolute(path));
}
19
Source : TestReadAheadEntryReader.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Convenience overload: generate completed log segments starting at txid 1.
void generateCompletedLogSegments(DistributedLogManager dlm, long numCompletedSegments, long segmentSize) throws Exception {
generateCompletedLogSegments(dlm, numCompletedSegments, segmentSize, 1L);
}
19
Source : TestReadAheadEntryReader.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Opens an async writer on {@code dlm} and writes {@code segmentSize} record
 * pairs — one user record followed by one control record per transaction id —
 * leaving the log segment in-progress. The still-open writer is returned.
 */
AsyncLogWriter createInprogressLogSegment(DistributedLogManager dlm, DistributedLogConfiguration conf, long segmentSize) throws Exception {
    final AsyncLogWriter logWriter = Utils.ioResult(dlm.openAsyncLogWriter());
    for (long txid = 1L; txid <= segmentSize; txid++) {
        // user record for this transaction id
        Utils.ioResult(logWriter.write(DLMTestUtil.getLogRecordInstance(txid)));
        // matching control record with the same transaction id
        LogRecord control = DLMTestUtil.getLogRecordInstance(txid);
        control.setControl();
        Utils.ioResult(logWriter.write(control));
    }
    return logWriter;
}
19
Source : TestLogSegmentsZK.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Verifies that completing a log segment fails when another writer's recovery has
// already completed it (bumping the version of the /ledgers znode).
// fix: restores the real DLM API names ("...NonPartitioned") garbled by extraction.
@Test(timeout = 60000)
public void testCompleteLogSegmentConflicts() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration().setLockTimeout(99999).setOutputBufferSize(0).setImmediateFlushEnabled(true).setEnableLedgerAllocatorPool(true).setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
    namespace.createLog(streamName);
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    DistributedLogManager dlm2 = namespace.openLog(streamName);
    // dlm1 is writing
    BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
    out1.write(DLMTestUtil.getLogRecordInstance(1));
    // before out1 complete, out2 is in on recovery
    // it completed the log segments which bump the version of /ledgers znode
    BKAsyncLogWriter out2 = (BKAsyncLogWriter) dlm2.startAsyncLogSegmentNonPartitioned();
    try {
        out1.closeAndComplete();
        fail("Should fail closeAndComplete since other people already completed it.");
    } catch (IOException ioe) {
        // expected: the segment was already completed by out2's recovery
    }
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// After a clean truncation at the exact start of segment 2, the first readable
// record must be DLSN(2, 0, 0).
// fix: restores JUnit assert* and the DLM API name garbled by extraction.
@Test(timeout = 60000)
public void testGetFirstDLSNAfterCleanTruncation() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 3, 10);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    CompletableFuture<Boolean> futureSuccess = writer.truncate(new DLSN(2, 0, 0));
    assertTrue(Utils.ioResult(futureSuccess));
    CompletableFuture<LogRecordWithDLSN> futureRecord = readHandler.asyncGetFirstLogRecord();
    LogRecordWithDLSN record = Utils.ioResult(futureRecord);
    assertEquals(new DLSN(2, 0, 0), record.getDlsn());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Total record count from the initial DLSN: 11 segments x 3 records = 33.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountTotalCount() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 11, 3);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(DLSN.InitialDLSN);
    assertEquals(33, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Counting from the very last record DLSN(11, 2, 0) yields exactly 1 record.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountLastRecord() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 11, 3);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(11, 2, 0));
    assertEquals(1, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Counting from interior positions: 5 segments x 10 records; from DLSN(3,5,0)
// 25 records remain, from DLSN(2,5,0) 35 remain.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountInteriorRecords() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 5, 10);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(3, 5, 0));
    assertEquals(25, Utils.ioResult(count).longValue());
    count = readHandler.asyncGetLogRecordCount(new DLSN(2, 5, 0));
    assertEquals(35, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Truncation only takes effect at ledger boundaries: truncating at DLSN(2,5,0)
// still leaves DLSN(2,0,0) as the first readable record.
// fix: restores JUnit assert* and the DLM API name garbled by extraction.
@Test(timeout = 60000)
public void testGetFirstDLSNAfterPartialTruncation() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 3, 10);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    // Only truncates at ledger boundary.
    CompletableFuture<Boolean> futureSuccess = writer.truncate(new DLSN(2, 5, 0));
    assertTrue(Utils.ioResult(futureSuccess));
    CompletableFuture<LogRecordWithDLSN> futureRecord = readHandler.asyncGetFirstLogRecord();
    LogRecordWithDLSN record = Utils.ioResult(futureRecord);
    assertEquals(new DLSN(2, 0, 0), record.getDlsn());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Writes {@code numSegments} completed log segments of
 * {@code numEntriesPerSegment} large records each to stream {@code name},
 * with a single monotonically increasing txid sequence across segments.
 *
 * NOTE(review): this method's own name was garbled from "...NonPartitioned" by
 * extraction; it is kept unchanged so in-file callers keep compiling — rename
 * it file-wide in one pass. The external DLM API call below IS restored.
 */
private void prepareLogSegmentsNonParreplacedioned(String name, int numSegments, int numEntriesPerSegment) throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, name);
    long txid = 1;
    for (int sid = 0; sid < numSegments; ++sid) {
        LogWriter out = dlm.startLogSegmentNonPartitioned();
        for (int eid = 0; eid < numEntriesPerSegment; ++eid) {
            LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid);
            out.write(record);
            ++txid;
        }
        // closing the writer completes the current segment
        out.close();
    }
    dlm.close();
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Control records must not be counted: two segments holding only control
// records (5 + 10) yield a record count of 0.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountWithAllControlRecords() throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, runtime.getMethodName());
    long txid = 1;
    txid += DLMTestUtil.generateLogSegmentNonParreplacedioned(dlm, 5, 0, txid);
    txid += DLMTestUtil.generateLogSegmentNonParreplacedioned(dlm, 10, 0, txid);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(1, 0, 0));
    assertEquals(0, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Counting from a DLSN beyond the last segment yields 0.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountPastEnd() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 11, 3);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(12, 0, 0));
    assertEquals(0, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Counting from exact segment boundaries: 11 segments x 3 records; from the
// start of segment 2 → 30 records, from segment 3 → 27.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountAtLedgerBoundary() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonParreplacedioned(dlName, 11, 3);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(2, 0, 0));
    assertEquals(30, Utils.ioResult(count).longValue());
    count = readHandler.asyncGetLogRecordCount(new DLSN(3, 0, 0));
    assertEquals(27, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Mixed segments: (5 control + 5 user) + (0 control + 10 user) → only the 15
// user records are counted.
// fix: restores JUnit assertEquals garbled by extraction; drops redundant null init.
@Test(timeout = 60000)
public void testGetLogRecordCountWithControlRecords() throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, runtime.getMethodName());
    long txid = 1;
    txid += DLMTestUtil.generateLogSegmentNonParreplacedioned(dlm, 5, 5, txid);
    txid += DLMTestUtil.generateLogSegmentNonParreplacedioned(dlm, 0, 10, txid);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(new DLSN(1, 0, 0));
    assertEquals(15, Utils.ioResult(count).longValue());
}
19
Source : TestBKLogReadHandler.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Asking for a record count on a stream that was never written must fail with
// LogNotFoundException.
@Test(timeout = 60000)
public void testGetLogRecordCountEmptyLedger() throws Exception {
    String dlName = runtime.getMethodName();
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    CompletableFuture<Long> count = readHandler.asyncGetLogRecordCount(DLSN.InitialDLSN);
    try {
        Utils.ioResult(count);
        fail("log is empty, should have returned log empty ex");
    } catch (LogNotFoundException ex) {
        // expected: the log has no segments
    }
}
19
Source : TestBKDistributedLogNamespace.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Verifies stream-name validation: reserved names (leading '.'), names with a
// path separator/space, and names containing NUL or other control characters
// are rejected with InvalidStreamNameException, while valid names are usable
// and listed by the namespace.
// fix: restores JUnit assert* and the DLM API name garbled by extraction.
@Test(timeout = 60000)
public void testInvalidStreamName() throws Exception {
    assertFalse(DLUtils.isReservedStreamName("test"));
    assertTrue(DLUtils.isReservedStreamName(".test"));
    URI uri = createDLMURI("/" + runtime.getMethodName());
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
    try {
        namespace.openLog(".test1");
        fail("Should fail to create invalid stream .test");
    } catch (InvalidStreamNameException isne) {
        // expected
    }
    DistributedLogManager dlm = namespace.openLog("test1");
    LogWriter writer = dlm.startLogSegmentNonPartitioned();
    writer.write(DLMTestUtil.getLogRecordInstance(1));
    writer.close();
    dlm.close();
    try {
        namespace.openLog(".test2");
        fail("Should fail to create invalid stream .test2");
    } catch (InvalidStreamNameException isne) {
        // expected
    }
    try {
        namespace.openLog("/ test2");
        fail("should fail to create invalid stream / test2");
    } catch (InvalidStreamNameException isne) {
        // expected
    }
    try {
        // name whose first character is NUL
        char[] chars = new char[6];
        for (int i = 0; i < chars.length; i++) {
            chars[i] = 'a';
        }
        chars[0] = 0;
        String streamName = new String(chars);
        namespace.openLog(streamName);
        fail("should fail to create invalid stream " + streamName);
    } catch (InvalidStreamNameException isne) {
        // expected
    }
    try {
        // name with an embedded control character (U+0010)
        char[] chars = new char[6];
        for (int i = 0; i < chars.length; i++) {
            chars[i] = 'a';
        }
        chars[3] = '\u0010';
        String streamName = new String(chars);
        namespace.openLog(streamName);
        fail("should fail to create invalid stream " + streamName);
    } catch (InvalidStreamNameException isne) {
        // expected
    }
    DistributedLogManager newDLM = namespace.openLog("test_2-3");
    LogWriter newWriter = newDLM.startLogSegmentNonPartitioned();
    newWriter.write(DLMTestUtil.getLogRecordInstance(1));
    newWriter.close();
    newDLM.close();
    // only the two successfully written streams should be listed
    Iterator<String> streamIter = namespace.getLogs();
    Set<String> streamSet = Sets.newHashSet(streamIter);
    assertEquals(2, streamSet.size());
    assertTrue(streamSet.contains("test1"));
    assertTrue(streamSet.contains("test_2-3"));
    namespace.close();
}
19
Source : TestBKDistributedLogNamespace.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// With createStreamIfNotExists disabled, writing to a non-existent stream must
// fail; after explicitly creating the stream, writing succeeds.
// fix: restores the DLM API name "startLogSegmentNonPartitioned" garbled by extraction.
@Test(timeout = 60000)
public void testCreateIfNotExists() throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    ensureURICreated(zooKeeperClient.get(), uri);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(false);
    String streamName = "test-stream";
    Namespace namespace = NamespaceBuilder.newBuilder().conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(streamName);
    LogWriter writer;
    try {
        writer = dlm.startLogSegmentNonPartitioned();
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
        fail("Should fail to write data if stream doesn't exist.");
    } catch (IOException ioe) {
        // expected
    }
    dlm.close();
    // create the stream
    namespace.createLog(streamName);
    DistributedLogManager newDLM = namespace.openLog(streamName);
    LogWriter newWriter = newDLM.startLogSegmentNonPartitioned();
    newWriter.write(DLMTestUtil.getLogRecordInstance(1L));
    newWriter.close();
    newDLM.close();
}
19
Source : TestBKDistributedLogNamespace.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Helper: with createStreamIfNotExists disabled, attempts a write+commit on
 * {@code logName} and expects it to fail with an IOException.
 * fix: restores the DLM API name "startLogSegmentNonPartitioned" garbled by extraction.
 */
private void createLogPathTest(String logName) throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    ensureURICreated(zooKeeperClient.get(), uri);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(false);
    Namespace namespace = NamespaceBuilder.newBuilder().conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(logName);
    LogWriter writer;
    try {
        writer = dlm.startLogSegmentNonPartitioned();
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
        writer.commit();
        fail("Should fail to write data if stream doesn't exist.");
    } catch (IOException ioe) {
        // expected
    }
    dlm.close();
}
19
Source : TestBKDistributedLogNamespace.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Initializes DL metadata under {@code dlNamespace} by writing 10 records to
 * {@code streamName} with the given ZK ACL id, creating the stream on demand.
 * fix: restores the DLM API name "startLogSegmentNonPartitioned" garbled by extraction.
 */
private void initDlogMeta(String dlNamespace, String un, String streamName) throws Exception {
    URI uri = createDLMURI(dlNamespace);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(true);
    newConf.setZkAclId(un);
    Namespace namespace = NamespaceBuilder.newBuilder().conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(streamName);
    LogWriter writer = dlm.startLogSegmentNonPartitioned();
    for (int i = 0; i < 10; i++) {
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
    }
    writer.close();
    dlm.close();
    namespace.close();
}
19
Source : TestAsyncReaderLock.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Five managers on the same stream each request a locked reader; every lock
// should eventually be acquired (each reader closes on acquisition, releasing
// the lock for the next).
// fix: restores the DLM API name "startAsyncLogSegmentNonPartitioned" garbled by extraction.
@Test(timeout = 60000)
public void testReaderLockManyLocks() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogManager dlm = createNewDLM(conf, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.write(DLMTestUtil.getLogRecordInstance(2L));
    writer.closeAndComplete();
    int count = 5;
    final CountDownLatch acquiredLatch = new CountDownLatch(count);
    final ArrayList<CompletableFuture<AsyncLogReader>> readers = new ArrayList<CompletableFuture<AsyncLogReader>>(count);
    for (int i = 0; i < count; i++) {
        readers.add(null);
    }
    final DistributedLogManager[] dlms = new DistributedLogManager[count];
    for (int i = 0; i < count; i++) {
        dlms[i] = createNewDLM(conf, name);
        readers.set(i, dlms[i].getAsyncLogReaderWithLock(DLSN.InitialDLSN));
        readers.get(i).whenComplete(new FutureEventListener<AsyncLogReader>() {
            @Override
            public void onSuccess(AsyncLogReader reader) {
                acquiredLatch.countDown();
                // closing releases the lock so the next pending reader acquires it
                reader.asyncClose();
            }
            @Override
            public void onFailure(Throwable cause) {
                fail("acquire shouldnt have failed");
            }
        });
    }
    acquiredLatch.await();
    for (int i = 0; i < count; i++) {
        dlms[i].close();
    }
    dlm.close();
}
19
Source : NonBlockingReadsTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Convenience overload: write records for non-blocking-read tests using the
// default segment size.
static void writeRecordsForNonBlockingReads(DistributedLogConfiguration conf, DistributedLogManager dlm, boolean recover) throws Exception {
writeRecordsForNonBlockingReads(conf, dlm, recover, DEFAULT_SEGMENT_SIZE);
}
19
Source : NonBlockingReadsTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Polls until a sync reader can be opened on {@code dlm}, then reads records
 * non-blockingly until 3 * segmentSize user records are seen, verifying record
 * payloads, strictly increasing txids, and sequenceId == txid - 1.
 * With {@code waitForIdle} set, keeps polling past the expected count; the loop
 * then only ends via an exception from readNext (uncaught types propagate to
 * the caller). LogRead/LogNotFound exceptions during the read loop fail the test.
 * fix: restores JUnit assert* garbled by extraction.
 */
static void readNonBlocking(DistributedLogManager dlm, boolean forceStall, long segmentSize, boolean waitForIdle) throws Exception {
    BKSyncLogReader reader = null;
    try {
        reader = (BKSyncLogReader) dlm.getInputStream(1);
    } catch (LogNotFoundException lnfe) {
        // stream not created yet; retry below
    }
    while (null == reader) {
        TimeUnit.MILLISECONDS.sleep(20);
        try {
            reader = (BKSyncLogReader) dlm.getInputStream(1);
        } catch (LogNotFoundException lnfe) {
            // keep waiting for the writer to create the stream
        } catch (LogEmptyException lee) {
            // keep waiting for the first record
        } catch (IOException ioe) {
            LOG.error("Failed to open reader reading from {}", dlm.getStreamName());
            throw ioe;
        }
    }
    try {
        LOG.info("Created reader reading from {}", dlm.getStreamName());
        if (forceStall) {
            // simulate a stalled read-ahead for the test scenario
            reader.getReadHandler().disableReadAheadLogSegmentsNotification();
        }
        long numTrans = 0;
        long lastTxId = -1;
        boolean exceptionEncountered = false;
        try {
            while (true) {
                LogRecordWithDLSN record = reader.readNext(true);
                if (null != record) {
                    DLMTestUtil.verifyLogRecord(record);
                    assertTrue(lastTxId < record.getTransactionId());
                    assertEquals(record.getTransactionId() - 1, record.getSequenceId());
                    lastTxId = record.getTransactionId();
                    numTrans++;
                    continue;
                }
                if (numTrans >= (3 * segmentSize)) {
                    if (waitForIdle) {
                        // spin until readNext throws (e.g. idle reader), which propagates
                        while (true) {
                            reader.readNext(true);
                            TimeUnit.MILLISECONDS.sleep(10);
                        }
                    }
                    break;
                }
                TimeUnit.MILLISECONDS.sleep(2);
            }
        } catch (LogReadException readexc) {
            exceptionEncountered = true;
        } catch (LogNotFoundException exc) {
            exceptionEncountered = true;
        }
        assertFalse(exceptionEncountered);
    } finally {
        reader.close();
    }
}
19
Source : NonBlockingReadsTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Convenience overload: non-blocking read with the default segment size and
// without waiting for the reader to go idle.
static void readNonBlocking(DistributedLogManager dlm, boolean forceStall) throws Exception {
readNonBlocking(dlm, forceStall, DEFAULT_SEGMENT_SIZE, false);
}
19
Source : DLMTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Writes {@code numCompletedSegments} completed log segments of
 * {@code segmentSize} records each, with one continuous txid sequence.
 * fix: restores the DLM API name "startLogSegmentNonPartitioned" garbled by extraction.
 */
public static void generateCompletedLogSegments(DistributedLogManager manager, DistributedLogConfiguration conf, long numCompletedSegments, long segmentSize) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) manager;
    long txid = 1L;
    for (long i = 0; i < numCompletedSegments; i++) {
        BKSyncLogWriter writer = dlm.startLogSegmentNonPartitioned();
        for (long j = 1; j <= segmentSize; j++) {
            writer.write(DLMTestUtil.getLogRecordInstance(txid++));
        }
        // complete and seal the current segment
        writer.closeAndComplete();
    }
}
19
Source : DLMTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/**
 * Injects a log segment with a given sequence number directly via the write
 * handler, bypassing the normal writer path. When {@code recordWrongLastDLSN}
 * is set, the segment is completed with a deliberately wrong last DLSN (the
 * second-to-last record) to exercise repair/recovery code paths.
 * fix: restores JUnit assertNotNull garbled by extraction; logic untouched
 * (statement order here is metadata-protocol sensitive).
 */
public static void injectLogSegmentWithLastDLSN(DistributedLogManager manager, DistributedLogConfiguration conf, long logSegmentSeqNo, long startTxID, long segmentSize, boolean recordWrongLastDLSN) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) manager;
    BKLogWriteHandler writeHandler = dlm.createWriteHandler(false);
    Utils.ioResult(writeHandler.lockHandler());
    // Start a log segment with a given ledger seq number.
    BookKeeperClient bkc = getBookKeeperClient(dlm);
    LedgerHandle lh = bkc.get().createLedger(conf.getEnsembleSize(), conf.getWriteQuorumSize(), conf.getAckQuorumSize(), BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes());
    String inprogressZnodeName = writeHandler.inprogressZNodeName(lh.getId(), startTxID, logSegmentSeqNo);
    String znodePath = writeHandler.inprogressZNode(lh.getId(), startTxID, logSegmentSeqNo);
    LogSegmentMetadata l = new LogSegmentMetadata.LogSegmentMetadataBuilder(znodePath, conf.getDLLedgerMetadataLayoutVersion(), lh.getId(), startTxID).setLogSegmentSequenceNo(logSegmentSeqNo).setInprogress(false).build();
    l.write(getZooKeeperClient(dlm));
    writeHandler.maxTxId.update(Version.ANY, startTxID);
    writeHandler.addLogSegmentToCache(inprogressZnodeName, l);
    BKLogSegmentWriter writer = new BKLogSegmentWriter(writeHandler.getFullyQualifiedName(), inprogressZnodeName, conf, conf.getDLLedgerMetadataLayoutVersion(), new BKLogSegmentEntryWriter(lh), writeHandler.lock, startTxID, logSegmentSeqNo, writeHandler.scheduler, writeHandler.statsLogger, writeHandler.statsLogger, writeHandler.alertStatsLogger, PermitLimiter.NULL_PERMIT_LIMITER, new SettableFeatureProvider("", 0), ConfUtils.getConstDynConf(conf));
    long txid = startTxID;
    DLSN wrongDLSN = null;
    for (long j = 1; j <= segmentSize; j++) {
        DLSN dlsn = Utils.ioResult(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(txid++)));
        if (j == (segmentSize - 1)) {
            // remember the second-to-last DLSN to record as the (wrong) last DLSN
            wrongDLSN = dlsn;
        }
    }
    assertNotNull(wrongDLSN);
    if (recordWrongLastDLSN) {
        Utils.ioResult(writer.asyncClose());
        writeHandler.completeAndCloseLogSegment(writeHandler.inprogressZNodeName(writer.getLogSegmentId(), writer.getStartTxId(), writer.getLogSegmentSequenceNumber()), writer.getLogSegmentSequenceNumber(), writer.getLogSegmentId(), writer.getStartTxId(), startTxID + segmentSize - 2, writer.getPositionWithinLogSegment() - 1, wrongDLSN.getEntryId(), wrongDLSN.getSlotId());
    } else {
        Utils.ioResult(writeHandler.completeAndCloseLogSegment(writer));
    }
    Utils.ioResult(writeHandler.unlockHandler());
}
19
Source : DLMTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Injects a log segment with a given sequence number directly via the write
// handler, bypassing the normal writer path: creates a ledger, writes the
// in-progress segment metadata to ZK, optionally writes `segmentSize` records,
// and optionally completes the segment. Statement order is metadata-protocol
// sensitive (metadata must be written and cached before the segment writer is
// constructed), so the logic is left untouched.
public static void injectLogSegmentWithGivenLogSegmentSeqNo(DistributedLogManager manager, DistributedLogConfiguration conf, long logSegmentSeqNo, long startTxID, boolean writeEntries, long segmentSize, boolean completeLogSegment) throws Exception {
BKDistributedLogManager dlm = (BKDistributedLogManager) manager;
BKLogWriteHandler writeHandler = dlm.createWriteHandler(false);
Utils.ioResult(writeHandler.lockHandler());
// Start a log segment with a given ledger seq number.
BookKeeperClient bkc = getBookKeeperClient(dlm);
LedgerHandle lh = bkc.get().createLedger(conf.getEnsembleSize(), conf.getWriteQuorumSize(), conf.getAckQuorumSize(), BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes());
String inprogressZnodeName = writeHandler.inprogressZNodeName(lh.getId(), startTxID, logSegmentSeqNo);
String znodePath = writeHandler.inprogressZNode(lh.getId(), startTxID, logSegmentSeqNo);
int logSegmentMetadataVersion = conf.getDLLedgerMetadataLayoutVersion();
LogSegmentMetadata l = new LogSegmentMetadata.LogSegmentMetadataBuilder(znodePath, logSegmentMetadataVersion, lh.getId(), startTxID).setLogSegmentSequenceNo(logSegmentSeqNo).setEnvelopeEntries(LogSegmentMetadata.supportsEnvelopedEntries(logSegmentMetadataVersion)).build();
l.write(getZooKeeperClient(dlm));
writeHandler.maxTxId.update(Version.ANY, startTxID);
writeHandler.addLogSegmentToCache(inprogressZnodeName, l);
BKLogSegmentWriter writer = new BKLogSegmentWriter(writeHandler.getFullyQualifiedName(), inprogressZnodeName, conf, conf.getDLLedgerMetadataLayoutVersion(), new BKLogSegmentEntryWriter(lh), writeHandler.lock, startTxID, logSegmentSeqNo, writeHandler.scheduler, writeHandler.statsLogger, writeHandler.statsLogger, writeHandler.alertStatsLogger, PermitLimiter.NULL_PERMIT_LIMITER, new SettableFeatureProvider("", 0), ConfUtils.getConstDynConf(conf));
if (writeEntries) {
long txid = startTxID;
for (long j = 1; j <= segmentSize; j++) {
writer.write(DLMTestUtil.getLogRecordInstance(txid++));
}
Utils.ioResult(writer.flushAndCommit());
}
if (completeLogSegment) {
Utils.ioResult(writeHandler.completeAndCloseLogSegment(writer));
}
Utils.ioResult(writeHandler.unlockHandler());
}
19
Source : DLMTestUtil.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Convenience overload: generate a log segment with the default flush frequency (1L).
// NOTE(review): the name was garbled from "...NonPartitioned" by extraction; kept
// as-is so in-file callers keep compiling — rename file-wide in one pass.
public static long generateLogSegmentNonParreplacedioned(DistributedLogManager dlm, int controlEntries, int userEntries, long startTxid) throws Exception {
return generateLogSegmentNonParreplacedioned(dlm, controlEntries, userEntries, startTxid, 1L);
}
19
Source : TestDLCK.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
/** Indexes the stream's log segments by their log segment sequence number. */
static Map<Long, LogSegmentMetadata> getLogSegments(DistributedLogManager dlm) throws Exception {
    final Map<Long, LogSegmentMetadata> segmentsBySeqNo = new HashMap<Long, LogSegmentMetadata>();
    for (LogSegmentMetadata segment : dlm.getLogSegments()) {
        segmentsBySeqNo.put(segment.getLogSegmentSequenceNumber(), segment);
    }
    return segmentsBySeqNo;
}
19
Source : DistributedLogAdmin.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Checks every log segment of a stream (in parallel futures) and collects the
// segments flagged by checkLogSegment into a StreamCandidate. Returns null when
// the stream has no segments or no segment is flagged. The DLM is always closed.
private static StreamCandidate checkStream(final Namespace namespace, final String streamName, final OrderedScheduler scheduler) throws IOException {
DistributedLogManager dlm = namespace.openLog(streamName);
try {
List<LogSegmentMetadata> segments = dlm.getLogSegments();
if (segments.isEmpty()) {
// nothing to check
return null;
}
List<CompletableFuture<LogSegmentCandidate>> futures = new ArrayList<CompletableFuture<LogSegmentCandidate>>(segments.size());
for (LogSegmentMetadata segment : segments) {
futures.add(checkLogSegment(namespace, streamName, segment, scheduler));
}
List<LogSegmentCandidate> segmentCandidates;
try {
segmentCandidates = FutureUtils.result(FutureUtils.collect(futures));
} catch (Exception e) {
throw new IOException("Failed on checking stream " + streamName, e);
}
StreamCandidate streamCandidate = new StreamCandidate(streamName);
for (LogSegmentCandidate segmentCandidate : segmentCandidates) {
// checkLogSegment yields null for segments that are fine
if (null != segmentCandidate) {
streamCandidate.addLogSegmentCandidate(segmentCandidate);
}
}
if (streamCandidate.segmentCandidates.isEmpty()) {
// no problematic segments found
return null;
}
return streamCandidate;
} finally {
dlm.close();
}
}
18
Source : StreamTransformer.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Tails the source stream from fromDLSN, transforming each record via
// `transform` onto targetWriter. Records at or before fromDLSN are skipped
// (they were already transformed in a previous run). The loop self-perpetuates
// by re-registering this listener on each readNext; it blocks on keepAliveLatch,
// which is released only on a read or transform error.
private static void readLoop(final DistributedLogManager dlm, final DLSN fromDLSN, final AsyncLogWriter targetWriter, final Transformer<byte[], byte[]> replicationTransformer) throws Exception {
final CountDownLatch keepAliveLatch = new CountDownLatch(1);
System.out.println("Wait for records starting from " + fromDLSN);
final AsyncLogReader reader = FutureUtils.result(dlm.openAsyncLogReader(fromDLSN));
final FutureEventListener<LogRecordWithDLSN> readListener = new FutureEventListener<LogRecordWithDLSN>() {
@Override
public void onFailure(Throwable cause) {
System.err.println("Encountered error on reading records from stream " + dlm.getStreamName());
cause.printStackTrace(System.err);
// unblock the waiting thread so the reader can be closed
keepAliveLatch.countDown();
}
@Override
public void onSuccess(LogRecordWithDLSN record) {
// skip records already processed in an earlier run
if (record.getDlsn().compareTo(fromDLSN) <= 0) {
reader.readNext().whenComplete(this);
return;
}
System.out.println("Received record " + record.getDlsn());
System.out.println("\"\"\"");
System.out.println(new String(record.getPayload(), UTF_8));
System.out.println("\"\"\"");
try {
transform(targetWriter, record, replicationTransformer, keepAliveLatch);
} catch (Exception e) {
System.err.println("Encountered error on transforming record " + record.getDlsn() + " from stream " + dlm.getStreamName());
e.printStackTrace(System.err);
keepAliveLatch.countDown();
}
// keep tailing
reader.readNext().whenComplete(this);
}
};
reader.readNext().whenComplete(readListener);
keepAliveLatch.await();
FutureUtils.result(reader.asyncClose(), 5, TimeUnit.SECONDS);
}
18
Source : StreamTransformer.java
with Apache License 2.0
from apache
with Apache License 2.0
from apache
// Entry point: replicates records from a source stream to a target stream,
// applying a (here: identity) transformer. Resumes from the source DLSN embedded
// in the target stream's last record; falls back to the initial DLSN when the
// target is missing, empty, or its last record cannot be parsed.
public static void main(String[] args) throws Exception {
if (3 != args.length) {
System.out.println(HELP);
return;
}
String dlUriStr = args[0];
final String srcStreamName = args[1];
final String targetStreamName = args[2];
URI uri = URI.create(dlUriStr);
DistributedLogConfiguration conf = new DistributedLogConfiguration();
// 16KB
conf.setOutputBufferSize(16 * 1024);
// 5ms
conf.setPeriodicFlushFrequencyMilliSeconds(5);
Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();
// open the dlm
System.out.println("Opening log stream " + srcStreamName);
DistributedLogManager srcDlm = namespace.openLog(srcStreamName);
System.out.println("Opening log stream " + targetStreamName);
DistributedLogManager targetDlm = namespace.openLog(targetStreamName);
Transformer<byte[], byte[]> replicationTransformer = new IdenticalTransformer<byte[]>();
LogRecordWithDLSN lastTargetRecord;
DLSN srcDlsn;
try {
// recover the resume position from the target's last transformed record
lastTargetRecord = targetDlm.getLastLogRecord();
TransformedRecord lastTransformedRecord = new TransformedRecord();
try {
lastTransformedRecord.read(protocolFactory.getProtocol(new TIOStreamTransport(new ByteArrayInputStream(lastTargetRecord.getPayload()))));
srcDlsn = DLSN.deserializeBytes(lastTransformedRecord.getSrcDlsn());
System.out.println("Last transformed record is " + srcDlsn);
} catch (TException e) {
System.err.println("Error on reading last transformed record");
e.printStackTrace(System.err);
// unparsable record: restart replication from the beginning
srcDlsn = DLSN.InitialDLSN;
}
} catch (LogNotFoundException lnfe) {
srcDlsn = DLSN.InitialDLSN;
} catch (LogEmptyException lee) {
srcDlsn = DLSN.InitialDLSN;
}
AsyncLogWriter targetWriter = FutureUtils.result(targetDlm.openAsyncLogWriter());
try {
readLoop(srcDlm, srcDlsn, targetWriter, replicationTransformer);
} finally {
// best-effort cleanup of writer, managers, and namespace
FutureUtils.result(targetWriter.asyncClose(), 5, TimeUnit.SECONDS);
targetDlm.close();
srcDlm.close();
namespace.close();
}
}
18
Source: ReaderWithOffsets.java
with Apache License 2.0
from apache
/**
 * Tails {@code dlm} starting at {@code dlsn}, printing every record and publishing the
 * DLSN of the most recently seen record into {@code lastDLSN} (the caller's offset
 * tracker). Blocks until a read fails, then closes the reader.
 */
private static void readLoop(final DistributedLogManager dlm, final DLSN dlsn, final AtomicReference<DLSN> lastDLSN) throws Exception {
    final CountDownLatch terminationLatch = new CountDownLatch(1);
    System.out.println("Wait for records starting from " + dlsn);
    final AsyncLogReader logReader = FutureUtils.result(dlm.openAsyncLogReader(dlsn));
    final FutureEventListener<LogRecordWithDLSN> recordListener = new FutureEventListener<LogRecordWithDLSN>() {

        @Override
        public void onSuccess(LogRecordWithDLSN record) {
            System.out.println("Received record " + record.getDlsn());
            System.out.println("\"\"\"");
            System.out.println(new String(record.getPayload(), UTF_8));
            System.out.println("\"\"\"");
            // Publish our position, then immediately re-arm for the next record.
            lastDLSN.set(record.getDlsn());
            logReader.readNext().whenComplete(this);
        }

        @Override
        public void onFailure(Throwable cause) {
            System.err.println("Encountered error on reading records from stream " + dlm.getStreamName());
            cause.printStackTrace(System.err);
            terminationLatch.countDown();
        }
    };
    logReader.readNext().whenComplete(recordListener);
    terminationLatch.await();
    FutureUtils.result(logReader.asyncClose(), 5, TimeUnit.SECONDS);
}
18
Source: TailReader.java
with Apache License 2.0
from apache
/**
 * Prints every record of {@code dlm} from {@code dlsn} onwards, tailing the stream
 * indefinitely; terminates (and closes the reader) only when a read fails.
 */
private static void readLoop(final DistributedLogManager dlm, final DLSN dlsn) throws Exception {
    final CountDownLatch stopLatch = new CountDownLatch(1);
    System.out.println("Wait for records starting from " + dlsn);
    final AsyncLogReader tailReader = FutureUtils.result(dlm.openAsyncLogReader(dlsn));
    final FutureEventListener<LogRecordWithDLSN> tailListener = new FutureEventListener<LogRecordWithDLSN>() {

        @Override
        public void onSuccess(LogRecordWithDLSN record) {
            // Dump the record, then re-arm the asynchronous read for the next one.
            System.out.println("Received record " + record.getDlsn());
            System.out.println("\"\"\"");
            System.out.println(new String(record.getPayload(), UTF_8));
            System.out.println("\"\"\"");
            tailReader.readNext().whenComplete(this);
        }

        @Override
        public void onFailure(Throwable cause) {
            System.err.println("Encountered error on reading records from stream " + dlm.getStreamName());
            cause.printStackTrace(System.err);
            stopLatch.countDown();
        }
    };
    tailReader.readNext().whenComplete(tailListener);
    stopLatch.await();
    FutureUtils.result(tailReader.asyncClose(), 5, TimeUnit.SECONDS);
}
18
Source: StreamRewinder.java
with Apache License 2.0
from apache
/**
 * Replays {@code dlm} from approximately {@code rewindSeconds} seconds in the past
 * (positioned by transaction id, which these streams use as a write timestamp) and
 * prints records until a read fails. Announces once the reader has caught up to within
 * ~2 seconds of the writer.
 */
private static void readLoop(final DistributedLogManager dlm, final int rewindSeconds) throws Exception {
    final CountDownLatch shutdownLatch = new CountDownLatch(1);
    long rewindToTxId = System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(rewindSeconds, TimeUnit.SECONDS);
    System.out.println("Record records starting from " + rewindToTxId + " which is " + rewindSeconds + " seconds ago");
    final AsyncLogReader rewindReader = FutureUtils.result(dlm.openAsyncLogReader(rewindToTxId));
    final AtomicBoolean hasCaughtUp = new AtomicBoolean(false);
    final FutureEventListener<LogRecordWithDLSN> replayListener = new FutureEventListener<LogRecordWithDLSN>() {

        @Override
        public void onFailure(Throwable cause) {
            System.err.println("Encountered error on reading records from stream " + dlm.getStreamName());
            cause.printStackTrace(System.err);
            shutdownLatch.countDown();
        }

        @Override
        public void onSuccess(LogRecordWithDLSN record) {
            System.out.println("Received record " + record.getDlsn());
            System.out.println("\"\"\"");
            System.out.println(new String(record.getPayload(), UTF_8));
            System.out.println("\"\"\"");
            // The transaction id is the record's write time, so this is the reader's lag.
            long lagMillis = System.currentTimeMillis() - record.getTransactionId();
            if (lagMillis < 2000 && !hasCaughtUp.get()) {
                System.out.println("Reader caught with latest data");
                hasCaughtUp.set(true);
            }
            rewindReader.readNext().whenComplete(this);
        }
    };
    rewindReader.readNext().whenComplete(replayListener);
    shutdownLatch.await();
    FutureUtils.result(rewindReader.asyncClose(), 5, TimeUnit.SECONDS);
}
18
Source: StreamImpl.java
with Apache License 2.0
from apache
//
// Stream Close Functions
//
/**
 * Closes the given log manager if present, logging (but not propagating) any I/O
 * failure so stream shutdown always proceeds.
 */
void close(DistributedLogManager dlm) {
    if (dlm == null) {
        return;
    }
    try {
        dlm.close();
    } catch (IOException ioe) {
        logger.warn("Failed to close dlm for {} : ", name, ioe);
    }
}
18
Source: TestTruncate.java
with Apache License 2.0
from apache
/**
 * Verifies that segments older than the retention period are purged automatically once
 * a writer activity triggers the background truncation task: of 10 segments, the 5
 * backdated beyond retention are removed, leaving 5 old + 1 new = 6.
 */
@Test(timeout = 60000)
public void testPurgeLogs() throws Exception {
    String name = "distrlog-purge-logs";
    URI uri = createDLMURI("/" + name);
    // Write 10 completed log segments with 10 entries each.
    populateData(new HashMap<Long, DLSN>(), conf, name, 10, 10, false);
    DistributedLogManager distributedLogManager = createNewDLM(conf, name);
    List<LogSegmentMetadata> segments = distributedLogManager.getLogSegments();
    LOG.info("Segments before modifying completion time : {}", segments);
    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();
    // Backdate the completion time of the first 5 segments by 2 hours so they fall
    // outside the 1-hour retention period configured below.
    long newTimeMs = System.currentTimeMillis() - 60 * 60 * 1000 * 2;
    for (int i = 0; i < 5; i++) {
        LogSegmentMetadata segment = segments.get(i);
        updateCompletionTime(zkc, segment, newTimeMs + i);
    }
    zkc.close();
    segments = distributedLogManager.getLogSegments();
    LOG.info("Segments after modifying completion time : {}", segments);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setRetentionPeriodHours(1);
    confLocal.setExplicitTruncationByApplication(false);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    long txid = 1 + 10 * 10;
    for (int j = 1; j <= 10; j++) {
        Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
    }
    // to make sure the truncation task is executed
    DLSN lastDLSN = Utils.ioResult(dlm.getLastDLSNAsync());
    LOG.info("Get last dlsn of stream {} : {}", name, lastDLSN);
    // 5 segments survive retention, plus the one just written.
    assertEquals(6, distributedLogManager.getLogSegments().size());
    Utils.close(writer);
    dlm.close();
    distributedLogManager.close();
}
18
Source: TestTruncate.java
with Apache License 2.0
from apache
/**
 * Verifies that purging never removes segments at or after the first segment that is
 * not fully truncated: an explicit purge is a no-op while segment 0 is only partially
 * truncated, and retention-based purging later keeps all 4 old segments plus the new
 * one (5 total).
 */
@Test(timeout = 60000)
public void testOnlyPurgeSegmentsBeforeNoneFullyTruncatedSegment() throws Exception {
    String name = "distrlog-only-purge-segments-before-none-fully-truncated-segment";
    URI uri = createDLMURI("/" + name);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setExplicitTruncationByApplication(true);
    // populate data: 4 completed segments with 10 entries each
    populateData(new HashMap<Long, DLSN>(), confLocal, name, 4, 10, false);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    LOG.info("Segments before modifying segment status : {}", segments);
    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();
    // Mark the first segment PARTIALLY_TRUNCATED and the remaining three TRUNCATED.
    setTruncationStatus(zkc, segments.get(0), TruncationStatus.PARTIALLY_TRUNCATED);
    for (int i = 1; i < 4; i++) {
        LogSegmentMetadata segment = segments.get(i);
        setTruncationStatus(zkc, segment, TruncationStatus.TRUNCATED);
    }
    List<LogSegmentMetadata> segmentsAfterTruncated = dlm.getLogSegments();
    // Explicit purge must not remove anything: the earliest segment is not fully truncated.
    dlm.purgeLogsOlderThan(999999);
    List<LogSegmentMetadata> newSegments = dlm.getLogSegments();
    LOG.info("Segments after purge segments older than 999999 : {}", newSegments);
    assertArrayEquals(segmentsAfterTruncated.toArray(new LogSegmentMetadata[segmentsAfterTruncated.size()]),
            newSegments.toArray(new LogSegmentMetadata[newSegments.size()]));
    dlm.close();
    // Backdate the completion time of all 4 segments to 10 hours ago, well past the
    // 1-hour retention configured below.
    long newTimeMs = System.currentTimeMillis() - 60 * 60 * 1000 * 10;
    for (int i = 0; i < 4; i++) {
        LogSegmentMetadata segment = newSegments.get(i);
        updateCompletionTime(zkc, segment, newTimeMs + i);
    }
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(confLocal);
    newConf.setRetentionPeriodHours(1);
    DistributedLogManager newDLM = createNewDLM(newConf, name);
    AsyncLogWriter newWriter = newDLM.startAsyncLogSegmentNonPartitioned();
    long txid = 1 + 4 * 10;
    for (int j = 1; j <= 10; j++) {
        Utils.ioResult(newWriter.write(DLMTestUtil.getLogRecordInstance(txid++)));
    }
    // to make sure the truncation task is executed
    DLSN lastDLSN = Utils.ioResult(newDLM.getLastDLSNAsync());
    LOG.info("Get last dlsn of stream {} : {}", name, lastDLSN);
    // All 4 old segments must survive (blocked by the partially-truncated one) + 1 new.
    assertEquals(5, newDLM.getLogSegments().size());
    Utils.close(newWriter);
    newDLM.close();
    zkc.close();
}
18
Source: TestTruncate.java
with Apache License 2.0
from apache
/**
 * Verifies that truncating to a DLSN that lies inside segments already marked
 * TRUNCATED leaves the segment metadata unchanged — truncation never regresses a
 * fully-truncated segment back to a partial state.
 */
@Test(timeout = 60000)
public void testPartiallyTruncateTruncatedSegments() throws Exception {
    String name = "distrlog-partially-truncate-truncated-segments";
    URI uri = createDLMURI("/" + name);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setExplicitTruncationByApplication(true);
    // populate: 4 completed segments with 10 entries each, recording each record's DLSN
    Map<Long, DLSN> dlsnMap = new HashMap<Long, DLSN>();
    populateData(dlsnMap, confLocal, name, 4, 10, false);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    LOG.info("Segments before modifying segment status : {}", segments);
    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();
    // Mark every segment fully truncated directly in the metadata store.
    for (int i = 0; i < 4; i++) {
        LogSegmentMetadata segment = segments.get(i);
        setTruncationStatus(zkc, segment, TruncationStatus.TRUNCATED);
    }
    List<LogSegmentMetadata> newSegments = dlm.getLogSegments();
    LOG.info("Segments after changing truncation status : {}", newSegments);
    dlm.close();
    DistributedLogManager newDLM = createNewDLM(confLocal, name);
    AsyncLogWriter newWriter = newDLM.startAsyncLogSegmentNonPartitioned();
    // Truncate to the DLSN of txid 15 (inside the second segment); this must not
    // modify any of the TRUNCATED segments' metadata.
    Utils.ioResult(newWriter.truncate(dlsnMap.get(15L)));
    List<LogSegmentMetadata> newSegments2 = newDLM.getLogSegments();
    assertArrayEquals(newSegments.toArray(new LogSegmentMetadata[4]),
            newSegments2.toArray(new LogSegmentMetadata[4]));
    Utils.close(newWriter);
    newDLM.close();
    zkc.close();
}
18
Source: TestTruncate.java
with Apache License 2.0
from apache
/**
 * Writes {@code numLogSegments} completed log segments of
 * {@code numEntriesPerLogSegment} records each (transaction ids starting at 1),
 * recording every record's DLSN by transaction id into {@code txid2DLSN}.
 *
 * @param txid2DLSN out-parameter map from transaction id to the DLSN it was written at
 * @param createInprogressLogSegment when true, additionally writes 10 records into a
 *        segment that is left open (in-progress)
 * @return the open (manager, writer) pair for the in-progress segment — the caller is
 *         responsible for closing both — or {@code null} when no in-progress segment
 *         was requested
 */
private Pair<DistributedLogManager, AsyncLogWriter> populateData(Map<Long, DLSN> txid2DLSN, DistributedLogConfiguration confLocal, String name, int numLogSegments, int numEntriesPerLogSegment, boolean createInprogressLogSegment) throws Exception {
    long txid = 1;
    for (long i = 1; i <= numLogSegments; i++) {
        LOG.info("Writing Log Segment {}.", i);
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= numEntriesPerLogSegment; j++) {
            long curTxId = txid++;
            DLSN dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        // Closing writer and manager seals this segment as a completed one.
        Utils.close(writer);
        dlm.close();
    }
    if (createInprogressLogSegment) {
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= 10; j++) {
            long curTxId = txid++;
            DLSN dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        // Leave the writer open so the last segment stays in-progress.
        return new ImmutablePair<DistributedLogManager, AsyncLogWriter>(dlm, writer);
    } else {
        return null;
    }
}
18
Source: TestRollLogSegments.java
with Apache License 2.0
from apache
/**
 * Verifies that when many parallel writes force aggressive log segment rolling, the
 * last DLSN recorded in each completed segment's metadata matches the greatest DLSN
 * actually acknowledged to writers for that segment.
 */
@Test(timeout = 600000)
public void testLastDLSNInRollingLogSegments() throws Exception {
    final Map<Long, DLSN> lastDLSNs = new HashMap<Long, DLSN>();
    String name = "distrlog-lastdlsn-in-rolling-log-segments";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    // Roll aggressively: no time-based rolling, tiny (40-byte) max segment size.
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    confLocal.setMaxLogSegmentBytes(40);
    int numEntries = 100;
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
    final CountDownLatch latch = new CountDownLatch(numEntries);
    // send requests in parallel to have outstanding requests
    for (int i = 1; i <= numEntries; i++) {
        final int entryId = i;
        CompletableFuture<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId)).whenComplete(new FutureEventListener<DLSN>() {

            @Override
            public void onSuccess(DLSN value) {
                logger.info("Completed entry {} : {}.", entryId, value);
                // Track the greatest acknowledged DLSN per log segment sequence number.
                synchronized (lastDLSNs) {
                    DLSN lastDLSN = lastDLSNs.get(value.getLogSegmentSequenceNo());
                    if (null == lastDLSN || lastDLSN.compareTo(value) < 0) {
                        lastDLSNs.put(value.getLogSegmentSequenceNo(), value);
                    }
                }
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable cause) {
            }
        });
        if (i == 1) {
            // wait for first log segment created
            Utils.ioResult(writeFuture);
        }
    }
    latch.await();
    // make sure all ensure blocks were executed.
    writer.closeAndComplete();
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    logger.info("lastDLSNs after writes {} {}", lastDLSNs.size(), lastDLSNs);
    logger.info("segments after writes {} {}", segments.size(), segments);
    assertTrue(segments.size() >= 2);
    assertTrue(lastDLSNs.size() >= 2);
    assertEquals(lastDLSNs.size(), segments.size());
    // Every segment's recorded last DLSN must equal the last one seen by writers.
    for (LogSegmentMetadata segment : segments) {
        DLSN dlsnInMetadata = segment.getLastDLSN();
        DLSN dlsnSeen = lastDLSNs.get(segment.getLogSegmentSequenceNumber());
        assertNotNull(dlsnInMetadata);
        assertNotNull(dlsnSeen);
        if (dlsnInMetadata.compareTo(dlsnSeen) != 0) {
            logger.error("Last dlsn recorded in log segment {} is different from the one already seen {}.", dlsnInMetadata, dlsnSeen);
        }
        assertEquals(0, dlsnInMetadata.compareTo(dlsnSeen));
    }
    dlm.close();
}
18
Source: TestRollLogSegments.java
with Apache License 2.0
from apache
/**
 * Verifies that a fully caught-up reader survives a log segment being force-completed
 * (a simulated recovery that records an understated last position without closing the
 * ledger) and continues reading records written into the subsequent segment.
 */
@FlakyTest
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testCaughtUpReaderOnLogSegmentRolling() throws Exception {
    String name = "distrlog-caughtup-reader-on-logsegment-rolling";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(4 * 1024 * 1024);
    confLocal.setTraceReadAheadMetadataChanges(true);
    confLocal.setEnsembleSize(1);
    confLocal.setWriteQuorumSize(1);
    confLocal.setAckQuorumSize(1);
    // Effectively disable long-poll timeouts and idle warnings so reader position
    // only advances when this test writes.
    confLocal.setReadLACLongPollTimeout(99999999);
    confLocal.setReaderIdleWarnThresholdMillis(2 * 99999999 + 1);
    confLocal.setBKClientReadTimeout(99999999 + 1);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    // 1) writer added 5 entries.
    final int numEntries = 5;
    for (int i = 1; i <= numEntries; i++) {
        writer.write(DLMTestUtil.getLogRecordInstance(i));
        writer.flush();
        writer.commit();
    }
    BKDistributedLogManager readDLM = (BKDistributedLogManager) createNewDLM(confLocal, name);
    final BKAsyncLogReader reader = (BKAsyncLogReader) readDLM.getAsyncLogReader(DLSN.InitialDLSN);
    // 2) reader should be able to read 5 entries.
    for (long i = 1; i <= numEntries; i++) {
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
        assertEquals(record.getTransactionId() - 1, record.getSequenceId());
    }
    BKLogSegmentWriter perStreamWriter = writer.segmentWriter;
    BookKeeperClient bkc = DLMTestUtil.getBookKeeperClient(readDLM);
    LedgerHandle readLh = bkc.get().openLedgerNoRecovery(getLedgerHandle(perStreamWriter).getId(), BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes(UTF_8));
    // Writer moved to lac = 9, while reader knows lac = 8 and moving to wait on 9
    checkAndWaitWriterReaderPosition(perStreamWriter, 9, reader, 9, readLh, 8);
    // write 6th record
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 1));
    writer.flush();
    // Writer moved to lac = 10, while reader knows lac = 9 and moving to wait on 10
    checkAndWaitWriterReaderPosition(perStreamWriter, 10, reader, 10, readLh, 9);
    // write records without commit to simulate similar failure cases
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 2));
    writer.flush();
    // Writer moved to lac = 11, while reader knows lac = 10 and moving to wait on 11
    checkAndWaitWriterReaderPosition(perStreamWriter, 11, reader, 11, readLh, 10);
    // Wait until the readahead reader is parked on an outstanding long poll before
    // force-completing the segment underneath it.
    while (true) {
        BKLogSegmentEntryReader entryReader = (BKLogSegmentEntryReader) reader.getReadAheadReader().getCurrentSegmentReader().getEntryReader();
        if (null != entryReader && null != entryReader.getOutstandingLongPoll()) {
            break;
        }
        Thread.sleep(1000);
    }
    logger.info("Waiting for long poll getting interrupted with metadata changed");
    // simulate a recovery without closing ledger causing recording wrong last dlsn
    BKLogWriteHandler writeHandler = writer.getCachedWriteHandler();
    writeHandler.completeAndCloseLogSegment(writeHandler.inprogressZNodeName(perStreamWriter.getLogSegmentId(), perStreamWriter.getStartTxId(), perStreamWriter.getLogSegmentSequenceNumber()), perStreamWriter.getLogSegmentSequenceNumber(), perStreamWriter.getLogSegmentId(), perStreamWriter.getStartTxId(), perStreamWriter.getLastTxId(), perStreamWriter.getPositionWithinLogSegment() - 1, 9, 0);
    BKSyncLogWriter anotherWriter = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    anotherWriter.write(DLMTestUtil.getLogRecordInstance(numEntries + 3));
    anotherWriter.flush();
    anotherWriter.commit();
    anotherWriter.closeAndComplete();
    // 3) reader must keep reading across the forced segment completion.
    for (long i = numEntries + 1; i <= numEntries + 3; i++) {
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
    }
    Utils.close(reader);
    readDLM.close();
}
18
Source: TestRollLogSegments.java
with Apache License 2.0
from apache
/**
 * Verifies that aggressive size-based rolling under parallel writes produces multiple
 * segments with at most one in-progress segment at a time, and that the writer keeps
 * working (and rolling at the expected rate) after the initial burst.
 */
@Test(timeout = 60000)
public void testRollingLogSegments() throws Exception {
    logger.info("start testRollingLogSegments");
    String name = "distrlog-rolling-logsegments-hightraffic";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    // Roll on virtually every write: 1-byte max segment size, unbounded roll concurrency.
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    confLocal.setMaxLogSegmentBytes(1);
    confLocal.setLogSegmentRollingConcurrency(Integer.MAX_VALUE);
    int numLogSegments = 10;
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
    final CountDownLatch latch = new CountDownLatch(numLogSegments);
    long startTime = System.currentTimeMillis();
    // send requests in parallel to have outstanding requests
    for (int i = 1; i <= numLogSegments; i++) {
        final int entryId = i;
        CompletableFuture<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId)).whenComplete(new FutureEventListener<DLSN>() {

            @Override
            public void onSuccess(DLSN value) {
                logger.info("Completed entry {} : {}.", entryId, value);
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable cause) {
                logger.error("Failed to write entries : {}", cause);
            }
        });
        if (i == 1) {
            // wait for first log segment created
            Utils.ioResult(writeFuture);
        }
    }
    latch.await();
    logger.info("Took {} ms to completed all requests.", System.currentTimeMillis() - startTime);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    logger.info("LogSegments : {}", segments);
    assertTrue(segments.size() >= 2);
    ensureOnlyOneInprogressLogSegments(segments);
    int numSegmentsAfterAsyncWrites = segments.size();
    // writer should work after rolling log segments
    // there would be (numLogSegments/2) segments based on current rolling policy
    for (int i = 1; i <= numLogSegments; i++) {
        DLSN newDLSN = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(numLogSegments + i)));
        logger.info("Completed entry {} : {}", numLogSegments + i, newDLSN);
    }
    segments = dlm.getLogSegments();
    logger.info("LogSegments : {}", segments);
    assertEquals(numSegmentsAfterAsyncWrites + numLogSegments / 2, segments.size());
    ensureOnlyOneInprogressLogSegments(segments);
    writer.close();
    dlm.close();
}
18
Source: TestReadAheadEntryReader.java
with Apache License 2.0
from apache
/**
 * Writes {@code numCompletedSegments} completed log segments into {@code dlm}. Each
 * segment holds {@code segmentSize} regular records (transaction ids starting at
 * {@code startTxId}), with a control record carrying the following transaction id
 * written after every regular record.
 */
void generateCompletedLogSegments(DistributedLogManager dlm, long numCompletedSegments, long segmentSize, long startTxId) throws Exception {
    long nextTxId = startTxId;
    for (long segment = 0; segment < numCompletedSegments; segment++) {
        AsyncLogWriter segmentWriter = Utils.ioResult(dlm.openAsyncLogWriter());
        for (long entry = 1; entry <= segmentSize; entry++) {
            // One regular record, then a control record tagged with the next txid.
            Utils.ioResult(segmentWriter.write(DLMTestUtil.getLogRecordInstance(nextTxId++)));
            LogRecord controlRecord = DLMTestUtil.getLogRecordInstance(nextTxId);
            controlRecord.setControl();
            Utils.ioResult(segmentWriter.write(controlRecord));
        }
        // Closing the writer completes (seals) this segment.
        Utils.close(segmentWriter);
    }
}
18
Source: TestNonBlockingReadsMultiReader.java
with Apache License 2.0
from apache
/**
 * Races a rate-limited background writer against a non-blocking sync reader for ~5
 * seconds, then verifies the reader eventually observes exactly as many records as
 * were written.
 */
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    // Seed two records so the reader has data before the writer thread starts.
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);
    DistributedLogManager dlmread = createNewDLM(conf, name);
    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);
    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {

            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    // Write at ~1000 records/sec until asked to stop.
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    // best-effort writer: the read-count comparison below is the real check
                }
            }
        };
        for (ReaderThread rt : readerThreads) {
            rt.start();
        }
        writerThread.start();
        TimeUnit.SECONDS.sleep(5);
        LOG.info("Stopping writer");
        running.set(false);
        writerThread.join();
        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        // Spin until the reader has consumed everything the writer produced.
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}", new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));
        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
18
Source: TestNonBlockingReads.java
with Apache License 2.0
from apache
/**
 * Builds a 6-segment stream whose third segment's metadata is rewritten with a last
 * entry id / last txid inflated by 100 beyond what was actually written, simulating
 * inconsistent metadata for reader tests.
 *
 * @param name stream name to create
 * @return the number of records actually written (excludes the 100 phantom txids)
 */
private long createStreamWithInconsistentMetadata(String name) throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, name);
    ZooKeeperClient zkClient = TestZooKeeperClientBuilder.newBuilder().uri(createDLMURI("/")).build();
    long txid = 1;
    long numRecordsWritten = 0;
    int segmentSize = 10;
    // First three well-formed, completed segments.
    for (long i = 0; i < 3; i++) {
        BKAsyncLogWriter out = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
        for (long j = 1; j <= segmentSize; j++) {
            LogRecord op = DLMTestUtil.getLogRecordInstance(txid++);
            Utils.ioResult(out.write(op));
            numRecordsWritten++;
        }
        out.closeAndComplete();
    }
    // Replace the third segment's metadata node with doctored last entry id / last txid.
    BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
    String completedZNode = blplm.completedLedgerZNode(txid - segmentSize, txid - 1, 3);
    LogSegmentMetadata metadata = Utils.ioResult(LogSegmentMetadata.read(zkClient, completedZNode));
    zkClient.get().delete(completedZNode, -1);
    LogSegmentMetadata metadataToChange = metadata.mutator().setLastEntryId(metadata.getLastEntryId() + 100).setLastTxId(metadata.getLastTxId() + 100).build();
    metadataToChange.write(zkClient);
    // Skip txids forward so the later segments stay consistent with the doctored metadata.
    txid += 100;
    for (long i = 0; i < 3; i++) {
        BKAsyncLogWriter out = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
        for (long j = 1; j <= segmentSize; j++) {
            LogRecord op = DLMTestUtil.getLogRecordInstance(txid++);
            Utils.ioResult(out.write(op));
            numRecordsWritten++;
        }
        out.closeAndComplete();
    }
    dlm.close();
    // FIX: release the ZooKeeper client — the original leaked it.
    zkClient.close();
    return numRecordsWritten;
}
See More Examples