Here are examples of the Java API class org.apache.activemq.artemis.core.journal.JournalLoadInformation, taken from open-source projects.
1. ActiveMQServerImpl#loadJournals()
Project: activemq-artemis
File: ActiveMQServerImpl.java
/**
 * Loads the bindings journal and then the message journal, wiring the
 * recovered state into a freshly created {@code JournalLoader}.
 *
 * @return a two-element array: index 0 is the bindings-journal load info,
 *         index 1 is the message-journal load info
 * @throws Exception if either journal fails to load
 */
private JournalLoadInformation[] loadJournals() throws Exception {
   JournalLoader journalLoader = activation.createJournalLoader(postOffice, pagingManager, storageManager, queueFactory, nodeManager, managementService, groupingHandler, configuration, parentServer);

   JournalLoadInformation[] journalInfo = new JournalLoadInformation[2];

   List<QueueBindingInfo> queueBindingInfos = new ArrayList<>();
   List<GroupingInfo> groupingInfos = new ArrayList<>();

   journalInfo[0] = storageManager.loadBindingJournal(queueBindingInfos, groupingInfos);

   recoverStoredConfigs();

   Map<Long, QueueBindingInfo> queueBindingInfosMap = new HashMap<>();
   journalLoader.initQueues(queueBindingInfosMap, queueBindingInfos);
   journalLoader.handleGroupingBindings(groupingInfos);

   Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap = new HashMap<>();
   HashSet<Pair<Long, Long>> pendingLargeMessages = new HashSet<>();
   List<PageCountPending> pendingNonTXPageCounter = new LinkedList<>();

   journalInfo[1] = storageManager.loadMessageJournal(postOffice, pagingManager, resourceManager, queueBindingInfosMap, duplicateIDMap, pendingLargeMessages, pendingNonTXPageCounter, journalLoader);

   journalLoader.handleDuplicateIds(duplicateIDMap);

   // A large message that still has a pending record never completed its
   // store; delete the orphaned file now.
   for (Pair<Long, Long> msgToDelete : pendingLargeMessages) {
      ActiveMQServerLogger.LOGGER.deletingPendingMessage(msgToDelete);
      LargeServerMessage msg = storageManager.createLargeMessage();
      msg.setMessageID(msgToDelete.getB());
      msg.setPendingRecordID(msgToDelete.getA());
      msg.setDurable(true);
      msg.deleteFile();
   }

   if (!pendingNonTXPageCounter.isEmpty()) {
      try {
         journalLoader.recoverPendingPageCounters(pendingNonTXPageCounter);
      } catch (Throwable e) {
         // Counter recovery is best effort: log and continue starting up.
         ActiveMQServerLogger.LOGGER.errorRecoveringPageCounter(e);
      }
   }

   journalLoader.cleanUp();

   return journalInfo;
}
2. ActiveMQServerImpl#initialisePart2()
Project: activemq-artemis
File: ActiveMQServerImpl.java
/* * Load the data, and start remoting service so clients can connect */ synchronized void initialisePart2(boolean scalingDown) throws Exception { if (state == SERVER_STATE.STOPPED || state == SERVER_STATE.STOPPING) { return; } pagingManager.reloadStores(); JournalLoadInformation[] journalInfo = loadJournals(); final ServerInfo dumper = new ServerInfo(this, pagingManager); long dumpInfoInterval = configuration.getServerDumpInterval(); if (dumpInfoInterval > 0) { scheduledPool.scheduleWithFixedDelay(new Runnable() { @Override public void run() { ActiveMQServerLogger.LOGGER.dumpServerInfo(dumper.dump()); } }, 0, dumpInfoInterval, TimeUnit.MILLISECONDS); } // Deploy the rest of the stuff // Deploy any predefined queues deployQueuesFromConfiguration(); // We need to call this here, this gives any dependent server a chance to deploy its own addresses // this needs to be done before clustering is fully activated callActivateCallbacks(); checkForPotentialOOMEInAddressConfiguration(); if (!scalingDown) { // Deploy any pre-defined diverts deployDiverts(); if (groupingHandler != null) { groupingHandler.start(); } if (groupingHandler != null && groupingHandler instanceof LocalGroupingHandler) { clusterManager.start(); groupingHandler.awaitBindings(); remotingService.start(); } else { remotingService.start(); clusterManager.start(); } if (nodeManager.getNodeId() == null) { throw ActiveMQMessageBundle.BUNDLE.nodeIdNull(); } // We can only do this after everything is started otherwise we may get nasty races with expired messages postOffice.startExpiryScanner(); } }
3. AbstractJournalStorageManager#loadBindingJournal()
Project: activemq-artemis
File: AbstractJournalStorageManager.java
@Override public JournalLoadInformation loadBindingJournal(final List<QueueBindingInfo> queueBindingInfos, final List<GroupingInfo> groupingInfos) throws Exception { List<RecordInfo> records = new ArrayList<>(); List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>(); JournalLoadInformation bindingsInfo = bindingsJournal.load(records, preparedTransactions, null); for (RecordInfo record : records) { long id = record.id; ActiveMQBuffer buffer = ActiveMQBuffers.wrappedBuffer(record.data); byte rec = record.getUserRecordType(); if (rec == JournalRecordIds.QUEUE_BINDING_RECORD) { PersistentQueueBindingEncoding bindingEncoding = newBindingEncoding(id, buffer); queueBindingInfos.add(bindingEncoding); } else if (rec == JournalRecordIds.ID_COUNTER_RECORD) { idGenerator.loadState(record.id, buffer); } else if (rec == JournalRecordIds.GROUP_RECORD) { GroupingEncoding encoding = newGroupEncoding(id, buffer); groupingInfos.add(encoding); } else if (rec == JournalRecordIds.ADDRESS_SETTING_RECORD) { PersistedAddressSetting setting = newAddressEncoding(id, buffer); mapPersistedAddressSettings.put(setting.getAddressMatch(), setting); } else if (rec == JournalRecordIds.SECURITY_RECORD) { PersistedRoles roles = newSecurityRecord(id, buffer); mapPersistedRoles.put(roles.getAddressMatch(), roles); } else { throw new IllegalStateException("Invalid record type " + rec); } } // This will instruct the IDGenerator to beforeStop old records idGenerator.cleanup(); return bindingsInfo; }
4. JournalImpl#load()
Project: activemq-artemis
File: JournalImpl.java
/** * @see JournalImpl#load(LoaderCallback) */ public synchronized JournalLoadInformation load(final List<RecordInfo> committedRecords, final List<PreparedTransactionInfo> preparedTransactions, final TransactionFailureCallback failureCallback, final boolean fixBadTX) throws Exception { final Set<Long> recordsToDelete = new HashSet<>(); // ArrayList was taking too long to delete elements on checkDeleteSize final List<RecordInfo> records = new LinkedList<>(); final int DELETE_FLUSH = 20000; JournalLoadInformation info = load(new LoaderCallback() { Runtime runtime = Runtime.getRuntime(); private void checkDeleteSize() { // HORNETQ-482 - Flush deletes only if memory is critical if (recordsToDelete.size() > DELETE_FLUSH && runtime.freeMemory() < runtime.maxMemory() * 0.2) { ActiveMQJournalLogger.LOGGER.debug("Flushing deletes during loading, deleteCount = " + recordsToDelete.size()); // Clean up when the list is too large, or it won't be possible to load large sets of files // Done as part of JBMESSAGING-1678 Iterator<RecordInfo> iter = records.iterator(); while (iter.hasNext()) { RecordInfo record = iter.next(); if (recordsToDelete.contains(record.id)) { iter.remove(); } } recordsToDelete.clear(); ActiveMQJournalLogger.LOGGER.debug("flush delete done"); } } @Override public void addPreparedTransaction(final PreparedTransactionInfo preparedTransaction) { preparedTransactions.add(preparedTransaction); checkDeleteSize(); } @Override public void addRecord(final RecordInfo info) { records.add(info); checkDeleteSize(); } @Override public void updateRecord(final RecordInfo info) { records.add(info); checkDeleteSize(); } @Override public void deleteRecord(final long id) { recordsToDelete.add(id); checkDeleteSize(); } @Override public void failedTransaction(final long transactionID, final List<RecordInfo> records, final List<RecordInfo> recordsToDelete) { if (failureCallback != null) { failureCallback.failedTransaction(transactionID, records, recordsToDelete); } } }, fixBadTX, 
null); for (RecordInfo record : records) { if (!recordsToDelete.contains(record.id)) { committedRecords.add(record); } } return info; }
5. JDBCJournalImpl#load()
Project: activemq-artemis
File: JDBCJournalImpl.java
/**
 * Replays every persisted journal row into the supplied callback, tracking
 * the record count and the highest sequence number seen.
 */
@Override
public synchronized JournalLoadInformation load(LoaderCallback reloadManager) throws Exception {
   JournalLoadInformation loadInfo = new JournalLoadInformation();
   JDBCJournalReaderCallback readerCallback = new JDBCJournalReaderCallback(reloadManager);

   try (ResultSet rs = selectJournalRecords.executeQuery()) {
      int recordCount = 0;
      while (rs.next()) {
         JDBCJournalRecord record = JDBCJournalRecord.readRecord(rs);
         switch (record.getRecordType()) {
            case JDBCJournalRecord.ADD_RECORD:
               readerCallback.onReadAddRecord(record.toRecordInfo());
               break;
            case JDBCJournalRecord.UPDATE_RECORD:
               readerCallback.onReadUpdateRecord(record.toRecordInfo());
               break;
            case JDBCJournalRecord.DELETE_RECORD:
               readerCallback.onReadDeleteRecord(record.getId());
               break;
            case JDBCJournalRecord.ADD_RECORD_TX:
               readerCallback.onReadAddRecordTX(record.getTxId(), record.toRecordInfo());
               break;
            case JDBCJournalRecord.UPDATE_RECORD_TX:
               readerCallback.onReadUpdateRecordTX(record.getTxId(), record.toRecordInfo());
               break;
            case JDBCJournalRecord.DELETE_RECORD_TX:
               readerCallback.onReadDeleteRecordTX(record.getTxId(), record.toRecordInfo());
               break;
            case JDBCJournalRecord.PREPARE_RECORD:
               readerCallback.onReadPrepareRecord(record.getTxId(), record.getTxDataAsByteArray(), record.getTxCheckNoRecords());
               break;
            case JDBCJournalRecord.COMMIT_RECORD:
               readerCallback.onReadCommitRecord(record.getTxId(), record.getTxCheckNoRecords());
               break;
            case JDBCJournalRecord.ROLLBACK_RECORD:
               readerCallback.onReadRollbackRecord(record.getTxId());
               break;
            default:
               throw new Exception("Error Reading Journal, Unknown Record Type: " + record.getRecordType());
         }
         recordCount++;
         // Keep the journal's sequence counter ahead of every replayed row.
         if (record.getSeq() > seq.longValue()) {
            seq.set(record.getSeq());
         }
      }
      readerCallback.checkPreparedTx();

      // NOTE(review): unchecked cast assumes callers always pass a
      // JDBCJournalLoaderCallback — confirm, otherwise this throws CCE.
      loadInfo.setMaxID(((JDBCJournalLoaderCallback) reloadManager).getMaxId());
      loadInfo.setNumberOfRecords(recordCount);
      transactions = readerCallback.getTransactions();
   }
   return loadInfo;
}