Here are examples of the Java API org.hsqldb.lib.DoubleIntIndex, taken from open-source projects. By voting up you can indicate which examples are most useful and appropriate.
22 Examples
19
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Returns {@code size} keys sampled from {@code index} at distinct random
 * positions. Uses an IntKeyHashMap's key set to collect unique positions.
 */
int[] getSampleIntArray(org.hsqldb.lib.DoubleIntIndex index, int size) {
    // gather 'size' distinct random positions within the index
    org.hsqldb.lib.IntKeyHashMap positions = new org.hsqldb.lib.IntKeyHashMap();
    while (positions.size() < size) {
        positions.put(nextIntRandom(randomgen, index.size()), null);
    }
    // read the key stored at each sampled position
    int[] result = new int[size];
    org.hsqldb.lib.Iterator it = positions.keySet().iterator();
    for (int i = 0; i < size; i++) {
        result[i] = index.getKey(it.nextInt());
    }
    return result;
}
19
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Checks that every key in {@code hMap} resolves to the same value in
 * {@code intLookup}; throws on the first mismatch.
 */
void compareByHIteratorInt(DoubleIntIndex intLookup, org.hsqldb.lib.IntKeyHashMap hMap) throws Exception {
    org.hsqldb.lib.Iterator keys = hMap.keySet().iterator();
    while (keys.hasNext()) {
        int key = keys.nextInt();
        int slot = intLookup.findFirstEqualKeyIndex(key);
        Integer expected = (Integer) hMap.get(key);
        if (expected.intValue() != intLookup.getValue(slot)) {
            throw new Exception("HashMap value mismatch");
        }
    }
}
19
Source : TableSpaceManagerSimple.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Intentional no-op: this simple space manager keeps no free-block list,
 * so there is nothing to initialise from a previous file block.
 */
public void initialiseFileBlock(DoubleIntIndex lookup, long blockFreePos, long blockLimit) {
}
19
Source : TableSpaceManagerBlocks.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
* Manages allocation of space for rows.<p>
* Maintains a list of free file blocks with fixed capacity.<p>
*
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 2.3.3
* @since 2.3.0
*/
public clreplaced TableSpaceManagerBlocks implements TableSpaceManager {
DataSpaceManager spaceManager;
private final int scale;
final int mainBlockSize;
final int spaceID;
final int minReuse;
//
private DoubleIntIndex lookup;
private final int capacity;
private long requestGetCount;
private long releaseCount;
private long requestCount;
private long requestSize;
boolean isModified;
//
long freshBlockFreePos = 0;
long freshBlockLimit = 0;
int fileBlockIndex = -1;
/**
*/
public TableSpaceManagerBlocks(DataSpaceManager spaceManager, int tableId, int fileBlockSize, int capacity, int fileScale, int minReuse) {
this.spaceManager = spaceManager;
this.scale = fileScale;
this.spaceID = tableId;
this.mainBlockSize = fileBlockSize;
this.minReuse = minReuse;
lookup = new DoubleIntIndex(capacity, true);
lookup.setValuesSearchTarget();
this.capacity = capacity;
}
public boolean hasFileRoom(long blockSize) {
return freshBlockLimit - freshBlockFreePos > blockSize;
}
public void addFileBlock(long blockFreePos, long blockLimit) {
int released = (int) (freshBlockLimit - freshBlockFreePos);
if (released > 0) {
release(freshBlockFreePos / scale, released);
}
initialiseFileBlock(null, blockFreePos, blockLimit);
}
public void initialiseFileBlock(DoubleIntIndex spaceList, long blockFreePos, long blockLimit) {
freshBlockFreePos = blockFreePos;
freshBlockLimit = blockLimit;
if (spaceList != null) {
spaceList.copyTo(lookup);
}
}
boolean getNewMainBlock(long rowSize) {
long blockCount = (mainBlockSize + rowSize) / mainBlockSize;
long blockSize = blockCount * mainBlockSize;
long position = spaceManager.getFileBlocks(spaceID, (int) blockCount);
if (position < 0) {
return false;
}
if (position != freshBlockLimit) {
long released = freshBlockLimit - freshBlockFreePos;
if (released > 0) {
release(freshBlockFreePos / scale, (int) released);
}
freshBlockFreePos = position;
freshBlockLimit = position;
}
freshBlockLimit += blockSize;
return true;
}
long getNewBlock(long rowSize, boolean asBlocks) {
if (asBlocks) {
rowSize = (int) ArrayUtil.getBinaryMultipleCeiling(rowSize, DataSpaceManager.fixedBlockSizeUnit);
}
if (freshBlockFreePos + rowSize > freshBlockLimit) {
boolean result = getNewMainBlock(rowSize);
if (!result) {
throw Error.error(ErrorCode.DATA_FILE_IS_FULL);
}
}
long position = freshBlockFreePos;
if (asBlocks) {
position = ArrayUtil.getBinaryMultipleCeiling(position, DataSpaceManager.fixedBlockSizeUnit);
long released = position - freshBlockFreePos;
if (released > 0) {
release(freshBlockFreePos / scale, (int) released);
freshBlockFreePos = position;
}
}
freshBlockFreePos += rowSize;
return position / scale;
}
public int getSpaceID() {
return spaceID;
}
synchronized public void release(long pos, int rowSize) {
isModified = true;
releaseCount++;
if (lookup.size() == capacity) {
resetList();
}
if (pos >= Integer.MAX_VALUE) {
return;
}
lookup.add(pos, rowSize / scale);
}
/**
* Returns the position of a free block or 0.
*/
synchronized public long getFilePosition(int rowSize, boolean asBlocks) {
requestGetCount++;
if (capacity == 0) {
return getNewBlock(rowSize, asBlocks);
}
if (asBlocks) {
rowSize = (int) ArrayUtil.getBinaryMultipleCeiling(rowSize, DataSpaceManager.fixedBlockSizeUnit);
}
int index = -1;
int rowUnits = rowSize / scale;
if (rowSize >= minReuse && lookup.size() > 0) {
if (lookup.getValue(0) >= rowUnits) {
index = 0;
} else if (rowSize > Integer.MAX_VALUE) {
index = -1;
} else {
index = lookup.findFirstGreaterEqualKeyIndex(rowUnits);
}
}
if (index == -1) {
return getNewBlock(rowSize, asBlocks);
}
if (asBlocks) {
for (; index < lookup.size(); index++) {
long pos = lookup.getKey(index);
if (pos % (DataSpaceManager.fixedBlockSizeUnit / scale) == 0) {
break;
}
}
if (index == lookup.size()) {
return getNewBlock(rowSize, asBlocks);
}
}
// statistics for successful requests only - to be used later for midSize
requestCount++;
requestSize += rowSize;
int key = lookup.getKey(index);
int units = lookup.getValue(index);
int difference = units - rowUnits;
lookup.remove(index);
if (difference > 0) {
int pos = key + rowUnits;
lookup.add(pos, difference);
}
return key;
}
public void reset() {
if (freshBlockFreePos == 0) {
fileBlockIndex = -1;
} else {
fileBlockIndex = (int) (freshBlockFreePos / mainBlockSize);
}
spaceManager.freeTableSpace(spaceID, lookup, freshBlockFreePos, freshBlockLimit, true);
freshBlockFreePos = 0;
freshBlockLimit = 0;
}
public long getLostBlocksSize() {
long total = freshBlockLimit - freshBlockFreePos + lookup.getTotalValues() * scale;
return total;
}
public boolean isDefaultSpace() {
return spaceID == DataSpaceManager.tableIdDefault;
}
public int getFileBlockIndex() {
return fileBlockIndex;
}
private void resetList() {
// dummy args for file block release
spaceManager.freeTableSpace(spaceID, lookup, 0, 0, false);
}
}
19
Source : DataSpaceManagerSimple.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 2.3.3
* @since 2.3.0
*/
public clreplaced DataSpaceManagerSimple implements DataSpaceManager {
DataFileCache cache;
TableSpaceManager defaultSpaceManager;
int fileBlockSize = DataSpaceManager.fixedBlockSizeUnit;
long totalFragmentSize;
int spaceIdSequence = tableIdFirst;
DoubleIntIndex lookup;
/**
* Used for default, readonly, Text and Session data files
*/
DataSpaceManagerSimple(DataFileCache cache, boolean isReadOnly) {
this.cache = cache;
if (cache instanceof DataFileCacheSession) {
defaultSpaceManager = new TableSpaceManagerSimple(cache);
} else if (cache instanceof TextCache) {
defaultSpaceManager = new TableSpaceManagerSimple(cache);
} else {
int capacity = cache.database.logger.propMaxFreeBlocks;
defaultSpaceManager = new TableSpaceManagerBlocks(this, DataSpaceManager.tableIdDefault, fileBlockSize, capacity, cache.getDataFileScale(), 0);
if (!isReadOnly) {
initialiseSpaces();
cache.spaceManagerPosition = 0;
}
}
totalFragmentSize = cache.lostSpaceSize;
}
public TableSpaceManager getDefaultTableSpace() {
return defaultSpaceManager;
}
public TableSpaceManager getTableSpace(int spaceId) {
if (spaceId >= spaceIdSequence) {
spaceIdSequence = spaceId + 1;
}
return defaultSpaceManager;
}
public int getNewTableSpaceID() {
return spaceIdSequence++;
}
public long getFileBlocks(int tableId, int blockCount) {
long filePosition = cache.enlargeFileSpace((long) blockCount * fileBlockSize);
return filePosition;
}
public void freeTableSpace(int spaceId) {
}
public void freeTableSpace(int spaceId, DoubleIntIndex spaceList, long offset, long limit, boolean full) {
totalFragmentSize += spaceList.getTotalValues() * cache.getDataFileScale();
if (full) {
if (cache.fileFreePosition == limit) {
cache.writeLock.lock();
try {
cache.fileFreePosition = offset;
} finally {
cache.writeLock.unlock();
}
} else {
totalFragmentSize += limit - offset;
}
if (spaceList.size() != 0) {
lookup = new DoubleIntIndex(spaceList.size(), true);
spaceList.copyTo(lookup);
spaceList.clear();
}
} else {
spaceList.compactLookupAsIntervals();
spaceList.setValuesSearchTarget();
spaceList.sort();
int extra = spaceList.size() - spaceList.capacity() / 2;
if (extra > 0) {
spaceList.removeRange(0, extra);
totalFragmentSize -= spaceList.getTotalValues() * cache.getDataFileScale();
}
}
}
public long getLostBlocksSize() {
return totalFragmentSize + defaultSpaceManager.getLostBlocksSize();
}
public int getFileBlockSize() {
return 1024 * 1024 * cache.getDataFileScale() / 16;
}
public boolean isModified() {
return true;
}
public void initialiseSpaces() {
long currentSize = cache.getFileFreePos();
long totalBlocks = (currentSize + fileBlockSize) / fileBlockSize;
long lastFreePosition = cache.enlargeFileSpace(totalBlocks * fileBlockSize - currentSize);
defaultSpaceManager.initialiseFileBlock(lookup, lastFreePosition, cache.getFileFreePos());
if (lookup != null) {
totalFragmentSize -= lookup.getTotalValues() * cache.getDataFileScale();
lookup = null;
}
}
public void reset() {
defaultSpaceManager.reset();
}
public boolean isMultiSpace() {
return false;
}
public int getFileBlockItemCount() {
return 1024 * 64;
}
public DirectoryBlockCachedObject[] getDirectoryList() {
return new DirectoryBlockCachedObject[0];
}
}
19
Source : DataFileBlockManager.java
with GNU General Public License v3.0
from s-store
with GNU General Public License v3.0
from s-store
/**
* Maintains a list of free file blocks with fixed capacity.<p>
*
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 1.8.0
* @since 1.8.0
*/
public clreplaced DataFileBlockManager {
private DoubleIntIndex lookup;
private final int capacity;
private int midSize;
private final int scale;
private long releaseCount;
private long requestCount;
private long requestSize;
// reporting vars
long lostFreeBlockSize;
boolean isModified;
/**
*/
public DataFileBlockManager(int capacity, int scale, long lostSize) {
lookup = new DoubleIntIndex(capacity, true);
lookup.setValuesSearchTarget();
this.capacity = capacity;
this.scale = scale;
this.lostFreeBlockSize = lostSize;
// arbitrary initial value
this.midSize = 128;
}
/**
*/
void add(int pos, int rowSize) {
isModified = true;
if (capacity == 0) {
lostFreeBlockSize += rowSize;
return;
}
releaseCount++;
//
if (lookup.size() == capacity) {
resetList();
}
lookup.add(pos, rowSize);
}
/**
* Returns the position of a free block or 0.
*/
int get(int rowSize) {
if (lookup.size() == 0) {
return -1;
}
int index = lookup.findFirstGreaterEqualKeyIndex(rowSize);
if (index == -1) {
return -1;
}
// statistics for successful requests only - to be used later for midSize
requestCount++;
requestSize += rowSize;
int length = lookup.getValue(index);
int difference = length - rowSize;
int key = lookup.getKey(index);
lookup.remove(index);
if (difference >= midSize) {
int pos = key + (rowSize / scale);
lookup.add(pos, difference);
} else {
lostFreeBlockSize += difference;
}
return key;
}
int size() {
return lookup.size();
}
long getLostBlocksSize() {
return lostFreeBlockSize;
}
boolean isModified() {
return isModified;
}
void clear() {
removeBlocks(lookup.size());
}
private void resetList() {
if (requestCount != 0) {
midSize = (int) (requestSize / requestCount);
}
int first = lookup.findFirstGreaterEqualSlotIndex(midSize);
if (first < lookup.size() / 4) {
first = lookup.size() / 4;
}
removeBlocks(first);
}
private void removeBlocks(int blocks) {
for (int i = 0; i < blocks; i++) {
lostFreeBlockSize += lookup.getValue(i);
}
lookup.removeRange(0, blocks);
}
private void checkIntegrity() throws NullPointerException {
}
}
18
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Inserts random int keys into both the index and the hash map and checks
 * the two structures stay the same size after each insert.
 */
void populateByRandomIntKeysInt(DoubleIntIndex intLookup, org.hsqldb.lib.IntKeyHashMap hMap, int testSize) throws Exception {
    for (int count = 0; count < testSize; count++) {
        int randomKey = randomgen.nextInt();
        intLookup.addUnique(randomKey, count);
        hMap.put(randomKey, new Integer(count));
        // actually this can happen as duplicates are allowed in DoubleIntTable
        if (intLookup.size() != hMap.size()) {
            throw new Exception("Duplicate random in int lookup");
        }
    }
}
18
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Inserts serial keys 0..testSize-1 with random values into both the index
 * and the hash map, verifying the sizes remain equal after each insert.
 */
void populateBySerialIntKeysInt(DoubleIntIndex intLookup, org.hsqldb.lib.IntKeyHashMap hMap, int testSize) throws Exception {
    for (int key = 0; key < testSize; key++) {
        int randomValue = randomgen.nextInt();
        intLookup.addUnique(key, randomValue);
        hMap.put(key, new Integer(randomValue));
        if (intLookup.size() != hMap.size()) {
            throw new Exception("HashMap size mismatch");
        }
    }
}
18
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Populates a DoubleIntIndex and an IntKeyHashMap with serial and then
 * random keys, cross-checking the contents after each phase.
 */
public void testDoubleIntLookup() throws Exception {
    boolean failed = false;
    int testSize = 512;
    org.hsqldb.lib.IntKeyHashMap hIntMap = new org.hsqldb.lib.IntKeyHashMap();
    DoubleIntIndex intLookup = new DoubleIntIndex(12, false);
    try {
        intLookup.setKeysSearchTarget();
        populateBySerialIntKeysInt(intLookup, hIntMap, testSize);
        compareByHIteratorInt(intLookup, hIntMap);
        populateByRandomIntKeysInt(intLookup, hIntMap, testSize);
        compareByHIteratorInt(intLookup, hIntMap);
    } catch (Exception e) {
        // any mismatch or lookup failure marks the test as failed
        failed = true;
    }
    assertTrue(!failed);
}
18
Source : TableSpaceManagerBlocks.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Sets the fresh-block range and, when a previous space list is supplied,
 * copies its free-block entries into this manager's lookup.
 */
public void initialiseFileBlock(DoubleIntIndex spaceList, long blockFreePos, long blockLimit) {
    freshBlockFreePos = blockFreePos;
    freshBlockLimit = blockLimit;
    if (spaceList != null) {
        spaceList.copyTo(lookup);
    }
}
18
Source : DataSpaceManagerBlocks.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Returns the space in {@code spaceList} plus the [offset, limit) range to
 * the global free-space directory. When {@code full} is false and the list
 * still has ample spare capacity, the list is only compacted and kept;
 * otherwise all entries are written out under the cache write lock and the
 * list is cleared.
 */
public void freeTableSpace(int spaceId, DoubleIntIndex spaceList, long offset, long limit, boolean full) {
    if (spaceList.size() == 0 && offset == limit) {
        return;
    }
    // sorts by keys
    spaceList.compactLookupAsIntervals();
    if (!full) {
        int available = spaceList.capacity() - spaceList.size();
        if (available > spaceList.capacity() / 4) {
            // list is not close to full - keep it, restored to value order
            spaceList.setValuesSearchTarget();
            spaceList.sort();
            return;
        }
    }
    cache.writeLock.lock();
    try {
        ba.initialise(true);
        // spaceId may be the tableIdDefault for moved spaces
        int[] keys = spaceList.getKeys();
        int[] values = spaceList.getValues();
        for (int i = 0; i < spaceList.size(); i++) {
            int position = keys[i];
            int units = values[i];
            freeTableSpacePart(position, units);
        }
        // also free the trailing fresh-block range
        long position = offset / dataFileScale;
        int units = (int) ((limit - offset) / dataFileScale);
        freeTableSpacePart(position, units);
        ba.reset();
    } finally {
        cache.writeLock.unlock();
    }
    spaceList.clear();
    spaceList.setValuesSearchTarget();
}
17
Source : DataFileDefrag.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
* Routine to defrag the *.data file.
*
* This method iterates over the primary index of a table to find the
* disk position for each row and stores it, together with the new position
* in an array.
*
* A second preplaced over the primary index writes each row to the new disk
* image after translating the old pointers to the new.
*
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 2.3.4
* @since 1.7.2
*/
final clreplaced DataFileDefrag {
DataFileCache dataFileOut;
StopWatch stopw = new StopWatch();
String dataFileName;
long[][] rootsList;
Database database;
DataFileCache dataCache;
int scale;
DoubleIntIndex pointerLookup;
DataFileDefrag(Database db, DataFileCache cache) {
this.database = db;
this.dataCache = cache;
this.scale = cache.getDataFileScale();
this.dataFileName = cache.getFileName();
}
void process(Session session) {
Throwable error = null;
database.logger.logDetailEvent("Defrag process begins");
HsqlArrayList allTables = database.schemaManager.getAllTables(true);
rootsList = new long[allTables.size()][];
long maxSize = 0;
for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
Table table = (Table) allTables.get(i);
if (table.getTableType() == TableBase.CACHED_TABLE) {
PersistentStore store = database.persistentStoreCollection.getStore(table);
long size = store.elementCount();
if (size > maxSize) {
maxSize = size;
}
}
}
if (maxSize > Integer.MAX_VALUE) {
throw Error.error(ErrorCode.X_2200T);
}
try {
String baseFileName = database.getCanonicalPath();
pointerLookup = new DoubleIntIndex((int) maxSize, false);
dataFileOut = new DataFileCache(database, baseFileName, true);
pointerLookup.setKeysSearchTarget();
for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
Table t = (Table) allTables.get(i);
if (t.getTableType() == TableBase.CACHED_TABLE) {
long[] rootsArray = writeTableToDataFile(t);
rootsList[i] = rootsArray;
} else {
rootsList[i] = null;
}
database.logger.logDetailEvent("table complete " + t.getName().name);
}
dataFileOut.close();
dataFileOut = null;
for (int i = 0, size = rootsList.length; i < size; i++) {
long[] roots = rootsList[i];
if (roots != null) {
database.logger.logDetailEvent("roots: " + StringUtil.getList(roots, ",", ""));
}
}
} catch (OutOfMemoryError e) {
error = e;
throw Error.error(ErrorCode.OUT_OF_MEMORY, e);
} catch (Throwable t) {
error = t;
throw Error.error(ErrorCode.GENERAL_ERROR, t);
} finally {
try {
if (dataFileOut != null) {
dataFileOut.release();
}
} catch (Throwable t) {
}
if (error instanceof OutOfMemoryError) {
database.logger.logInfoEvent("defrag failed - out of memory - required: " + maxSize * 8);
}
if (error == null) {
database.logger.logDetailEvent("Defrag transfer complete: " + stopw.elapsedTime());
} else {
database.logger.logSevereEvent("defrag failed ", error);
database.logger.getFileAccess().removeElement(dataFileName + Logger.newFileExtension);
}
}
}
long[] writeTableToDataFile(Table table) {
RowStoreAVLDisk store = (RowStoreAVLDisk) table.database.persistentStoreCollection.getStore(table);
long[] rootsArray = table.getIndexRootsArray();
pointerLookup.clear();
database.logger.logDetailEvent("lookup begins " + table.getName().name + " " + stopw.elapsedTime());
store.moveDataToSpace(dataFileOut, pointerLookup);
for (int i = 0; i < table.getIndexCount(); i++) {
if (rootsArray[i] == -1) {
continue;
}
long pos = pointerLookup.lookup(rootsArray[i], -1);
if (pos == -1) {
throw Error.error(ErrorCode.DATA_FILE_ERROR);
}
rootsArray[i] = pos;
}
// log any discrepency in row count
long count = store.elementCount();
if (count != pointerLookup.size()) {
database.logger.logSevereEvent("discrepency in row count " + table.getName().name + " " + count + " " + pointerLookup.size(), null);
}
database.logger.logDetailEvent("table written " + table.getName().name);
return rootsArray;
}
public long[][] getIndexRoots() {
return rootsList;
}
}
17
Source : DataFileDefrag.java
with GNU General Public License v3.0
from s-store
with GNU General Public License v3.0
from s-store
/**
 * Rewrites each transaction row id's value with the new position found in
 * {@code pointerLookup}; ids without a match are left unchanged.
 */
void setTransactionRowLookups(DoubleIntIndex pointerLookup) {
    int size = transactionRowLookup.size();
    for (int i = 0; i < size; i++) {
        int oldPos = transactionRowLookup.getKey(i);
        int found = pointerLookup.findFirstEqualKeyIndex(oldPos);
        if (found != -1) {
            transactionRowLookup.setValue(i, pointerLookup.getValue(found));
        }
    }
}
16
Source : TransactionManager.java
with GNU General Public License v3.0
from s-store
with GNU General Public License v3.0
from s-store
/**
 * Convert row ID's for cached table rows in transactions.
 * Rebuilds rowActionMap under the write lock, re-keying every RowAction at
 * its new position taken from the lookup.
 */
public void convertTransactionIDs(DoubleIntIndex lookup) {
    writeLock.lock();
    try {
        // snapshot all actions before clearing the map
        RowAction[] list = new RowAction[rowActionMap.size()];
        Iterator it = this.rowActionMap.values().iterator();
        for (int i = 0; it.hasNext(); i++) {
            list[i] = (RowAction) it.next();
        }
        rowActionMap.clear();
        for (int i = 0; i < list.length; i++) {
            int pos = lookup.lookupFirstEqual(list[i].getPos());
            list[i].setPos(pos);
            rowActionMap.put(pos, list[i]);
        }
    } finally {
        writeLock.unlock();
    }
}
16
Source : DataFileDefrag.java
with GNU General Public License v3.0
from s-store
with GNU General Public License v3.0
from s-store
// [email protected] - changed to file access api
/**
* Routine to defrag the *.data file.
*
* This method iterates over the primary index of a table to find the
* disk position for each row and stores it, together with the new position
* in an array.
*
* A second preplaced over the primary index writes each row to the new disk
* image after translating the old pointers to the new.
*
* @author Fred Toussi (fredt@users dot sourceforge.net)
* @version 1.9.0
* @since 1.7.2
*/
final clreplaced DataFileDefrag {
BufferedOutputStream fileStreamOut;
long fileOffset;
StopWatch stopw = new StopWatch();
String filename;
int[][] rootsList;
Database database;
DataFileCache cache;
int scale;
DoubleIntIndex transactionRowLookup;
DataFileDefrag(Database db, DataFileCache cache, String filename) {
this.database = db;
this.cache = cache;
this.scale = cache.cacheFileScale;
this.filename = filename;
}
void process() throws IOException {
boolean complete = false;
Error.printSystemOut("Defrag Transfer begins");
transactionRowLookup = database.txManager.getTransactionIDList();
HsqlArrayList allTables = database.schemaManager.getAllTables();
rootsList = new int[allTables.size()][];
Storage dest = null;
try {
OutputStream fos = database.getFileAccess().openOutputStreamElement(filename + ".new");
fileStreamOut = new BufferedOutputStream(fos, 1 << 12);
for (int i = 0; i < DataFileCache.INITIAL_FREE_POS; i++) {
fileStreamOut.write(0);
}
fileOffset = DataFileCache.INITIAL_FREE_POS;
for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
Table t = (Table) allTables.get(i);
if (t.getTableType() == TableBase.CACHED_TABLE) {
int[] rootsArray = writeTableToDataFile(t);
rootsList[i] = rootsArray;
} else {
rootsList[i] = null;
}
Error.printSystemOut(t.getName().name + " complete");
}
writeTransactionRows();
fileStreamOut.flush();
fileStreamOut.close();
fileStreamOut = null;
// write out the end of file position
dest = ScaledRAFile.newScaledRAFile(database, filename + ".new", false, ScaledRAFile.DATA_FILE_RAF, database.getURLProperties().getProperty("storage_clreplaced_name"), database.getURLProperties().getProperty("storage_key"));
dest.seek(DataFileCache.LONG_FREE_POS_POS);
dest.writeLong(fileOffset);
dest.close();
dest = null;
for (int i = 0, size = rootsList.length; i < size; i++) {
int[] roots = rootsList[i];
if (roots != null) {
Error.printSystemOut(org.hsqldb.lib.StringUtil.getList(roots, ",", ""));
}
}
complete = true;
} catch (IOException e) {
throw Error.error(ErrorCode.FILE_IO_ERROR, filename + ".new");
} catch (OutOfMemoryError e) {
throw Error.error(ErrorCode.OUT_OF_MEMORY);
} finally {
if (fileStreamOut != null) {
fileStreamOut.close();
}
if (dest != null) {
dest.close();
}
if (!complete) {
database.getFileAccess().removeElement(filename + ".new");
}
}
// Error.printSystemOut("Transfer complete: ", stopw.elapsedTime());
}
/**
* called from outside after the complete end of defrag
*/
void updateTableIndexRoots() {
HsqlArrayList allTables = database.schemaManager.getAllTables();
for (int i = 0, size = allTables.size(); i < size; i++) {
Table t = (Table) allTables.get(i);
if (t.getTableType() == TableBase.CACHED_TABLE) {
int[] rootsArray = rootsList[i];
t.setIndexRoots(rootsArray);
}
}
}
/**
* called from outside after the complete end of defrag
*/
void updateTransactionRowIDs() {
database.txManager.convertTransactionIDs(transactionRowLookup);
}
int[] writeTableToDataFile(Table table) throws IOException {
Session session = database.getSessionManager().getSysSession();
PersistentStore store = session.sessionData.getRowStore(table);
RowOutputInterface rowOut = new RowOutputBinary();
DoubleIntIndex pointerLookup = new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(store), false);
int[] rootsArray = table.getIndexRootsArray();
long pos = fileOffset;
int count = 0;
pointerLookup.setKeysSearchTarget();
Error.printSystemOut("lookup begins: " + stopw.elapsedTime());
RowIterator it = table.rowIterator(session);
for (; it.hasNext(); count++) {
CachedObject row = it.getNextRow();
pointerLookup.addUnsorted(row.getPos(), (int) (pos / scale));
if (count % 50000 == 0) {
Error.printSystemOut("pointer pair for row " + count + " " + row.getPos() + " " + pos);
}
pos += row.getStorageSize();
}
Error.printSystemOut(table.getName().name + " list done ", stopw.elapsedTime());
count = 0;
it = table.rowIterator(session);
for (; it.hasNext(); count++) {
CachedObject row = it.getNextRow();
rowOut.reset();
row.write(rowOut, pointerLookup);
fileStreamOut.write(rowOut.getOutputStream().getBuffer(), 0, rowOut.size());
fileOffset += row.getStorageSize();
if ((count) % 50000 == 0) {
Error.printSystemOut(count + " rows " + stopw.elapsedTime());
}
}
for (int i = 0; i < rootsArray.length; i++) {
if (rootsArray[i] == -1) {
continue;
}
int lookupIndex = pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);
if (lookupIndex == -1) {
throw Error.error(ErrorCode.DATA_FILE_ERROR);
}
rootsArray[i] = pointerLookup.getValue(lookupIndex);
}
setTransactionRowLookups(pointerLookup);
Error.printSystemOut(table.getName().name + " : table converted");
return rootsArray;
}
void setTransactionRowLookups(DoubleIntIndex pointerLookup) {
for (int i = 0, size = transactionRowLookup.size(); i < size; i++) {
int key = transactionRowLookup.getKey(i);
int lookupIndex = pointerLookup.findFirstEqualKeyIndex(key);
if (lookupIndex != -1) {
transactionRowLookup.setValue(i, pointerLookup.getValue(lookupIndex));
}
}
}
void writeTransactionRows() {
for (int i = 0, size = transactionRowLookup.size(); i < size; i++) {
if (transactionRowLookup.getValue(i) != 0) {
continue;
}
int key = transactionRowLookup.getKey(i);
try {
transactionRowLookup.setValue(i, (int) (fileOffset / scale));
RowInputInterface rowIn = cache.readObject(key);
fileStreamOut.write(rowIn.getBuffer(), 0, rowIn.getSize());
fileOffset += rowIn.getSize();
} catch (HsqlException e) {
} catch (IOException e) {
}
}
}
}
15
Source : TestLibSpeed.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
* @author Fred Toussi (fredt@users dot sourceforge.net)
*/
public clreplaced TestLibSpeed {
static final String[][] sNumeric = { { "ABS", "org.hsqldb.Library.abs" }, { "ACOS", "java.lang.Math.acos" }, { "ASIN", "java.lang.Math.asin" }, { "ATAN", "java.lang.Math.atan" }, { "ATAN2", "java.lang.Math.atan2" }, { "CEILING", "java.lang.Math.ceil" }, { "COS", "java.lang.Math.cos" }, { "COT", "org.hsqldb.Library.cot" }, { "DEGREES", "java.lang.Math.toDegrees" }, { "EXP", "java.lang.Math.exp" }, { "FLOOR", "java.lang.Math.floor" }, { "LOG", "java.lang.Math.log" }, { "LOG10", "org.hsqldb.Library.log10" }, { "MOD", "org.hsqldb.Library.mod" }, { "PI", "org.hsqldb.Library.pi" }, { "POWER", "java.lang.Math.pow" }, { "RADIANS", "java.lang.Math.toRadians" }, { "RAND", "java.lang.Math.random" }, { "ROUND", "org.hsqldb.Library.round" }, { "SIGN", "org.hsqldb.Library.sign" }, { "SIN", "java.lang.Math.sin" }, { "SQRT", "java.lang.Math.sqrt" }, { "TAN", "java.lang.Math.tan" }, { "TRUNCATE", "org.hsqldb.Library.truncate" }, { "BITAND", "org.hsqldb.Library.bitand" }, { "BITOR", "org.hsqldb.Library.bitor" }, { "ROUNDMAGIC", "org.hsqldb.Library.roundMagic" } };
static HashSet hashSet = new HashSet();
static DoubleIntIndex doubleIntLookup = new DoubleIntIndex(sNumeric.length, false);
static IntKeyIntValueHashMap intKeyIntValueHashLookup = new IntKeyIntValueHashMap();
static IntValueHashMap intValueHashLookup = new IntValueHashMap(sNumeric.length);
static IntKeyHashMap intKeyHashLookup = new IntKeyHashMap();
static {
doubleIntLookup.setKeysSearchTarget();
java.util.Random randomgen = new java.util.Random();
int[] row = new int[2];
for (int i = 0; i < sNumeric.length; i++) {
hashSet.add(sNumeric[i][0]);
intKeyIntValueHashLookup.put(randomgen.nextInt(sNumeric.length), i);
intKeyHashLookup.put(i, new Integer(i));
doubleIntLookup.add(randomgen.nextInt(sNumeric.length), i);
intValueHashLookup.put(sNumeric[i][0], randomgen.nextInt(sNumeric.length));
}
}
static int count = 100000;
public TestLibSpeed() {
java.util.Random randomgen = new java.util.Random();
StopWatch sw = new StopWatch();
int dummy = 0;
System.out.println("set lookup ");
for (int k = 0; k < 3; k++) {
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
hashSet.contains(sNumeric[r][0]);
dummy += r;
}
}
System.out.println("HashSet contains " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
intKeyIntValueHashLookup.get(r, -1);
dummy += r;
}
}
System.out.println("IntKeyIntValueHashMap Lookup with array " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
intKeyHashLookup.get(r);
dummy += r;
}
}
System.out.println("IntKeyHashMap Lookup " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
doubleIntLookup.findFirstEqualKeyIndex(r);
dummy += r;
}
}
System.out.println("DoubleIntTable Lookup " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
intValueHashLookup.get(sNumeric[r][0], 0);
dummy += r;
}
}
System.out.println("IntKeyIntValueHashMap Lookup " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
dummy += r;
}
}
System.out.println("emptyOp " + sw.elapsedTime());
sw.zero();
for (int j = 0; j < count; j++) {
for (int i = 0; i < sNumeric.length; i++) {
int r = randomgen.nextInt(sNumeric.length);
doubleIntLookup.findFirstEqualKeyIndex(r);
dummy += r;
}
}
System.out.println("DoubleIntTable Lookup " + sw.elapsedTime());
sw.zero();
System.out.println("Object Cache Test " + sw.elapsedTime());
}
}
public static void main(String[] argv) {
TestLibSpeed ls = new TestLibSpeed();
}
}
15
Source : TransactionManager.java
with GNU General Public License v3.0
from s-store
with GNU General Public License v3.0
from s-store
/**
 * Return a lookup of all row ids for cached tables in transactions.
 * For auto-defrag, as currently there will be no RowAction entries
 * at the time of defrag.
 */
public DoubleIntIndex getTransactionIDList() {
    writeLock.lock();
    try {
        DoubleIntIndex idList = new DoubleIntIndex(10, false);
        idList.setKeysSearchTarget();
        Iterator rowIds = this.rowActionMap.keySet().iterator();
        while (rowIds.hasNext()) {
            idList.addUnique(rowIds.nextInt(), 0);
        }
        return idList;
    } finally {
        writeLock.unlock();
    }
}
13
Source : RowStoreAVLDisk.java
with Apache License 2.0
from SERG-Delft
with Apache License 2.0
from SERG-Delft
/**
 * Rewrites all rows of this store into its table space, then swaps the
 * index accessors to the new positions and releases the old rows.
 * Returns early for empty stores or row counts beyond int range.
 */
public void moveDataToSpace(Session session) {
    Table table = (Table) this.table;
    long rowCount = elementCount();
    if (rowCount == 0) {
        return;
    }
    if (rowCount > Integer.MAX_VALUE) {
        // error too big
        return;
    }
    // old position (key) -> new position (value) for every moved row
    DoubleIntIndex pointerLookup = new DoubleIntIndex((int) rowCount, false);
    pointerLookup.setKeysSearchTarget();
    writeLock();
    try {
        moveDataToSpace(cache, pointerLookup);
        // re-point each index accessor at the row's new position
        CachedObject[] newAccessorList = new CachedObject[accessorList.length];
        for (int i = 0; i < accessorList.length; i++) {
            long pos = pointerLookup.lookup(accessorList[i].getPos());
            newAccessorList[i] = cache.get(pos, this, false);
        }
        RowIterator it = rowIterator();
        // todo - check this - must remove from old space, not new one
        while (it.hasNext()) {
            Row row = it.getNextRow();
            cache.remove(row);
            tableSpace.release(row.getPos(), row.getStorageSize());
        }
        accessorList = newAccessorList;
    } finally {
        writeUnlock();
    }
    database.logger.logDetailEvent("table written " + table.getName().name);
}
12
Source : TestHashStructures.java
with Apache License 2.0
from SERG-Delft
/**
 * Speed/consistency test for DoubleIntIndex: populates the index and a
 * reference IntKeyHashMap with the same random int pairs, then for 10000
 * rounds removes and re-adds a 100-key sample, and finally verifies the
 * index still agrees with the reference map. Any exception during the
 * loop marks the test as failed.
 */
public void testDoubleIntSpeed() throws Exception {
    boolean failed = false;
    int testSize = 500;
    org.hsqldb.lib.IntKeyHashMap hIntMap = new org.hsqldb.lib.IntKeyHashMap();
    DoubleIntIndex intLookup = new DoubleIntIndex(testSize, false);
    intLookup.setKeysSearchTarget();
    populateByRandomIntKeysInt(intLookup, hIntMap, testSize);
    compareByHIteratorInt(intLookup, hIntMap);
    int[] sample = getSampleIntArray(intLookup, 100);
    int[] sampleVals = new int[sample.length];
    // loop counters declared outside the try so the catch can report them
    int i = 0;
    int j = 0;
    StopWatch sw = new StopWatch();
    try {
        for (j = 0; j < 10000; j++) {
            // remove the sampled keys, remembering their values ...
            for (i = 0; i < sample.length; i++) {
                int pos = intLookup.findFirstEqualKeyIndex(sample[i]);
                sampleVals[i] = intLookup.getValue(pos);
                intLookup.remove(pos);
            }
            // ... then put them back
            for (i = 0; i < sample.length; i++) {
                intLookup.addUnique(sample[i], sampleVals[i]);
            }
        }
        System.out.println(sw.elapsedTimeToMessage("Double int table times"));
        // a search forces the index to re-sort before the final comparison
        intLookup.findFirstEqualKeyIndex(0);
        compareByHIteratorInt(intLookup, hIntMap);
    } catch (Exception e) {
        System.out.println(sw.elapsedTimeToMessage("Double int table error: i =" + i));
        failed = true;
    }
    // fixed: identifier was garbled to "replacedertTrue" by a scraping artifact
    assertTrue(!failed);
}
12
Source : RowStoreAVLDisk.java
with Apache License 2.0
from SERG-Delft
/**
 * Writes every row of this store into targetCache at freshly allocated
 * positions in the target's table space, filling pointerLookup with the
 * old-position -> new-position mapping.
 * Two passes over the primary index: the first records each row's
 * position and storage size, the second writes the row data. Rows are
 * also passed pointerLookup on write so internal references can be
 * remapped — TODO confirm against CachedObject.write contract.
 */
public void moveDataToSpace(DataFileCache targetCache, DoubleIntIndex pointerLookup) {
int spaceId = table.getSpaceID();
TableSpaceManager targetSpace = targetCache.spaceManager.getTableSpace(spaceId);
pointerLookup.setKeysSearchTarget();
// pass 1: key = old position, value = storage size (temporarily)
RowIterator it = indexList[0].firstRow(this);
while (it.hasNext()) {
CachedObject row = it.getNextRow();
pointerLookup.addUnsorted(row.getPos(), row.getStorageSize());
}
pointerLookup.sort();
// allocate a new position for each row and overwrite the size value
// with it; NOTE(review): the long position is narrowed to int — assumes
// scaled positions fit in 32 bits, confirm against file scale settings
for (int i = 0; i < pointerLookup.size(); i++) {
long newPos = targetSpace.getFilePosition(pointerLookup.getValue(i), false);
pointerLookup.setValue(i, (int) newPos);
}
// pass 2: serialize each row and save it at its new position
it = indexList[0].firstRow(this);
while (it.hasNext()) {
CachedObject row = it.getNextRow();
long newPos = pointerLookup.lookup(row.getPos());
// write
targetCache.rowOut.reset();
row.write(targetCache.rowOut, pointerLookup);
targetCache.saveRowOutput(newPos);
}
}
12
Source : DataSpaceManagerSimple.java
with Apache License 2.0
from SERG-Delft
/**
 * Returns freed blocks to the space manager and updates the running
 * fragment-size total.
 * When full is true the whole range [offset, limit) is being released:
 * if it sits at the end of the file the file free position is simply
 * rolled back, otherwise the range is counted as fragmentation and the
 * block list is copied into the manager's own lookup. When full is
 * false the list is compacted in place and trimmed to half capacity.
 */
public void freeTableSpace(int spaceId, DoubleIntIndex spaceList, long offset, long limit, boolean full) {
// count all freed blocks (values are sizes in file-scale units)
totalFragmentSize += spaceList.getTotalValues() * cache.getDataFileScale();
if (full) {
if (cache.fileFreePosition == limit) {
// released range ends at the file's free position: reclaim it
cache.writeLock.lock();
try {
cache.fileFreePosition = offset;
} finally {
cache.writeLock.unlock();
}
} else {
// range is interior — it becomes fragmentation
totalFragmentSize += limit - offset;
}
if (spaceList.size() != 0) {
// take ownership of the remaining blocks in a sorted lookup
lookup = new DoubleIntIndex(spaceList.size(), true);
spaceList.copyTo(lookup);
spaceList.clear();
}
} else {
// merge adjacent blocks, then sort by size so the smallest blocks
// are the ones discarded when the list is over half capacity
spaceList.compactLookupAsIntervals();
spaceList.setValuesSearchTarget();
spaceList.sort();
int extra = spaceList.size() - spaceList.capacity() / 2;
if (extra > 0) {
spaceList.removeRange(0, extra);
// NOTE(review): this subtracts the total of the values remaining
// AFTER removeRange — verify the intended accounting against
// DoubleIntIndex.removeRange semantics
totalFragmentSize -= spaceList.getTotalValues() * cache.getDataFileScale();
}
}
}
5
Source : DataFileDefrag.java
with GNU General Public License v3.0
from s-store
/**
 * Defrag helper: writes all rows of the given table contiguously to the
 * output stream and returns the table's index roots remapped to their
 * new positions.
 * Pass 1 builds a lookup from each row's old position to its new
 * scaled position; pass 2 serializes each row (remapping internal row
 * references through the lookup) and advances fileOffset. Finally the
 * index root positions are translated through the same lookup.
 *
 * @throws IOException from the output stream
 */
int[] writeTableToDataFile(Table table) throws IOException {
Session session = database.getSessionManager().getSysSession();
PersistentStore store = session.sessionData.getRowStore(table);
RowOutputInterface rowOut = new RowOutputBinary();
// capacity estimated from the primary index size
DoubleIntIndex pointerLookup = new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(store), false);
int[] rootsArray = table.getIndexRootsArray();
long pos = fileOffset;
int count = 0;
pointerLookup.setKeysSearchTarget();
Error.printSystemOut("lookup begins: " + stopw.elapsedTime());
// pass 1: record old position -> new scaled position for every row
RowIterator it = table.rowIterator(session);
for (; it.hasNext(); count++) {
CachedObject row = it.getNextRow();
pointerLookup.addUnsorted(row.getPos(), (int) (pos / scale));
if (count % 50000 == 0) {
// periodic progress logging for large tables
Error.printSystemOut("pointer pair for row " + count + " " + row.getPos() + " " + pos);
}
pos += row.getStorageSize();
}
Error.printSystemOut(table.getName().name + " list done ", stopw.elapsedTime());
count = 0;
// pass 2: serialize each row at its new position; pointerLookup lets
// the row remap its internal references during write
it = table.rowIterator(session);
for (; it.hasNext(); count++) {
CachedObject row = it.getNextRow();
rowOut.reset();
row.write(rowOut, pointerLookup);
fileStreamOut.write(rowOut.getOutputStream().getBuffer(), 0, rowOut.size());
fileOffset += row.getStorageSize();
if ((count) % 50000 == 0) {
Error.printSystemOut(count + " rows " + stopw.elapsedTime());
}
}
// translate each index root to its new position; -1 marks an empty index
for (int i = 0; i < rootsArray.length; i++) {
if (rootsArray[i] == -1) {
continue;
}
int lookupIndex = pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);
if (lookupIndex == -1) {
// a root position missing from the lookup means the data file is corrupt
throw Error.error(ErrorCode.DATA_FILE_ERROR);
}
rootsArray[i] = pointerLookup.getValue(lookupIndex);
}
setTransactionRowLookups(pointerLookup);
Error.printSystemOut(table.getName().name + " : table converted");
return rootsArray;
}