Compare commits: master...feature/te

2 commits

| Author | SHA1 | Date |
|---|---|---|
| Eyck Jentzsch | 397863875e | |
| Eyck Jentzsch | 73c257af03 | |
@@ -6,8 +6,5 @@
 SCViewer initiator_target.launch
 SCViewer.xcf
 SCViewer_1.png
-*.launch
-copyrightLog.txt
 /workspace
+?*.launch
-/.settings/
-.tycho-consumer-pom.xml
@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<intAttribute key="M2_COLORS" value="0"/>
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="package"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<stringAttribute key="M2_USER_SETTINGS" value=""/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<booleanAttribute key="org.eclipse.debug.core.ATTR_FORCE_SYSTEM_CONSOLE_ENCODING" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_ATTR_USE_ARGFILE" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_SHOW_CODEDETAILS_IN_EXCEPTION_MESSAGES" value="true"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_CLASSPATH_ONLY_JAR" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
-<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-17/"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${project_loc:com.minres.scviewer.parent}"/>
-</launchConfiguration>

@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<intAttribute key="M2_COLORS" value="0"/>
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="package"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value="release-composite"/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="4"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<stringAttribute key="M2_USER_SETTINGS" value=""/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="true"/>
-<booleanAttribute key="org.eclipse.debug.core.ATTR_FORCE_SYSTEM_CONSOLE_ENCODING" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_ATTR_USE_ARGFILE" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_SHOW_CODEDETAILS_IN_EXCEPTION_MESSAGES" value="true"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_CLASSPATH_ONLY_JAR" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
-<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-17/"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${project_loc:com.minres.scviewer.parent}"/>
-</launchConfiguration>

@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<intAttribute key="M2_COLORS" value="0"/>
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="tycho-versions:set-version tycho-versions:update-pom"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES">
-<listEntry value="newVersion=${string_prompt:new version number}"/>
-</listAttribute>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<stringAttribute key="M2_USER_SETTINGS" value=""/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="bad_container_name" value="/com.minres.scviewer.parent/.launch"/>
-<booleanAttribute key="org.eclipse.debug.core.ATTR_FORCE_SYSTEM_CONSOLE_ENCODING" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_ATTR_USE_ARGFILE" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_SHOW_CODEDETAILS_IN_EXCEPTION_MESSAGES" value="true"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_CLASSPATH_ONLY_JAR" value="false"/>
-<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
-<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-17/"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${project_loc:com.minres.scviewer.parent}"/>
-</launchConfiguration>
README.md
@@ -1,70 +1,42 @@
 SCViewer
 ========
 
-SCViewer is composed of a set of eclipse plugins to display VCD (e.g. created by SystemC VCD trace) and transaction streams. Those streams can be
-created by the SystemC Verification Library (SCV, for further description of the SCV please refer to https://www.accellera.org/activities/working-groups/systemc-verification) or by the **L**ight**w**eight **T**ransaction **R**ecording for SystemC ( [LWTR4SC](https://github.com/Minres/LWTR4SC) ).
+SCViewer is composed of a set of eclipse plugins to display VCD and transaction streams
+created by the SystemC VCD trace implementation and the SystemC Verification Library (SCV).
+For further description of the SCV please refer to
+http://www.accellera.org/activities/committees/systemc-verification.
 
-The viewer has the following features
-- support of VCD files (compressed and uncompressed)
-- real numbers
-- showing vectors and real numbers as analog (step-wise & continuous)
-- various value representations of bit vectors
-- support of SCV transaction recordings in various formats
-- text log files (compressed and uncompressed)
-- sqlite based
-- visualization of transaction relations
 The viewer is in early alpha stage and not yet ready for production use!
 
-> If you encounter issues when running on Linux please try running as `SWT_GTK3=0 scviewer` as there exist issues with GTK3.
-
-To build the plugins the Eclipse SDK or PDE can be used.
-
-Key Shortcuts
-=============
-
-Legend:
-
-* Left Mouse Button: LMB
-* Middle Mouse Button: MMB
-* Mouse Scroll wheel: MScrl
-* Context any means Name List, Value List or Waveform
-
-| Input       | Modifier | Context  | Action                            |
-|-------------|----------|----------|-----------------------------------|
-| LMB click   |          | any      | select                            |
-| LMB click   | Shift    | Waveform | move selected marker to position  |
-| LMB click   | Control  | Waveform | move cursor to position           |
-| LMB drag    |          | Waveform | zoom to range                     |
-| MMB click   |          | Waveform | move selected marker to position  |
-| MScrl       |          | any      | scroll window up/down             |
-| MScrl       | Shift    | any      | scroll window left/right          |
-| MScrl       | Control  | Waveform | zoom in/out                       |
-| Key left    |          | Waveform | scroll window to the left (slow)  |
-| Key right   |          | Waveform | scroll window to the right (slow) |
-| Key left    | Shift    | Waveform | scroll window to the left (fast)  |
-| Key right   | Shift    | Waveform | scroll window to the right (fast) |
-| Key up      |          | Waveform | move selection up                 |
-| Key down    |          | Waveform | move selection down               |
-| Key up      | Control  | Waveform | move selected track up            |
-| Key down    | Control  | Waveform | move selected track down          |
-| Key +       | Control  | Waveform | zoom in                           |
-| Key -       | Control  | Waveform | zoom out                          |
-| Key Pos1    |          | Waveform | jump to selected marker           |
-| Key End     |          | Waveform | jump to cursor                    |
-| Key Del     |          | any      | delete selected entries           |
-| LMB click   |          | ZoomBar  | increment/decrement 1 page        |
-| LMB drag    |          | ZoomBar  | drag both markers (pan)           |
-| LMB drag    | Control  | ZoomBar  | drag one marker (zoom)            |
-| MMB drag    |          | ZoomBar  | drag one marker (zoom)            |
-| xMB dclick  |          | ZoomBar  | pan to position                   |
-| MScrl       |          | ZoomBar  | scroll window left/right          |
-| MScrl       | Shift    | ZoomBar  | scroll window left/right double speed |
-| MScrl       | Control  | ZoomBar  | zoom in/out                       |
-| Key left    |          | ZoomBar  | scroll window to the left (slow)  |
-| Key right   |          | ZoomBar  | scroll window to the right (slow) |
-| Key up      |          | ZoomBar  | scroll window to the left (slow)  |
-| Key down    |          | ZoomBar  | scroll window to the right (slow) |
-| Key PgUp    |          | ZoomBar  | scroll window to the left (fast)  |
-| Key PgDown  |          | ZoomBar  | scroll window to the right (fast) |
-| Key Pos1    |          | ZoomBar  | scroll to begin                   |
-| Key End     |          | ZoomBar  | scroll to end                     |
+The plugins are structured as follows:
+- com.minres.scviewer.database
+  the interface defining the API to access the database and the implementation for VCD
+- com.minres.scviewer.database.text
+  an implementation of the API to read the text files generated by the SCV
+  sc_tr_text database
+- com.minres.scviewer.database.sqlite
+  an implementation of the API to read the files generated by implementation in the
+  sc_tr_sqlite project using a SQLite based database
+- com.minres.scviewer.database.test
+  some JUnit tests of the 3 back ends
+- com.minres.scviewer.ui
+  the viewer itself to display the transactions and associated views like the
+  outline of the DB and the properties of the transaction
+- com.minres.scviewer.feature
+  the feature combining the plugins above into a somehow usable form
+- scv_tr_sqlite
+  a C++ project containing the SQLite based SCV database implementation and the scv4tlm
+  socket implementations.
+  A simple example (scv_tr_recording_example.cpp) for testing purposes of the database is
+  provided.
+
+To build the plugins the Eclipse SDK or PDE can be used. In both cases the Groovy
+eclipse plugin (http://groovy.codehaus.org/Eclipse+Plugin or Market) has to be
+installed.
+
+TODO
+====
+- add more tests
+- move to feature based product to allow automatic updates
+- improve graphics
+- catch-up e3 plugin to functionality of e4 product
@@ -1,11 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <classpath>
-	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-17">
-		<attributes>
-			<attribute name="module" value="true"/>
-		</attributes>
-	</classpathentry>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
 	<classpathentry kind="src" path="src"/>
+	<classpathentry exported="true" kind="lib" path="sqlite-jdbc-3.8.7.jar"/>
 	<classpathentry kind="output" path="target/classes"/>
 </classpath>
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.8
@@ -2,14 +2,16 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: SQLite transaction database
 Bundle-SymbolicName: com.minres.scviewer.database.sqlite
-Bundle-Version: 1.1.0.qualifier
+Bundle-Version: 1.0.0.qualifier
 Bundle-Vendor: MINRES Technologies GmbH
-Bundle-RequiredExecutionEnvironment: JavaSE-17
-Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0"
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0",
+ org.eclipse.equinox.util;bundle-version="1.0.500",
+ org.eclipse.equinox.ds;bundle-version="1.4.200",
+ org.eclipse.osgi.services;bundle-version="3.4.0"
 Bundle-ClassPath: .,sqlite-jdbc-3.8.7.jar
 Service-Component: OSGI-INF/component.xml
 Bundle-ActivationPolicy: lazy
 Embed-Dependency: sqlite-jdbc
 Embedded-Artifacts: sqlite-jdbc-3.8.7.jar;g="org.xerial";
  a="sqlite-jdbc";v="3.8.7"
-Automatic-Module-Name: com.minres.scviewer.database.sqlite
@@ -1,5 +1,5 @@
 ###############################################################################
-# Copyright (c) 2014, 2015-2021 MINRES Technologies GmbH and others.
+# Copyright (c) 2014, 2015 MINRES Technologies GmbH and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Eclipse Public License v1.0
 # which accompanies this distribution, and is available at
@@ -4,8 +4,8 @@
 <parent>
 <groupId>com.minres.scviewer</groupId>
 <artifactId>com.minres.scviewer.parent</artifactId>
-<version>2.19.4</version>
-<relativePath>../..</relativePath>
+<version>1.0.0-SNAPSHOT</version>
+<relativePath>../com.minres.scviewer.parent</relativePath>
 </parent>
 <packaging>eclipse-plugin</packaging>
 <dependencies>

@@ -15,5 +15,4 @@
 <version>3.8.7</version>
 </dependency>
 </dependencies>
+<version>1.1.0-SNAPSHOT</version>
 </project>
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -11,9 +11,8 @@
 package com.minres.scviewer.database.sqlite;
 
 import java.beans.IntrospectionException;
-import java.beans.PropertyChangeListener;
-import java.beans.PropertyChangeSupport;
 import java.io.File;
+import java.io.FileInputStream;
 import java.lang.reflect.InvocationTargetException;
 import java.sql.SQLException;
 import java.util.ArrayList;

@@ -21,8 +20,9 @@ import java.util.Collection;
 import java.util.List;
 
 import com.minres.scviewer.database.IWaveform;
+import com.minres.scviewer.database.IWaveformDb;
 import com.minres.scviewer.database.IWaveformDbLoader;
-import com.minres.scviewer.database.InputFormatException;
+import com.minres.scviewer.database.IWaveformEvent;
 import com.minres.scviewer.database.RelationType;
 import com.minres.scviewer.database.sqlite.db.IDatabase;
 import com.minres.scviewer.database.sqlite.db.SQLiteDatabase;
@@ -36,107 +36,78 @@ public class SQLiteDbLoader implements IWaveformDbLoader {
 	protected IDatabase database;
 
 	private List<RelationType> usedRelationsList = new ArrayList<>();
 
+	private IWaveformDb db;
+
 	private ScvSimProps scvSimProps;
 
-	/** The pcs. */
-	protected PropertyChangeSupport pcs = new PropertyChangeSupport(this);
 
 	public SQLiteDbLoader() {
 	}
 
 	@Override
-	public long getMaxTime() {
-		SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<>(ScvTxEvent.class,
+	public Long getMaxTime() {
+		SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<ScvTxEvent>(ScvTxEvent.class,
 				database, "time = (SELECT MAX(time) FROM ScvTxEvent)");
 		try {
 			List<ScvTxEvent> event = handler.selectObjects();
-			if(!event.isEmpty())
+			if(event.size()>0)
 				return event.get(0).getTime()*scvSimProps.getTime_resolution();
 		} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-				| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+				| InvocationTargetException | SQLException | IntrospectionException e) {
 			e.printStackTrace();
 		}
 		return 0L;
 	}
 
 	@Override
-	public Collection<IWaveform> getAllWaves() {
-		SQLiteDatabaseSelectHandler<ScvStream> handler = new SQLiteDatabaseSelectHandler<>(ScvStream.class, database);
-		List<IWaveform> streams=new ArrayList<>();
+	public List<IWaveform<? extends IWaveformEvent>> getAllWaves() {
+		SQLiteDatabaseSelectHandler<ScvStream> handler = new SQLiteDatabaseSelectHandler<ScvStream>(ScvStream.class, database);
+		List<IWaveform<? extends IWaveformEvent>> streams=new ArrayList<IWaveform<? extends IWaveformEvent>>();
 		try {
 			for(ScvStream scvStream:handler.selectObjects()){
-				TxStream stream = new TxStream(database, scvStream);
+				TxStream stream = new TxStream(database, db, scvStream);
 				stream.setRelationTypeList(usedRelationsList);
 				streams.add(stream);
 			}
 		} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-				| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+				| InvocationTargetException | SQLException | IntrospectionException e) {
 			// e.printStackTrace();
 		}
 		return streams;
 	}
 
 	// @Override
 	// public boolean canLoad(File inputFile) {
 	//     if (!inputFile.isDirectory() && inputFile.exists()) {
 	//         try(InputStream stream = new FileInputStream(inputFile)){
 	//             byte[] buffer = new byte[x.length];
 	//             int readCnt = stream.read(buffer, 0, x.length);
 	//             if (readCnt == x.length) {
 	//                 for (int i = 0; i < x.length; i++)
 	//                     if (buffer[i] != x[i])
 	//                         return false;
 	//             }
 	//             return true;
 	//         } catch (Exception e) {
 	//             return false;
 	//         }
 	//     }
 	//     return false;
 	// }
 	private byte[] x = "SQLite format 3".getBytes();
 
 	@Override
-	public void load(File file) throws InputFormatException {
+	public boolean load(IWaveformDb db, File file) throws Exception {
+		this.db=db;
 		FileInputStream fis = new FileInputStream(file);
 		byte[] buffer = new byte[x.length];
 		int read = fis.read(buffer, 0, x.length);
 		fis.close();
 		if (read == x.length)
 			for (int i = 0; i < x.length; i++)
				if (buffer[i] != x[i]) return false;
 
 		database=new SQLiteDatabase(file.getAbsolutePath());
 		database.setData("TIMERESOLUTION", 1L);
-		SQLiteDatabaseSelectHandler<ScvSimProps> handler = new SQLiteDatabaseSelectHandler<>(ScvSimProps.class, database);
+		SQLiteDatabaseSelectHandler<ScvSimProps> handler = new SQLiteDatabaseSelectHandler<ScvSimProps>(ScvSimProps.class, database);
 		try {
 			for(ScvSimProps simProps:handler.selectObjects()){
 				scvSimProps=simProps;
 				database.setData("TIMERESOLUTION", scvSimProps.getTime_resolution());
 			}
-			pcs.firePropertyChange(IWaveformDbLoader.LOADING_FINISHED, null, null);
+			return true;
 		} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-				| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
-			throw new InputFormatException(e.toString());
+				| InvocationTargetException | SQLException | IntrospectionException e) {
+			e.printStackTrace();
 		}
+		return false;
 	}
 
 	public void dispose() {
 		database=null;
 		usedRelationsList=null;
 	}
 
 	@Override
 	public Collection<RelationType> getAllRelationTypes(){
 		return usedRelationsList;
 	}
 
-	/**
-	 * Adds the property change listener.
-	 *
-	 * @param l the l
-	 */
-	@Override
-	public void addPropertyChangeListener(PropertyChangeListener l) {
-		pcs.addPropertyChangeListener(l);
-	}
-
-	/**
-	 * Removes the property change listener.
-	 *
-	 * @param l the l
-	 */
-	@Override
-	public void removePropertyChangeListener(PropertyChangeListener l) {
-		pcs.removePropertyChangeListener(l);
-	}
-
 }
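The load() implementation above recognizes an SQLite database by comparing the first bytes of the file against the magic string "SQLite format 3". A minimal standalone sketch of that header check, assuming nothing from the code base (the SqliteSniffer class and isSqliteFile name are illustrative, not part of SCViewer; the full on-disk header is 16 bytes ending in a NUL, so matching the 15-byte prefix as the loader does is sufficient in practice):

```java
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Hypothetical helper, not part of the code base: returns true if the
// file starts with the SQLite header prefix "SQLite format 3".
final class SqliteSniffer {
    private static final byte[] MAGIC = "SQLite format 3".getBytes(StandardCharsets.US_ASCII);

    static boolean isSqliteFile(File file) {
        try (DataInputStream in = new DataInputStream(new FileInputStream(file))) {
            byte[] buffer = new byte[MAGIC.length];
            in.readFully(buffer);         // throws EOFException on files shorter than the header
            return Arrays.equals(buffer, MAGIC);
        } catch (IOException e) {
            return false;                 // unreadable or too short: not an SQLite file
        }
    }
}
```

Compared with the diffed code, the try-with-resources block also guarantees the stream is closed on the error paths, which the plain fis.close() call above does not.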
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -18,7 +18,12 @@ import java.util.Collection;
 import java.util.List;
 
 import com.minres.scviewer.database.AssociationType;
-import com.minres.scviewer.database.IWaveform;
+import com.minres.scviewer.database.ITx;
+import com.minres.scviewer.database.ITxAttribute;
+import com.minres.scviewer.database.ITxEvent;
+import com.minres.scviewer.database.ITxGenerator;
+import com.minres.scviewer.database.ITxRelation;
+import com.minres.scviewer.database.ITxStream;
 import com.minres.scviewer.database.sqlite.db.IDatabase;
 import com.minres.scviewer.database.sqlite.db.SQLiteDatabaseSelectHandler;
 import com.minres.scviewer.database.sqlite.tables.ScvStream;

@@ -26,9 +31,6 @@ import com.minres.scviewer.database.sqlite.tables.ScvTx;
 import com.minres.scviewer.database.sqlite.tables.ScvTxAttribute;
 import com.minres.scviewer.database.sqlite.tables.ScvTxEvent;
 import com.minres.scviewer.database.sqlite.tables.ScvTxRelation;
-import com.minres.scviewer.database.tx.ITx;
-import com.minres.scviewer.database.tx.ITxAttribute;
-import com.minres.scviewer.database.tx.ITxRelation;
 
 public class Tx implements ITx {
 
@@ -37,10 +39,8 @@ public class Tx implements ITx {
 	private TxGenerator trGenerator;
 	private ScvTx scvTx;
 	private List<ITxAttribute> attributes;
-	private long begin=-1;
-	private long end=-1;
-	private List<ITxRelation> incoming;
-	private List<ITxRelation> outgoing;
+	private Long begin, end;
+	private List<ITxRelation> incoming, outgoing;
 
 	public Tx(IDatabase database, TxStream trStream, TxGenerator trGenerator, ScvTx scvTx) {
 		this.database=database;
@@ -50,51 +50,52 @@ public class Tx implements ITx {
 	}
 
 	@Override
-	public long getId() {
+	public Long getId() {
 		return (long) scvTx.getId();
 	}
 
 	@Override
-	public IWaveform getStream() {
+	public ITxStream<ITxEvent> getStream() {
 		return trStream;
 	}
 
 	@Override
-	public IWaveform getGenerator() {
+	public ITxGenerator getGenerator() {
 		return trGenerator;
 	}
 
-	int getConcurrencyIndex() {
+	@Override
+	public int getConcurrencyIndex() {
 		return scvTx.getConcurrencyLevel();
 	}
 
 	@Override
-	public long getBeginTime() {
-		if(begin<0){
-			SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<>(ScvTxEvent.class,
+	public Long getBeginTime() {
+		if(begin==null){
+			SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<ScvTxEvent>(ScvTxEvent.class,
 					database, "tx="+scvTx.getId()+" AND type="+ AssociationType.BEGIN.ordinal());
 			try {
 				for(ScvTxEvent scvEvent:handler.selectObjects()){
 					begin= scvEvent.getTime()*(Long)database.getData("TIMERESOLUTION");
 				}
 			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-					| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+					| InvocationTargetException | SQLException | IntrospectionException e) {
 			}
 		}
 		return begin;
 	}
 
 	@Override
-	public long getEndTime() {
-		if(end<0){
-			SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<>(ScvTxEvent.class,
+	public Long getEndTime() {
+		if(end==null){
+			SQLiteDatabaseSelectHandler<ScvTxEvent> handler = new SQLiteDatabaseSelectHandler<ScvTxEvent>(ScvTxEvent.class,
 					database, "tx="+scvTx.getId()+" AND type="+ AssociationType.END.ordinal());
 			try {
 				for(ScvTxEvent scvEvent:handler.selectObjects()){
 					end = scvEvent.getTime()*(Long)database.getData("TIMERESOLUTION");
 				}
 			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-					| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+					| InvocationTargetException | SQLException | IntrospectionException e) {
 			}
 		}
 		return end;
@@ -103,16 +104,16 @@ public class Tx implements ITx {
 	@Override
 	public List<ITxAttribute> getAttributes() {
 		if(attributes==null){
-			SQLiteDatabaseSelectHandler<ScvTxAttribute> handler = new SQLiteDatabaseSelectHandler<>(
+			SQLiteDatabaseSelectHandler<ScvTxAttribute> handler = new SQLiteDatabaseSelectHandler<ScvTxAttribute>(
 					ScvTxAttribute.class, database, "tx="+scvTx.getId());
 			try {
-				attributes = new ArrayList<>();
+				attributes = new ArrayList<ITxAttribute>();
 				for(ScvTxAttribute scvAttribute:handler.selectObjects()){
 					attributes.add(new TxAttribute(this, scvAttribute));
 
 				}
 			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-					| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+					| InvocationTargetException | SQLException | IntrospectionException e) {
 			}
 		}
 		return attributes;

@@ -121,15 +122,15 @@ public class Tx implements ITx {
 	@Override
 	public Collection<ITxRelation> getIncomingRelations() {
 		if(incoming==null){
-			SQLiteDatabaseSelectHandler<ScvTxRelation> handler = new SQLiteDatabaseSelectHandler<>(
+			SQLiteDatabaseSelectHandler<ScvTxRelation> handler = new SQLiteDatabaseSelectHandler<ScvTxRelation>(
 					ScvTxRelation.class, database, "sink="+scvTx.getId());
 			try {
-				incoming = new ArrayList<>();
+				incoming = new ArrayList<ITxRelation>();
 				for(ScvTxRelation scvRelation:handler.selectObjects()){
 					incoming.add(createRelation(scvRelation, false));
 				}
 			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-					| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+					| InvocationTargetException | SQLException | IntrospectionException e) {
 			}
 		}
 		return incoming;

@@ -138,15 +139,15 @@ public class Tx implements ITx {
 	@Override
 	public Collection<ITxRelation> getOutgoingRelations() {
 		if(outgoing==null){
-			SQLiteDatabaseSelectHandler<ScvTxRelation> handler = new SQLiteDatabaseSelectHandler<>(
+			SQLiteDatabaseSelectHandler<ScvTxRelation> handler = new SQLiteDatabaseSelectHandler<ScvTxRelation>(
 					ScvTxRelation.class, database, "src="+scvTx.getId());
 			try {
-				outgoing = new ArrayList<>();
+				outgoing = new ArrayList<ITxRelation>();
 				for(ScvTxRelation scvRelation:handler.selectObjects()){
 					outgoing.add(createRelation(scvRelation, true));
 				}
 			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-					| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+					| InvocationTargetException | SQLException | IntrospectionException e) {
 			}
 		}
 		return outgoing;
@@ -154,7 +155,7 @@ public class Tx implements ITx {
 
 	private ITxRelation createRelation(ScvTxRelation rel, boolean outgoing) {
 		int otherId = outgoing?rel.getSink():rel.getSrc();
-		SQLiteDatabaseSelectHandler<ScvTx> handler = new SQLiteDatabaseSelectHandler<>(ScvTx.class, database,
+		SQLiteDatabaseSelectHandler<ScvTx> handler = new SQLiteDatabaseSelectHandler<ScvTx>(ScvTx.class, database,
 				"id="+otherId);
 		try {
 			List<ScvTx> res = handler.selectObjects();

@@ -162,14 +163,14 @@ public class Tx implements ITx {
 			List<ScvStream> streams = new SQLiteDatabaseSelectHandler<ScvStream>(ScvStream.class, database,
 					"id="+res.get(0).getStream()).selectObjects();
 			if(streams.size()!=1) return null;
-			TxStream tgtStream = (TxStream) database.getWaveformDb().getStreamByName(streams.get(0).getName());
+			TxStream tgtStream = (TxStream) trStream.getDb().getStreamByName(streams.get(0).getName());
 			Tx that = (Tx) tgtStream.getTransactions().get(otherId);
 			if(outgoing)
 				return new TxRelation(trStream.getRelationType(rel.getName()), this, that);
 			else
 				return new TxRelation(trStream.getRelationType(rel.getName()), that, this);
 		} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
-				| InvocationTargetException | SQLException | IntrospectionException | NoSuchMethodException e) {
+				| InvocationTargetException | SQLException | IntrospectionException e) {
 			e.printStackTrace();
 		}
@@ -178,11 +179,11 @@ public class Tx implements ITx {
 
 	@Override
 	public int compareTo(ITx o) {
-		int res = Long.compare(this.getBeginTime(), o.getBeginTime());
+		int res = this.getBeginTime().compareTo(o.getBeginTime());
 		if(res!=0)
 			return res;
 		else
-			return Long.compare(this.getId(), o.getId());
+			return this.getId().compareTo(o.getId());
 	}
 
 	@Override
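The compareTo() change above swaps Long.compare on primitives for Long.compareTo on boxed values while keeping the same two-key ordering: begin time first, transaction id as tie-breaker. A sketch of the same ordering expressed with a Comparator chain from the standard library (ITxLike is a stand-in for the project's ITx interface, not a real type in the code base):

```java
import java.util.Comparator;

// Stand-in for the project's ITx interface, for illustration only.
interface ITxLike {
    long getBeginTime();
    long getId();
}

class TxOrdering {
    // Same "begin time, then id" ordering as the compareTo() in the diff,
    // without the manual if/else cascade.
    static final Comparator<ITxLike> BY_BEGIN_THEN_ID =
            Comparator.comparingLong(ITxLike::getBeginTime)
                      .thenComparingLong(ITxLike::getId);
}
```

A chained Comparator also sidesteps the NullPointerException risk that the boxed-Long compareTo variant inherits when begin or end was never resolved from the database.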
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -12,8 +12,8 @@ package com.minres.scviewer.database.sqlite;
 
 import com.minres.scviewer.database.AssociationType;
 import com.minres.scviewer.database.DataType;
+import com.minres.scviewer.database.ITxAttribute;
 import com.minres.scviewer.database.sqlite.tables.ScvTxAttribute;
-import com.minres.scviewer.database.tx.ITxAttribute;
 
 public class TxAttribute implements ITxAttribute{
 
@@ -32,18 +32,7 @@ public class TxAttribute implements ITxAttribute{
 
 	@Override
 	public DataType getDataType() {
-		int dt = scvAttribute.getData_type();
-		switch(dt) {
-		case 12:
-			return DataType.STRING;
-		case 10:
-			return DataType.POINTER;
-		default:
-			if(dt<9)
-				return DataType.values()[dt];
-			else
-				return DataType.NONE;
-		}
+		return DataType.values()[scvAttribute.getData_type()];
 	}
 
 	@Override
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -10,40 +10,43 @@
  *******************************************************************************/
 package com.minres.scviewer.database.sqlite;
 
-import com.minres.scviewer.database.EventKind;
-import com.minres.scviewer.database.IEvent;
-import com.minres.scviewer.database.WaveformType;
-import com.minres.scviewer.database.tx.ITx;
-import com.minres.scviewer.database.tx.ITxEvent;
+import com.minres.scviewer.database.ITx;
+import com.minres.scviewer.database.ITxEvent;
+import com.minres.scviewer.database.IWaveformEvent;
 
 public class TxEvent implements ITxEvent {
 
-	private final EventKind type;
+	private final Type type;
 	private ITx tx;
 
-	public TxEvent(EventKind type, ITx tx) {
+	public TxEvent(Type type, ITx tx) {
 		super();
 		this.type = type;
 		this.tx = tx;
 	}
 
 	@Override
-	public long getTime() {
-		return type==EventKind.BEGIN?tx.getBeginTime():tx.getEndTime();
+	public Long getTime() {
+		return type==Type.BEGIN?tx.getBeginTime():tx.getEndTime();
 	}
 
 	@Override
-	public IEvent duplicate() throws CloneNotSupportedException {
+	public IWaveformEvent duplicate() throws CloneNotSupportedException {
 		return new TxEvent(type, tx);
 	}
 
+	@Override
+	public int compareTo(IWaveformEvent o) {
+		return getTime().compareTo(o.getTime());
+	}
+
 	@Override
 	public ITx getTransaction() {
 		return tx;
 	}
 
 	@Override
-	public EventKind getKind() {
+	public Type getType() {
 		return type;
 	}
 

@@ -51,15 +54,4 @@ public class TxEvent implements ITxEvent {
 	public String toString() {
 		return type.toString()+"@"+getTime()+" of tx #"+tx.getId();
 	}
-
-	@Override
-	public WaveformType getType() {
-		return WaveformType.TRANSACTION;
-	}
-
-	@Override
-	public int getRowIndex() {
-		return ((Tx)tx).getConcurrencyIndex();
-	}
-
 }
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     MINRES Technologies GmbH - initial API and implementation
+ *******************************************************************************/
+package com.minres.scviewer.database.sqlite;
+
+import java.util.List;
+
+import com.minres.scviewer.database.ITx;
+import com.minres.scviewer.database.ITxEvent;
+import com.minres.scviewer.database.ITxGenerator;
+import com.minres.scviewer.database.ITxStream;
+import com.minres.scviewer.database.sqlite.tables.ScvGenerator;
+
+public class TxGenerator implements ITxGenerator {
+
+	private ITxStream<ITxEvent> stream;
+
+	private ScvGenerator scvGenerator;
+
+	public TxGenerator(ITxStream<ITxEvent> stream, ScvGenerator scvGenerator) {
+		this.stream=stream;
+		this.scvGenerator=scvGenerator;
+	}
+
+	@Override
+	public Long getId() {
+		return (long) scvGenerator.getId();
+	}
+
+	@Override
+	public ITxStream<ITxEvent> getStream() {
+		return stream;
+	}
+
+	@Override
+	public String getName() {
+		return scvGenerator.getName();
+	}
+
+	@Override
+	public List<ITx> getTransactions() {
+		return null;
+	}
+
+}
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at

@@ -10,15 +10,14 @@
  *******************************************************************************/
 package com.minres.scviewer.database.sqlite;
 
+import com.minres.scviewer.database.ITxRelation;
+import com.minres.scviewer.database.ITx;
 import com.minres.scviewer.database.RelationType;
-import com.minres.scviewer.database.tx.ITx;
-import com.minres.scviewer.database.tx.ITxRelation;
 
 public class TxRelation implements ITxRelation {
 
 	RelationType relationType;
-	Tx source;
-	Tx target;
+	Tx source, target;
 
 	public TxRelation(RelationType relationType, Tx source, Tx target) {
 		this.source = source;
@@ -0,0 +1,199 @@
+/*******************************************************************************
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *     MINRES Technologies GmbH - initial API and implementation
+ *******************************************************************************/
+package com.minres.scviewer.database.sqlite;
+
+import java.beans.IntrospectionException;
+import java.lang.reflect.InvocationTargetException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.Vector;
+
+import com.minres.scviewer.database.HierNode;
+import com.minres.scviewer.database.ITx;
+import com.minres.scviewer.database.ITxEvent;
+import com.minres.scviewer.database.ITxGenerator;
+import com.minres.scviewer.database.ITxStream;
+import com.minres.scviewer.database.IWaveform;
+import com.minres.scviewer.database.IWaveformDb;
+import com.minres.scviewer.database.IWaveformEvent;
+import com.minres.scviewer.database.RelationType;
+import com.minres.scviewer.database.sqlite.db.IDatabase;
+import com.minres.scviewer.database.sqlite.db.SQLiteDatabaseSelectHandler;
+import com.minres.scviewer.database.sqlite.tables.ScvGenerator;
+import com.minres.scviewer.database.sqlite.tables.ScvStream;
+import com.minres.scviewer.database.sqlite.tables.ScvTx;
+
+public class TxStream extends HierNode implements ITxStream<ITxEvent> {
+
+	private IDatabase database;
+
+	private String fullName;
+
+	private IWaveformDb db;
+
+	private ScvStream scvStream;
+
+	private TreeMap<Integer, TxGenerator> generators;
+
+	private TreeMap<Integer, ITx> transactions;
+
+	private Integer maxConcurrency;
+
+	private TreeMap<Long, List<ITxEvent>> events;
+
+	private List<RelationType> usedRelationsList;
+
+	public TxStream(IDatabase database, IWaveformDb waveformDb, ScvStream scvStream) {
+		super(scvStream.getName());
+		this.database=database;
+		fullName=scvStream.getName();
+		this.scvStream=scvStream;
+		db=waveformDb;
+	}
+
+	@Override
+	public IWaveformDb getDb() {
+		return db;
+	}
+
+	@Override
+	public String getFullName() {
+		return fullName;
+	}
+
+	@Override
+	public Long getId() {
+		return (long) scvStream.getId();
+	}
+
+	@Override
+	public String getKind() {
+		return scvStream.getKind();
+	}
+
+	@Override
+	public List<ITxGenerator> getGenerators() {
+		if(generators==null){
+			SQLiteDatabaseSelectHandler<ScvGenerator> handler = new SQLiteDatabaseSelectHandler<ScvGenerator>(
+					ScvGenerator.class, database, "stream="+scvStream.getId());
+			generators=new TreeMap<Integer, TxGenerator>();
+			try {
+				for(ScvGenerator scvGenerator:handler.selectObjects()){
+					generators.put(scvGenerator.getId(), new TxGenerator(this, scvGenerator));
+				}
+			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
+					| InvocationTargetException | SQLException | IntrospectionException e) {
+				e.printStackTrace();
+			}
+		}
+		return new ArrayList<ITxGenerator>(generators.values());
+	}
+
+	@Override
+	public int getMaxConcurrency() {
+		if(maxConcurrency==null){
+			java.sql.Connection connection=null;
+			java.sql.Statement statement=null;
+			java.sql.ResultSet resultSet=null;
+			try {
+				connection = database.createConnection();
+				statement = connection.createStatement();
+				StringBuilder sb = new StringBuilder();
+				sb.append("SELECT MAX(concurrencyLevel) as concurrencyLevel FROM ScvTx where stream=");
+				sb.append(scvStream.getId());
+				resultSet = statement.executeQuery(sb.toString());
+				while (resultSet.next()) {
+					Object value = resultSet.getObject("concurrencyLevel");
+					if(value!=null)
+						maxConcurrency=(Integer) value;
+				}
+			} catch (SQLException e) {
+				if(maxConcurrency==null) maxConcurrency=0;
+			} finally {
+				try{
+					if(resultSet!=null) resultSet.close();
+					if(statement!=null) statement.close();
+					if(connection!=null) connection.close();
+				} catch (SQLException e) { }
+			}
+			maxConcurrency+=1;
+		}
+		return maxConcurrency;
+	}
+
+	@Override
+	public NavigableMap<Long, List<ITxEvent>> getEvents(){
+		if(events==null){
+			events=new TreeMap<Long, List<ITxEvent>>();
+			for(Entry<Integer, ITx> entry:getTransactions().entrySet()){
+				putEvent(new TxEvent(TxEvent.Type.BEGIN, entry.getValue()));
+				putEvent(new TxEvent(TxEvent.Type.END, entry.getValue()));
+			}
+		}
+		return events;
+	}
+
+	private void putEvent(TxEvent ev){
+		Long time = ev.getTime();
+		if(!events.containsKey(time)){
+			Vector<ITxEvent> vector=new Vector<ITxEvent>();
+			vector.add(ev);
+			events.put(time, vector);
+		} else {
+			events.get(time).add(ev);
+		}
+	}
+
+	protected Map<Integer, ITx> getTransactions() {
+		if(transactions==null){
+			if(generators==null) getGenerators();
+			transactions = new TreeMap<Integer, ITx>();
+			SQLiteDatabaseSelectHandler<ScvTx> handler = new SQLiteDatabaseSelectHandler<ScvTx>(ScvTx.class, database,
+					"stream="+scvStream.getId());
+			try {
+				for(ScvTx scvTx:handler.selectObjects()){
+					transactions.put(scvTx.getId(), new Tx(database, this, generators.get(scvTx.getGenerator()), scvTx));
+				}
+			} catch (SecurityException | IllegalArgumentException | InstantiationException | IllegalAccessException
+					| InvocationTargetException | SQLException | IntrospectionException e) {
+				e.printStackTrace();
+			}
+		}
+		return transactions;
+	}
+
+	@Override
+	public Collection<ITxEvent> getWaveformEventsAtTime(Long time) {
+		return getEvents().get(time);
+	}
+
+	public void setRelationTypeList(List<RelationType> usedRelationsList){
+		this.usedRelationsList=usedRelationsList;
+	}
+
+	public RelationType getRelationType(String name) {
+		RelationType relType=RelationType.create(name);
+		if(!usedRelationsList.contains(relType)) usedRelationsList.add(relType);
+		return relType;
+	}
+
+	@Override
+	public Boolean equals(IWaveform<? extends IWaveformEvent> other) {
+		return(other instanceof TxStream && this.getId()==other.getId());
+	}
+
+}
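The putEvent() method in the file above buckets begin/end events by timestamp in a TreeMap so that the sorted NavigableMap view can serve time-ordered iteration and range queries. A minimal standalone sketch of the same grouping strategy, assuming nothing from the code base (Event is a placeholder type, and computeIfAbsent replaces the explicit containsKey/put sequence in the diff):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

// Illustrative only, not part of SCViewer: a timestamp-keyed event index.
class EventIndex<Event> {
    private final NavigableMap<Long, List<Event>> byTime = new TreeMap<>();

    void put(long time, Event ev) {
        // Create the bucket on first use, then append; one map lookup instead of two.
        byTime.computeIfAbsent(time, t -> new ArrayList<>()).add(ev);
    }

    List<Event> at(long time) {
        return byTime.getOrDefault(time, List.of());
    }

    // The sorted map makes "everything before time t" a cheap view, which is
    // what a waveform viewer needs when painting a visible time window.
    NavigableMap<Long, List<Event>> before(long time) {
        return byTime.headMap(time, false);
    }
}
```

The diff's Vector could be swapped for ArrayList as here unless the buckets are mutated concurrently, which nothing in the shown code suggests.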
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at

@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -16,8 +16,6 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 
-import com.minres.scviewer.database.IWaveformDb;
-
 /**
  *
  * Creates a connection to a database.

@@ -60,6 +58,4 @@ public interface IDatabase {
 	public void setData(String name, Object value);
 
 	public Object getData(String name);
-
-	public IWaveformDb getWaveformDb();
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at

@@ -20,8 +20,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.HashMap;
 
-import com.minres.scviewer.database.IWaveformDb;
-
 public class SQLiteDatabase implements IDatabase {
 
 	protected String dbFileName;
@@ -91,9 +89,4 @@ public class SQLiteDatabase implements IDatabase {
 		return props.get(name);
 	}
 
-	@Override
-	public IWaveformDb getWaveformDb() {
-		return null;
-	}
-
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at

@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at
@@ -75,11 +75,11 @@ public class SQLiteDatabaseSelectHandler<T> extends AbstractDatabaseHandler<T> {
 	 * @throws IllegalAccessException
 	 * @throws IntrospectionException
 	 * @throws InvocationTargetException
-	 * @throws NoSuchMethodException
 	 */
 	public synchronized List<T> selectObjects() throws SQLException,
 			SecurityException, IllegalArgumentException,
 			InstantiationException, IllegalAccessException,
-			IntrospectionException, InvocationTargetException, IllegalArgumentException, NoSuchMethodException, SecurityException {
+			IntrospectionException, InvocationTargetException {
 
 		Connection connection = null;
 		Statement statement = null;

@@ -114,18 +114,18 @@ public class SQLiteDatabaseSelectHandler<T> extends AbstractDatabaseHandler<T> {
 	 * @throws IllegalAccessException
 	 * @throws IntrospectionException
 	 * @throws InvocationTargetException
-	 * @throws NoSuchMethodException
 	 */
 	private List<T> createObjects(ResultSet resultSet)
-			throws SQLException, InstantiationException,
+			throws SecurityException, IllegalArgumentException,
+			SQLException, InstantiationException,
 			IllegalAccessException, IntrospectionException,
-			InvocationTargetException, IllegalArgumentException, NoSuchMethodException, SecurityException {
+			InvocationTargetException {
 
-		List<T> list = new ArrayList<>();
+		List<T> list = new ArrayList<T>();
 
 		while (resultSet.next()) {
 
-			T instance = type.getDeclaredConstructor().newInstance();
+			T instance = type.newInstance();
 
 			for (Field field : type.getDeclaredFields()) {
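The createObjects() hunk above moves between type.newInstance() and type.getDeclaredConstructor().newInstance(). Class.newInstance() has been deprecated since Java 9 because it rethrows checked constructor exceptions without wrapping them, whereas the Constructor route wraps them in InvocationTargetException; that is also why the newer throws clauses gain NoSuchMethodException. A minimal sketch of the modern form, assuming nothing from the code base (the Instantiator class is illustrative):

```java
// Illustrative helper, not part of SCViewer.
class Instantiator {
    // Requires a visible no-argument constructor, as the select handler assumes.
    static <T> T create(Class<T> type) throws ReflectiveOperationException {
        // Constructor exceptions arrive wrapped in InvocationTargetException;
        // a missing no-arg constructor surfaces as NoSuchMethodException.
        return type.getDeclaredConstructor().newInstance();
    }
}
```

ReflectiveOperationException covers NoSuchMethodException, InstantiationException, IllegalAccessException, and InvocationTargetException, which keeps the caller's throws clause shorter than the multi-catch lists in the diff.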
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2015-2021 MINRES Technologies GmbH and others.
+ * Copyright (c) 2015 MINRES Technologies GmbH and others.
  * All rights reserved. This program and the accompanying materials
  * are made available under the terms of the Eclipse Public License v1.0
  * which accompanies this distribution, and is available at

(The same one-line copyright-year change, 2015-2021 to 2015, repeats in six further files.)
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="target/classes"/>
+</classpath>

@@ -0,0 +1,4 @@
+eclipse.preferences.version=1
+encoding//src/com/minres/scviewer/database/test/DatabaseServicesPerformanceTest.java=UTF-8
+encoding//src/com/minres/scviewer/database/test/DatabaseServicesTest.java=UTF-8
+encoding/<project>=UTF-8

@@ -0,0 +1,8 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
+org.eclipse.jdt.core.compiler.compliance=1.8
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
+org.eclipse.jdt.core.compiler.source=1.8
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<launchConfiguration type="org.eclipse.pde.ui.JunitLaunchConfig">
+<booleanAttribute key="append.args" value="true"/>
+<stringAttribute key="application" value="org.eclipse.pde.junit.runtime.coretestapplication"/>
+<booleanAttribute key="askclear" value="false"/>
+<booleanAttribute key="automaticAdd" value="true"/>
+<booleanAttribute key="automaticValidate" value="false"/>
+<stringAttribute key="bootstrap" value=""/>
+<stringAttribute key="checked" value="[NONE]"/>
+<booleanAttribute key="clearConfig" value="true"/>
+<booleanAttribute key="clearws" value="true"/>
+<booleanAttribute key="clearwslog" value="false"/>
+<stringAttribute key="configLocation" value="${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/pde-junit"/>
+<booleanAttribute key="default" value="false"/>
+<stringAttribute key="deselected_workspace_plugins" value="com.minres.scviewer.e4.application,com.minres.scviewer.ui"/>
+<booleanAttribute key="includeOptional" value="true"/>
+<stringAttribute key="location" value="${workspace_loc}/../junit-workspace"/>
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
+<listEntry value="/com.minres.scviewer.database.test/src/com/minres/scviewer/database/test/DatabaseServicesTest.java"/>
+</listAttribute>
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
+<listEntry value="1"/>
+</listAttribute>
+<stringAttribute key="org.eclipse.jdt.junit.CONTAINER" value=""/>
+<booleanAttribute key="org.eclipse.jdt.junit.KEEPRUNNING_ATTR" value="false"/>
+<stringAttribute key="org.eclipse.jdt.junit.TESTNAME" value=""/>
+<stringAttribute key="org.eclipse.jdt.junit.TEST_KIND" value="org.eclipse.jdt.junit.loader.junit4"/>
+<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
+<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
+<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="com.minres.scviewer.database.test.DatabaseServicesTest"/>
+<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="-os ${target.os} -ws ${target.ws} -arch ${target.arch} -nl ${target.nl} -consoleLog"/>
+<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="com.minres.scviewer.database.test"/>
+<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.pde.ui.workbenchClasspathProvider"/>
+<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms40m -Xmx512m"/>
+<stringAttribute key="pde.version" value="3.3"/>
+<stringAttribute key="product" value="com.minres.scviewer.e4.product"/>
+<booleanAttribute key="run_in_ui_thread" value="true"/>
+<stringAttribute key="selected_target_plugins" value="com.google.guava@default:default,javax.annotation@default:default,javax.inject@default:default,javax.servlet@default:default,javax.xml@default:default,org.apache.ant@default:default,org.apache.commons.jxpath@default:default,org.apache.felix.gogo.command@default:default,org.apache.felix.gogo.runtime@default:default,org.codehaus.groovy@default:default,org.eclipse.ant.core@default:default,org.eclipse.core.commands@default:default,org.eclipse.core.contenttype@default:default,org.eclipse.core.expressions@default:default,org.eclipse.core.filesystem@default:default,org.eclipse.core.jobs@default:default,org.eclipse.core.resources@default:default,org.eclipse.core.runtime@default:true,org.eclipse.core.variables@default:default,org.eclipse.e4.core.contexts@default:default,org.eclipse.e4.core.di.annotations@default:default,org.eclipse.e4.core.di.extensions@default:default,org.eclipse.e4.core.di@default:default,org.eclipse.e4.core.services@default:default,org.eclipse.e4.emf.xpath@default:default,org.eclipse.e4.ui.di@default:default,org.eclipse.e4.ui.model.workbench@default:default,org.eclipse.e4.ui.services@default:default,org.eclipse.emf.common@default:default,org.eclipse.emf.ecore@default:default,org.eclipse.equinox.app@default:default,org.eclipse.equinox.bidi@default:default,org.eclipse.equinox.common@2:true,org.eclipse.equinox.ds@1:true,org.eclipse.equinox.preferences@default:default,org.eclipse.equinox.registry@default:default,org.eclipse.equinox.util@default:default,org.eclipse.jface@default:default,org.eclipse.osgi.compatibility.state@default:false,org.eclipse.osgi.services@default:default,org.eclipse.osgi@-1:true,org.eclipse.swt@default:default,org.hamcrest.core@default:default,org.junit@default:default"/>
+<stringAttribute key="selected_workspace_plugins" value="com.minres.scviewer.database.binary@default:default,com.minres.scviewer.database.sqlite@default:true,com.minres.scviewer.database.test@default:default,com.minres.scviewer.database.text@default:true,com.minres.scviewer.database.ui.swt@default:default,com.minres.scviewer.database.ui@default:default,com.minres.scviewer.database.vcd@default:default,com.minres.scviewer.database@default:true,com.opcoach.e4.preferences@default:default"/>
|
||||
<booleanAttribute key="show_selected_only" value="false"/>
|
||||
<booleanAttribute key="tracing" value="false"/>
|
||||
<booleanAttribute key="useCustomFeatures" value="false"/>
|
||||
<booleanAttribute key="useDefaultConfig" value="true"/>
|
||||
<booleanAttribute key="useDefaultConfigArea" value="false"/>
|
||||
<booleanAttribute key="useProduct" value="false"/>
|
||||
</launchConfiguration>
@@ -2,14 +2,13 @@ Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: SCViewer database tests
Bundle-SymbolicName: com.minres.scviewer.database.test
Bundle-Version: 4.0.0.qualifier
Bundle-Vendor: MINRES Technologies GmbH
Bundle-RequiredExecutionEnvironment: JavaSE-17
Require-Bundle: com.minres.scviewer.database,
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GnbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Require-Bundle: org.junit,
 com.minres.scviewer.database,
 com.minres.scviewer.database.sqlite;bundle-version="1.0.0",
 com.minres.scviewer.database.text;bundle-version="1.0.0",
 com.minres.scviewer.database.vcd;bundle-version="1.0.0",
 org.junit
 com.minres.scviewer.database.vcd;bundle-version="1.0.0"
Bundle-ActivationPolicy: lazy
Service-Component: OSGI-INF/component.xml
Automatic-Module-Name: com.minres.scviewer.database.test
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="com.minres.scviewer.database.test">
  <implementation class="com.minres.scviewer.database.test.TestWaveformDbFactory"/>
  <implementation class="com.minres.scviewer.database.test.DatabaseFactory"/>
  <reference bind="setFactory" cardinality="1..1" interface="com.minres.scviewer.database.IWaveformDbFactory" name="IWaveformDbFactory" policy="static" unbind="unsetFactory"/>
</scr:component>
@@ -0,0 +1,3 @@
/.scviewer.*
/.my_db.txlog*
/.simple_system.txlog_bdb.*
@@ -235,4 +235,20 @@ tx_end 45 5 3380 ns
a 37
tx_begin 51 5 3380 ns
a 58
tx_record_attribute 51 "data_
tx_record_attribute 51 "data_size" UNSIGNED = 24
tx_begin 52 6 3380 ns
a 58
tx_relation "addr_phase" 52 51
tx_begin 53 8 3380 ns
a 220
tx_relation "data_phase" 53 49
tx_end 52 6 3440 ns
tx_end 53 8 3560 ns
tx_end 49 5 3560 ns
a 220
tx_begin 54 8 3560 ns
a 109
tx_relation "data_phase" 54 51
tx_end 54 8 3660 ns
tx_end 51 5 3660 ns
a 109
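The records above follow the SCV text-log grammar that the loader further down consumes: tx_begin <id> <generator> <time> <unit> opens a transaction, a <value> attaches the next begin or end attribute, tx_record_attribute stores an inline attribute, and tx_relation "<name>" <src> <tgt> links two transaction IDs. A minimal Java sketch of tokenizing one such record (a hypothetical helper, not part of this change):

    // Split one txlog record into its whitespace-separated fields.
    public class TxlogLineSketch {
        public static void main(String[] args) {
            String line = "tx_begin 51 5 3380 ns";
            String[] tokens = line.split("\\s+");
            long txId = Long.parseLong(tokens[1]);  // transaction id (51)
            long genId = Long.parseLong(tokens[2]); // generator id (5)
            long time = Long.parseLong(tokens[3]);  // timestamp (3380)
            String unit = tokens[4];                // time unit (ns)
            System.out.println(txId + "/" + genId + " @ " + time + unit);
        }
    }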
Binary file not shown.
@@ -2,12 +2,11 @@
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <artifactId>com.minres.scviewer.database.test</artifactId>
  <version>4.0.0-SNAPSHOT</version>
  <parent>
    <groupId>com.minres.scviewer</groupId>
    <artifactId>com.minres.scviewer.parent</artifactId>
    <version>2.19.4</version>
    <relativePath>../..</relativePath>
    <version>1.0.0-SNAPSHOT</version>
    <relativePath>../com.minres.scviewer.parent</relativePath>
  </parent>
  <packaging>eclipse-test-plugin</packaging>
  <build>
@@ -15,8 +14,17 @@
      <plugin>
        <groupId>org.eclipse.tycho</groupId>
        <artifactId>tycho-surefire-plugin</artifactId>
        <version>${tycho-version}</version>
        <version>0.23.1</version>
        <configuration>
          <!-- <bundleStartLevel /> -->
          <dependencies>
            <dependency>
              <type>p2-installable-unit</type>
              <artifactId>org.eclipse.equinox.ds</artifactId>
            </dependency>
          </dependencies>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
</project>
@@ -3,9 +3,8 @@ package com.minres.scviewer.database.test;
import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbFactory;

public class TestWaveformDbFactory {
public class DatabaseFactory {
    private static IWaveformDbFactory waveformDbFactory;

    public synchronized void setFactory(IWaveformDbFactory service) {
        waveformDbFactory = service;
@@ -16,10 +15,10 @@ public class TestWaveformDbFactory {
        waveformDbFactory = null;
    }
}

    public static IWaveformDb getDatabase() throws Exception {
    public IWaveformDb getDatabase() {
        return waveformDbFactory.getDatabase();
    }

}
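The component.xml above registers DatabaseFactory with the OSGi declarative-services runtime, which injects the IWaveformDbFactory service through the declared bind and unbind methods. A rough sketch of the lifecycle the container drives, assuming the unbind signature mirrors setFactory (illustrative only; lookupService stands in for the service registry):

    public class DsLifecycleSketch {
        public static void main(String[] args) throws Exception {
            DatabaseFactory component = new DatabaseFactory();
            IWaveformDbFactory service = lookupService(); // placeholder for the SCR lookup
            component.setFactory(service);                // declared bind method
            IWaveformDb db = component.getDatabase();     // consumers can now obtain a database
            component.unsetFactory(service);              // declared unbind method (assumed signature)
        }
        private static IWaveformDbFactory lookupService() {
            throw new UnsupportedOperationException("stand-in for the OSGi service registry");
        }
    }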
@@ -0,0 +1,65 @@
/*******************************************************************************
 * Copyright (c) 2015 MINRES Technologies GmbH and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     MINRES Technologies GmbH - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.File;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbFactory;

public class DatabaseServicesPerformanceTest {

    private DatabaseFactory databaseFactory;

    private IWaveformDb waveformDb;

    @Before
    public void setUp() throws Exception {
        databaseFactory = new DatabaseFactory();
        waveformDb = databaseFactory.getDatabase();
    }

    @After
    public void tearDown() throws Exception {
    }

    @Test
    public void testVCD() throws Exception {
        File f = new File("inputs/my_db.vcd").getAbsoluteFile();
        assertTrue(f.exists());
        waveformDb.load(f);
        assertNotNull(waveformDb);
        assertEquals(14, waveformDb.getAllWaves().size());
        assertEquals(2, waveformDb.getChildNodes().size());
    }

    @Test
    public void testTxText() throws Exception {
        File f = new File("inputs/simple_system.txlog").getAbsoluteFile();
        assertTrue(f.exists());
        long timeBefore = System.currentTimeMillis();
        waveformDb.load(f);
        long timeAfter = System.currentTimeMillis();
        long elapsed = timeAfter - timeBefore;
        assertNotNull(waveformDb);
        System.out.println("elapsed:" + elapsed);
    }

}
@@ -0,0 +1,80 @@
/*******************************************************************************
 * Copyright (c) 2015 MINRES Technologies GmbH and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     MINRES Technologies GmbH - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.File;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbFactory;

public class DatabaseServicesTest {

    private DatabaseFactory databaseFactory;

    private IWaveformDb waveformDb;

    @Before
    public void setUp() throws Exception {
        waveformDb = new DatabaseFactory().getDatabase();
        // Wait for OSGi dependencies
        // for (int i = 0; i < 10; i++) {
        //     if (waveformDb.size() == 3) // Dependencies fulfilled
        //         return;
        //     Thread.sleep(1000);
        // }
        // assertEquals("OSGi dependencies unfulfilled", 3, WaveformDb.getLoaders().size());
    }

    @After
    public void tearDown() throws Exception {
    }

    @Test
    public void testVCD() throws Exception {
        File f = new File("inputs/my_db.vcd").getAbsoluteFile();
        assertTrue(f.exists());
        waveformDb.load(f);
        assertNotNull(waveformDb);
        assertEquals(14, waveformDb.getAllWaves().size());
        assertEquals(2, waveformDb.getChildNodes().size());
    }

    @Test
    public void testTxSQLite() throws Exception {
        File f = new File("inputs/my_db.txdb").getAbsoluteFile();
        assertTrue(f.exists());
        waveformDb.load(f);
        assertNotNull(waveformDb);
        assertEquals(3, waveformDb.getAllWaves().size());
        assertEquals(1, waveformDb.getChildNodes().size());
    }

    @Test
    public void testTxText() throws Exception {
        File f = new File("inputs/my_db.txlog").getAbsoluteFile();
        assertTrue(f.exists());
        waveformDb.load(f);
        assertNotNull(waveformDb);
        assertEquals(3, waveformDb.getAllWaves().size());
        assertEquals(1, waveformDb.getChildNodes().size());
    }

}
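The commented-out block in setUp points at a real hazard: with declarative services the loader bundles may not be bound yet when a test starts. A hedged sketch of the polling such a guard needs (a hypothetical helper, not in this change):

    import java.util.function.BooleanSupplier;

    public final class Await {
        // Poll a condition until it holds or the timeout expires.
        public static boolean until(BooleanSupplier condition, long timeoutMillis)
                throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (System.currentTimeMillis() < deadline) {
                if (condition.getAsBoolean())
                    return true;   // dependencies fulfilled
                Thread.sleep(100); // back off briefly before re-checking
            }
            return false;          // caller should fail the test
        }
    }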
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
  <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
  <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
  <classpathentry kind="src" path="src"/>
  <classpathentry exported="true" kind="con" path="GROOVY_DSL_SUPPORT"/>
  <classpathentry kind="output" path="target/classes"/>
</classpath>
@@ -33,6 +33,7 @@
  </buildSpec>
  <natures>
    <nature>org.eclipse.m2e.core.maven2Nature</nature>
    <nature>org.eclipse.jdt.groovy.core.groovyNature</nature>
    <nature>org.eclipse.pde.PluginNature</nature>
    <nature>org.eclipse.jdt.core.javanature</nature>
  </natures>
@@ -0,0 +1,8 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.8
@@ -1,3 +1,3 @@
eclipse.preferences.version=1
groovy.compiler.level=-1
groovy.compiler.level=24
groovy.script.filters=**/*.dsld,y,**/*.gradle,n
@@ -0,0 +1,17 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Textual transaction database
Bundle-SymbolicName: com.minres.scviewer.database.text
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GmbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Import-Package: com.minres.scviewer.database,
 org.osgi.framework;version="1.3.0"
Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0",
 org.codehaus.groovy;bundle-version="1.8.6",
 org.eclipse.equinox.util;bundle-version="1.0.500",
 org.eclipse.equinox.ds;bundle-version="1.4.200",
 org.eclipse.osgi.services;bundle-version="3.4.0",
 com.google.guava;bundle-version="15.0.0"
Service-Component: OSGI-INF/component.xml
Bundle-ActivationPolicy: lazy
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="TextDbLoaderFactory">
  <implementation class="com.minres.scviewer.database.text.TextDbLoaderFactory"/>
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="TextDbLoader">
  <implementation class="com.minres.scviewer.database.text.TextDbLoader"/>
  <service>
    <provide interface="com.minres.scviewer.database.IWaveformDbLoaderFactory"/>
    <provide interface="com.minres.scviewer.database.IWaveformDbLoader"/>
  </service>
</scr:component>
@@ -1,5 +1,5 @@
###############################################################################
# Copyright (c) 2014, 2015-2021 MINRES Technologies GmbH and others.
# Copyright (c) 2014, 2015 MINRES Technologies GmbH and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
@@ -11,4 +11,5 @@
bin.includes = META-INF/,\
               .,\
               OSGI-INF/
bin.excludes = **/*.groovy
source.. = src/
@@ -0,0 +1,42 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <artifactId>com.minres.scviewer.database.text</artifactId>
  <parent>
    <groupId>com.minres.scviewer</groupId>
    <artifactId>com.minres.scviewer.parent</artifactId>
    <version>1.0.0-SNAPSHOT</version>
    <relativePath>../com.minres.scviewer.parent</relativePath>
  </parent>
  <packaging>eclipse-plugin</packaging>
  <build>
    <plugins>
      <plugin>
        <groupId>org.eclipse.tycho</groupId>
        <artifactId>tycho-compiler-plugin</artifactId>
        <version>0.23.1</version>
        <configuration>
          <compilerId>groovy-eclipse-compiler</compilerId>
          <!-- set verbose to be true if you want lots of uninteresting messages -->
          <!-- <verbose>true</verbose> -->
          <source>1.7</source>
          <target>1.7</target>
        </configuration>
        <dependencies>
          <dependency>
            <groupId>org.codehaus.groovy</groupId>
            <artifactId>groovy-eclipse-compiler</artifactId>
            <version>${groovy-eclipse-compiler-version}</version>
          </dependency>
          <dependency>
            <groupId>org.codehaus.groovy</groupId>
            <artifactId>groovy-eclipse-batch</artifactId>
            <version>${groovy-eclipse-batch-version}</version>
            <!-- or choose a different compiler version -->
            <!-- <version>2.1.8-01</version> -->
          </dependency>
        </dependencies>
      </plugin>
    </plugins>
  </build>
</project>
@@ -0,0 +1,224 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text;

import java.nio.charset.CharsetDecoder;
import java.util.Collection;
import java.util.zip.GZIPInputStream

import org.apache.jdbm.DB
import org.apache.jdbm.DBMaker

import groovy.io.FileType

import com.minres.scviewer.database.AssociationType
import com.minres.scviewer.database.DataType
import com.minres.scviewer.database.ITxGenerator
import com.minres.scviewer.database.ITxStream
import com.minres.scviewer.database.IWaveform
import com.minres.scviewer.database.IWaveformDb
import com.minres.scviewer.database.IWaveformDbLoader
import com.minres.scviewer.database.RelationType

public class TextDbLoader implements IWaveformDbLoader, Serializable{

    private Long maxTime;

    transient IWaveformDb db;

    transient DB backingDb;

    transient def streamsById = [:]

    transient def generatorsById = [:]

    transient def transactionsById = [:]

    transient def relationTypes=[:]

    public TextDbLoader() {
    }

    @Override
    public Long getMaxTime() {
        return maxTime;
    }

    @Override
    public List<IWaveform> getAllWaves() {
        return new LinkedList<IWaveform>(streamsById.values());
    }

    //	public Map<Long, ITxGenerator> getGeneratorsById() {
    //		TreeMap<Long, ITxGenerator> res = new TreeMap<Long, ITxGenerator>();
    //		streamsById.values().each{TxStream stream ->
    //			stream.generators.each{res.put(it.id, id)} }
    //		return res;
    //	}

    static final byte[] x = "scv_tr_stream".bytes

    @Override
    boolean load(IWaveformDb db, File file) throws Exception {
        this.db=db
        def gzipped = isGzipped(file)
        if(isTxfile(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))){
            def parentDir=file.absoluteFile.parent
            def filename=file.name
            new File(parentDir).eachFileRecurse (FileType.FILES) { f -> if(f.name=~/^\.${filename}/) f.delete() }
            this.backingDb = DBMaker.openFile(parentDir+File.separator+"."+filename+"_bdb")
                    .deleteFilesAfterClose()
                    .useRandomAccessFile()
                    //.enableHardCache()
                    .enableMRUCache()
                    .setMRUCacheSize(1024*4096)
                    .disableTransactions()
                    .disableLocking()
                    .make();
            streamsById = backingDb.createHashMap("streamsById")
            generatorsById = backingDb.createHashMap("generatorsById")
            transactionsById = backingDb.createHashMap("transactionsById")
            relationTypes=backingDb.createHashMap("relationTypes")
            parseInput(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))
            calculateConcurrencyIndicees()
            return true
        }
        return false;
    }

    private static boolean isTxfile(InputStream istream) {
        byte[] buffer = new byte[x.size()]
        def readCnt = istream.read(buffer, 0, x.size())
        istream.close()
        if(readCnt==x.size()){
            for(int i=0; i<x.size(); i++)
                if(buffer[i]!=x[i]) return false
        }
        return true
    }

    private static boolean isGzipped(File f) {
        InputStream is = null;
        try {
            is = new FileInputStream(f);
            byte [] signature = new byte[2];
            int nread = is.read( signature ); //read the gzip signature
            return nread == 2 && signature[ 0 ] == (byte) 0x1f && signature[ 1 ] == (byte) 0x8b;
        } catch (IOException e) {
            return false;
        } finally {
            is.close()
        }
    }

    private stringToScale(String scale){
        switch(scale.trim()){
            case "fs":return 1L
            case "ps":return 1000L
            case "ns":return 1000000L
            case "us":return 1000000000L
            case "ms":return 1000000000000L
            case "s": return 1000000000000000L
        }
    }

    private def parseInput(InputStream inputStream){
        //def transactionsById = backingDb.createHashMap("transactionsById")
        TxGenerator generator
        Tx transaction
        boolean endTransaction=false
        def matcher
        BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"));
        long lineCnt=0;
        reader.eachLine { line ->
            def tokens = line.split(/\s+/)
            switch(tokens[0]){
                case "scv_tr_stream":
                case "scv_tr_generator":
                case "begin_attribute":
                case "end_attribute":
                    if ((matcher = line =~ /^scv_tr_stream\s+\(ID (\d+),\s+name\s+"([^"]+)",\s+kind\s+"([^"]+)"\)$/)) {
                        def id = Long.parseLong(matcher[0][1])
                        def stream = new TxStream(this, id, matcher[0][2], matcher[0][3])
                        streamsById[id]=stream
                    } else if ((matcher = line =~ /^scv_tr_generator\s+\(ID\s+(\d+),\s+name\s+"([^"]+)",\s+scv_tr_stream\s+(\d+),$/)) {
                        def id = Long.parseLong(matcher[0][1])
                        ITxStream stream=streamsById[Long.parseLong(matcher[0][3])]
                        generator=new TxGenerator(this, id, stream.id, matcher[0][2])
                        stream.generators<<id
                        generatorsById[id]=generator
                    } else if ((matcher = line =~ /^begin_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
                        generator.begin_attrs << TxAttributeType.getAttrType(matcher[0][2], DataType.valueOf(matcher[0][3]), AssociationType.BEGIN)
                    } else if ((matcher = line =~ /^end_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
                        generator.end_attrs << TxAttributeType.getAttrType(matcher[0][2], DataType.valueOf(matcher[0][3]), AssociationType.END)
                    }
                    break;
                case ")":
                    generator=null
                    break
                case "tx_begin"://matcher = line =~ /^tx_begin\s+(\d+)\s+(\d+)\s+(\d+)\s+([munpf]?s)/
                    def id = Long.parseLong(tokens[1])
                    TxGenerator gen=generatorsById[Long.parseLong(tokens[2])]
                    transaction = new Tx(this, id, gen.id, Long.parseLong(tokens[3])*stringToScale(tokens[4]))
                    gen.transactions << transaction
                    transactionsById[id]= transaction
                    gen.begin_attrs_idx=0;
                    maxTime = maxTime>transaction.beginTime?maxTime:transaction.beginTime
                    endTransaction=false
                    break
                case "tx_end"://matcher = line =~ /^tx_end\s+(\d+)\s+(\d+)\s+(\d+)\s+([munpf]?s)/
                    def id = Long.parseLong(tokens[1])
                    transaction = transactionsById[id]
                    assert Long.parseLong(tokens[2])==transaction.generator.id
                    transaction.endTime = Long.parseLong(tokens[3])*stringToScale(tokens[4])
                    transaction.generator.end_attrs_idx=0;
                    maxTime = maxTime>transaction.endTime?maxTime:transaction.endTime
                    endTransaction=true
                    break
                case "tx_record_attribute"://matcher = line =~ /^tx_record_attribute\s+(\d+)\s+"([^"]+)"\s+(\S+)\s*=\s*(.+)$/
                    def id = Long.parseLong(tokens[1])
                    transactionsById[id].attributes<<new TxAttribute(tokens[2][1..-2], DataType.valueOf(tokens[3]), AssociationType.RECORD, tokens[5..-1].join(' '))
                    break
                case "a"://matcher = line =~ /^a\s+(.+)$/
                    if(endTransaction){
                        transaction.attributes << new TxAttribute(transaction.generator.end_attrs[0], tokens[1])
                    } else {
                        transaction.attributes << new TxAttribute(transaction.generator.begin_attrs[0], tokens[1])
                    }
                    break
                case "tx_relation"://matcher = line =~ /^tx_relation\s+\"(\S+)\"\s+(\d+)\s+(\d+)$/
                    Tx tr2= transactionsById[Long.parseLong(tokens[2])]
                    Tx tr1= transactionsById[Long.parseLong(tokens[3])]
                    def relType=tokens[1][1..-2]
                    if(!relationTypes.containsKey(relType)) relationTypes[relType]=RelationType.create(relType)
                    def rel = new TxRelation(relationTypes[relType], tr1, tr2)
                    tr1.outgoingRelations<<rel
                    tr2.incomingRelations<<rel
                    break
                default:
                    println "Don't know what to do with: '$line'"
            }
            lineCnt++
        }
        backingDb.commit();
    }

    private def calculateConcurrencyIndicees(){
        streamsById.values().each{ TxStream stream ->
            stream.getMaxConcurrency()
        }
    }

    public Collection<RelationType> getAllRelationTypes(){
        return relationTypes.values();
    }

}
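isGzipped above keys off the two-byte gzip magic number 0x1f 0x8b. The same check restated as standalone Java, using try-with-resources so the stream is closed even when opening or reading throws (a sketch, not code from this change):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    final class GzipProbe {
        // A gzip file starts with the magic bytes 0x1f 0x8b.
        static boolean isGzipped(File f) {
            try (InputStream is = new FileInputStream(f)) {
                byte[] signature = new byte[2];
                int nread = is.read(signature);
                return nread == 2 && signature[0] == (byte) 0x1f && signature[1] == (byte) 0x8b;
            } catch (IOException e) {
                return false;
            }
        }
    }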
@@ -0,0 +1,77 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import com.minres.scviewer.database.*

class Tx implements ITx, Serializable {

    TextDbLoader loader

    Long id

    Long generator_id

    int concurrencyIndex

    Long beginTime

    Long endTime

    ArrayList<ITxAttribute> attributes = new ArrayList<ITxAttribute>()

    def incomingRelations =[]

    def outgoingRelations =[]

    Tx(TextDbLoader loader, Long id, Long generator_id, Long begin){
        this.loader=loader
        this.id=id
        this.generator_id=generator_id
        this.beginTime=begin
        this.endTime=begin
    }

    @Override
    public Collection<ITxRelation> getIncomingRelations() {
        return incomingRelations;
    }

    @Override
    public Collection<ITxRelation> getOutgoingRelations() {
        return outgoingRelations;
    }

    @Override
    public int compareTo(ITx o) {
        def res =beginTime.compareTo(o.beginTime)
        if(res!=0)
            return res
        else
            return id.compareTo(o.id)
    }

    @Override
    public String toString() {
        return "tx#"+getId()+"["+getBeginTime()/1000000+"ns - "+getEndTime()/1000000+"ns]";
    }

    @Override
    public ITxStream<ITxEvent> getStream() {
        return generator.stream;
    }

    @Override
    public ITxGenerator getGenerator() {
        return loader.generatorsById[generator_id];
    }

}
@@ -0,0 +1,59 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import com.minres.scviewer.database.AssociationType;
import com.minres.scviewer.database.DataType;
import com.minres.scviewer.database.ITxAttributeType;
import com.minres.scviewer.database.ITxAttribute

class TxAttribute implements ITxAttribute, Serializable {

    TxAttributeType attributeType

    def value

    TxAttribute(String name, DataType dataType, AssociationType type, value){
        attributeType = TxAttributeTypeFactory.instance.getAttrType(name, dataType, type)
        switch(dataType){
            case DataType.STRING:
            case DataType.ENUMERATION:
                this.value=value[1..-2]
                break;
            default:
                this.value=value
        }
    }

    TxAttribute(TxAttributeType other){
        attributeType=other
    }

    TxAttribute(TxAttributeType other, value){
        this(other.name, other.dataType, other.type, value)
    }

    @Override
    public String getName() {
        return attributeType.getName();
    }

    @Override
    public AssociationType getType() {
        attributeType.type;
    }

    @Override
    public DataType getDataType() {
        attributeType.dataType;
    }

}
@@ -0,0 +1,31 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import com.minres.scviewer.database.AssociationType;
import com.minres.scviewer.database.DataType;
import com.minres.scviewer.database.ITxAttributeType;

class TxAttributeType implements ITxAttributeType, Serializable {
    String name
    DataType dataType
    AssociationType type

    static TxAttributeType getAttrType(String name, DataType dataType, AssociationType type){
        TxAttributeTypeFactory.instance.getAttrType(name, dataType, type)
    }

    TxAttributeType(String name, DataType dataType, AssociationType type){
        this.name=name
        this.dataType=dataType
        this.type=type
    }
}
@@ -0,0 +1,38 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import com.minres.scviewer.database.AssociationType;
import com.minres.scviewer.database.DataType
import com.minres.scviewer.database.ITxAttributeType
import com.minres.scviewer.database.ITxAttribute

class TxAttributeTypeFactory {
    private static final instance = new TxAttributeTypeFactory()

    def attributes = [:]

    private TxAttributeTypeFactory() {
        TxAttributeTypeFactory.metaClass.constructor = {-> instance }
    }

    ITxAttributeType getAttrType(String name, DataType dataType, AssociationType type){
        def key = name+":"+dataType.toString()
        ITxAttributeType res
        if(attributes.containsKey(key)){
            res=attributes[key]
        } else {
            res=new TxAttributeType(name, dataType, type)
            attributes[key]=res
        }
        return res
    }
}
@@ -0,0 +1,36 @@
package com.minres.scviewer.database.text;

import com.minres.scviewer.database.ITx
import com.minres.scviewer.database.ITxEvent
import com.minres.scviewer.database.IWaveformEvent

class TxEvent implements ITxEvent, Serializable {

    final ITxEvent.Type type;

    final Tx transaction;

    final Long time

    TxEvent(ITxEvent.Type type, ITx transaction) {
        super();
        this.type = type;
        this.transaction = transaction;
        this.time = type==ITxEvent.Type.BEGIN?transaction.beginTime:transaction.endTime
    }

    @Override
    IWaveformEvent duplicate() throws CloneNotSupportedException {
        new TxEvent(type, transaction, time)
    }

    @Override
    int compareTo(IWaveformEvent o) {
        time.compareTo(o.time)
    }

    @Override
    String toString() {
        type.toString()+"@"+time+" of tx #"+transaction.id;
    }
}
@@ -0,0 +1,54 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import java.util.ArrayList;
import java.util.List;
import org.apache.jdbm.DB
import com.minres.scviewer.database.ITxAttributeType
import com.minres.scviewer.database.ITxAttribute;
import com.minres.scviewer.database.ITxEvent;
import com.minres.scviewer.database.ITxGenerator;
import com.minres.scviewer.database.ITxStream;
import com.minres.scviewer.database.ITx;
import com.minres.scviewer.database.IWaveformEvent;

class TxGenerator implements ITxGenerator, Serializable{
    Long id
    Long stream_id
    String name
    TextDbLoader loader;
    Boolean active = false
    ArrayList<ITx> transactions=[]

    ArrayList<ITxAttributeType> begin_attrs = []
    int begin_attrs_idx = 0
    ArrayList<ITxAttributeType> end_attrs= []
    int end_attrs_idx = 0

    TxGenerator(TextDbLoader loader, Long id, Long stream_id, name){
        this.id=id
        this.stream_id=stream_id
        this.name=name
        this.loader=loader
    }

    ITxStream<? extends ITxEvent> getStream(){
        return loader.streamsById[stream_id];
    }

    List<ITx> getTransactions(){
        return transactions
    }

    Boolean isActive() {return active};

}
@@ -0,0 +1,36 @@
package com.minres.scviewer.database.text

import com.minres.scviewer.database.ITxRelation
import com.minres.scviewer.database.ITx;
import com.minres.scviewer.database.RelationType;

class TxRelation implements ITxRelation, Serializable {
    Tx source

    Tx target

    RelationType relationType

    public TxRelation(RelationType relationType, Tx source, Tx target) {
        this.source = source;
        this.target = target;
        this.relationType = relationType;
    }

    @Override
    public RelationType getRelationType() {
        return relationType;
    }

    @Override
    public ITx getSource() {
        return source;
    }

    @Override
    public ITx getTarget() {
        return target;
    }

}
@@ -0,0 +1,117 @@
/*******************************************************************************
 * Copyright (c) 2012 IT Just working.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IT Just working - initial API and implementation
 *******************************************************************************/
package com.minres.scviewer.database.text

import java.beans.PropertyChangeListener;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.jdbm.DB
import com.minres.scviewer.database.ITxEvent;
import com.minres.scviewer.database.IWaveform;
import com.minres.scviewer.database.IWaveformDb
import com.minres.scviewer.database.IWaveformEvent
import com.minres.scviewer.database.ITxGenerator
import com.minres.scviewer.database.HierNode;
import com.minres.scviewer.database.IHierNode
import com.minres.scviewer.database.ITxStream
import com.minres.scviewer.database.ITx

class TxStream extends HierNode implements ITxStream, Serializable {

    Long id

    transient TextDbLoader loader

    String fullName

    String kind

    def generators = []

    int maxConcurrency

    private TreeMap<Long, List<ITxEvent>> events

    TxStream(TextDbLoader loader, Long id, String name, String kind){
        super(name)
        this.id=id
        this.loader=loader
        this.fullName=name
        this.kind=kind
        this.maxConcurrency=0
        events = new TreeMap<Long, List<ITxEvent>>()
        //events=backingStore.createTreeMap("stream-"+name)
    }

    List<ITxGenerator> getGenerators(){
        return generators.collect { loader.generatorsById[it] } as List<ITxGenerator>
    }

    @Override
    public IWaveformDb getDb() {
        return loader.db
    }

    @Override
    public int getMaxConcurrency() {
        if(!maxConcurrency){
            generators.each {TxGenerator generator ->
                generator.transactions.each{ Tx tx ->
                    putEvent(new TxEvent(ITxEvent.Type.BEGIN, tx))
                    putEvent(new TxEvent(ITxEvent.Type.END, tx))
                }
            }
            def rowendtime = [0]
            events.keySet().each{long time ->
                def value=events.get(time)
                def starts=value.findAll{ITxEvent event ->event.type==ITxEvent.Type.BEGIN}
                starts.each {ITxEvent event ->
                    Tx tx = event.transaction
                    def rowIdx = 0
                    for(rowIdx=0; rowIdx<rowendtime.size() && rowendtime[rowIdx]>tx.beginTime; rowIdx++);
                    if(rowendtime.size<=rowIdx)
                        rowendtime<<tx.endTime
                    else
                        rowendtime[rowIdx]=tx.endTime
                    tx.concurrencyIndex=rowIdx
                }
            }
            maxConcurrency=rowendtime.size()
        }
        return maxConcurrency
    }

    private putEvent(ITxEvent event){
        if(!events.containsKey(event.time))
            events.put(event.time, [event])
        else
            events[event.time]<<event
    }

    @Override
    public NavigableMap getEvents() {
        return events;
    }

    @Override
    public Collection getWaveformEventsAtTime(Long time) {
        return events.get(time);
    }

    @Override
    public Boolean equals(IWaveform<? extends IWaveformEvent> other) {
        return(other instanceof TxStream && this.getId()==other.getId());
    }

}
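getMaxConcurrency above packs overlapping transactions into display rows greedily: walking begin events in time order, each transaction takes the first row that is free at its begin time, otherwise a new row is opened; the final row count is the maximum concurrency. The same idea in plain Java over (begin, end) pairs (a sketch, not the viewer's API):

    import java.util.ArrayList;
    import java.util.List;

    final class LaneAssignment {
        // First-fit row packing for intervals sorted by begin time.
        static int assignRows(long[][] spans, int[] rowOut) {
            List<Long> rowEndTime = new ArrayList<>();
            for (int i = 0; i < spans.length; i++) {
                long begin = spans[i][0], end = spans[i][1];
                int row = 0;
                while (row < rowEndTime.size() && rowEndTime.get(row) > begin)
                    row++;                    // row still occupied at begin time
                if (row == rowEndTime.size())
                    rowEndTime.add(end);      // open a new row
                else
                    rowEndTime.set(row, end); // reuse the freed row
                rowOut[i] = row;              // concurrency index of this span
            }
            return rowEndTime.size();         // equals the maximum concurrency
        }
    }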
@@ -0,0 +1,706 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import java.io.*;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * B+Tree persistent indexing data structure. B+Trees are optimized for
 * block-based, random I/O storage because they store multiple keys on
 * one tree node (called <code>BTreeNode</code>). In addition, the leaf nodes
 * directly contain (inline) small values associated with the keys, allowing a
 * single (or sequential) disk read of all the values on the node.
 * <p/>
 * B+Trees are n-airy, yeilding log(N) search cost. They are self-balancing,
 * preventing search performance degradation when the size of the tree grows.
 * <p/>
 * BTree stores its keys sorted. By default JDBM expects key to implement
 * <code>Comparable</code> interface but user may supply its own <code>Comparator</code>
 * at BTree creation time. Comparator is serialized and stored as part of BTree.
 * <p/>
 * The B+Tree allows traversing the keys in forward and reverse order using a
 * TupleBrowser obtained from the browse() methods. But it is better to use
 * <code>BTreeMap</code> wrapper which implements <code>SortedMap</code> interface
 * <p/>
 * This implementation does not directly support duplicate keys. It is
 * possible to handle duplicates by grouping values using an ArrayList as value.
 * This scenario is supported by JDBM serialization so there is no big performance penalty.
 * <p/>
 * There is no limit on key size or value size, but it is recommended to keep
 * keys as small as possible to reduce disk I/O. If serialized value exceeds 32 bytes,
 * it is stored in separate record and tree contains only recid reference to it.
 * BTree uses delta compression for its keys.
 *
 * @author Alex Boisvert
 * @author Jan Kotek
 */
class BTree<K, V> {

    private static final boolean DEBUG = false;

    /**
     * Default node size (number of entries per node)
     */
    public static final int DEFAULT_SIZE = 32; //TODO test optimal size, it has serious impact on sequencial write and read

    /**
     * Record manager used to persist changes in BTreeNodes
     */
    protected transient DBAbstract _db;

    /**
     * This BTree's record ID in the DB.
     */
    private transient long _recid;

    /**
     * Comparator used to index entries (optional)
     */
    protected Comparator<K> _comparator;

    /**
     * Serializer used to serialize index keys (optional)
     */
    protected Serializer<K> keySerializer;

    /**
     * Serializer used to serialize index values (optional)
     */
    protected Serializer<V> valueSerializer;

    /**
     * indicates if values should be loaded during deserialization, set to false during defragmentation
     */
    boolean loadValues = true;

    /** if false map contains only keys, used for set*/
    boolean hasValues = true;

    /**
     * The number of structural modifications to the tree for fail fast iterators. This value is just for runtime, it is not persisted
     */
    transient int modCount = 0;

    /**
     * cached instance of an insert result, so we do not have to allocate new object on each insert
     */
    protected BTreeNode.InsertResult<K, V> insertResultReuse; //TODO investigate performance impact of removing this

    public Serializer<K> getKeySerializer() {
        return keySerializer;
    }

    public Serializer<V> getValueSerializer() {
        return valueSerializer;
    }

    /**
     * Height of the B+Tree. This is the number of BTreeNodes you have to traverse
     * to get to a leaf BTreeNode, starting from the root.
     */
    private int _height;

    /**
     * Recid of the root BTreeNode
     */
    private transient long _root;

    /**
     * Total number of entries in the BTree
     */
    protected volatile long _entries;

    /**
     * Serializer used for BTreeNodes of this tree
     */
    private transient BTreeNode<K, V> _nodeSerializer = new BTreeNode();
    {
        _nodeSerializer._btree = this;
    }

    /**
     * Listeners which are notified about changes in records
     */
    protected RecordListener[] recordListeners = new RecordListener[0];

    final protected ReadWriteLock lock = new ReentrantReadWriteLock();

    /**
     * No-argument constructor used by serialization.
     */
    public BTree() {
        // empty
    }

    /**
     * Create a new persistent BTree
     */
    @SuppressWarnings("unchecked")
    public static <K extends Comparable, V> BTree<K, V> createInstance(DBAbstract db)
            throws IOException {
        return createInstance(db, null, null, null,true);
    }

    /**
     * Create a new persistent BTree
     */
    public static <K, V> BTree<K, V> createInstance(DBAbstract db,
                                                    Comparator<K> comparator,
                                                    Serializer<K> keySerializer,
                                                    Serializer<V> valueSerializer,
                                                    boolean hasValues)
            throws IOException {
        BTree<K, V> btree;

        if (db == null) {
            throw new IllegalArgumentException("Argument 'db' is null");
        }

        btree = new BTree<K, V>();
        btree._db = db;
        btree._comparator = comparator;
        btree.keySerializer = keySerializer;
        btree.valueSerializer = valueSerializer;
        btree.hasValues = hasValues;
        btree._recid = db.insert(btree, btree.getRecordManager().defaultSerializer(),false);

        return btree;
    }

    /**
     * Load a persistent BTree.
     *
     * @param db    DB used to store the persistent btree
     * @param recid Record id of the BTree
     */
    @SuppressWarnings("unchecked")
    public static <K, V> BTree<K, V> load(DBAbstract db, long recid)
            throws IOException {
        BTree<K, V> btree = (BTree<K, V>) db.fetch(recid);
        btree._recid = recid;
        btree._db = db;
        btree._nodeSerializer = new BTreeNode<K, V>();
        btree._nodeSerializer._btree = btree;
        return btree;
    }

    /**
     * Get the {@link ReadWriteLock} associated with this BTree.
     * This should be used with browsing operations to ensure
     * consistency.
     *
     * @return
     */
    public ReadWriteLock getLock() {
        return lock;
    }

    /**
     * Insert an entry in the BTree.
     * <p/>
     * The BTree cannot store duplicate entries. An existing entry can be
     * replaced using the <code>replace</code> flag. If an entry with the
     * same key already exists in the BTree, its value is returned.
     *
     * @param key     Insert key
     * @param value   Insert value
     * @param replace Set to true to replace an existing key-value pair.
     * @return Existing value, if any.
     */
    public V insert(final K key, final V value,
                    final boolean replace)
            throws IOException {
        if (key == null) {
            throw new IllegalArgumentException("Argument 'key' is null");
        }
        if (value == null) {
            throw new IllegalArgumentException("Argument 'value' is null");
        }
        try {
            lock.writeLock().lock();
            BTreeNode<K, V> rootNode = getRoot();

            if (rootNode == null) {
                // BTree is currently empty, create a new root BTreeNode
                if (DEBUG) {
                    System.out.println("BTree.insert() new root BTreeNode");
                }
                rootNode = new BTreeNode<K, V>(this, key, value);
                _root = rootNode._recid;
                _height = 1;
                _entries = 1;
                _db.update(_recid, this);
                modCount++;
                //notifi listeners
                for (RecordListener<K, V> l : recordListeners) {
                    l.recordInserted(key, value);
                }
                return null;
            } else {
                BTreeNode.InsertResult<K, V> insert = rootNode.insert(_height, key, value, replace);
                boolean dirty = false;
                if (insert._overflow != null) {
                    // current root node overflowed, we replace with a new root node
                    if (DEBUG) {
                        System.out.println("BTreeNode.insert() replace root BTreeNode due to overflow");
                    }
                    rootNode = new BTreeNode<K, V>(this, rootNode, insert._overflow);
                    _root = rootNode._recid;
                    _height += 1;
                    dirty = true;
                }
                if (insert._existing == null) {
                    _entries++;
                    modCount++;
                    dirty = true;
                }
                if (dirty) {
                    _db.update(_recid, this);
                }
                //notify listeners
                for (RecordListener<K, V> l : recordListeners) {
                    if (insert._existing == null)
                        l.recordInserted(key, value);
                    else
                        l.recordUpdated(key, insert._existing, value);
                }

                // insert might have returned an existing value
                V ret = insert._existing;
                //zero out tuple and put it for reuse
                insert._existing = null;
                insert._overflow = null;
                this.insertResultReuse = insert;
                return ret;
            }
        } finally {
            lock.writeLock().unlock();
        }
    }

    /**
     * Remove an entry with the given key from the BTree.
     *
     * @param key Removal key
     * @return Value associated with the key, or null if no entry with given
     *         key existed in the BTree.
     */
    public V remove(K key)
            throws IOException {
        if (key == null) {
            throw new IllegalArgumentException("Argument 'key' is null");
        }
        try {
            lock.writeLock().lock();
            BTreeNode<K, V> rootNode = getRoot();
            if (rootNode == null) {
                return null;
            }
            boolean dirty = false;
            BTreeNode.RemoveResult<K, V> remove = rootNode.remove(_height, key);
            if (remove._underflow && rootNode.isEmpty()) {
                _height -= 1;
                dirty = true;

                _db.delete(_root);
                if (_height == 0) {
                    _root = 0;
                } else {
                    _root = rootNode.loadLastChildNode()._recid;
                }
            }
            if (remove._value != null) {
                _entries--;
                modCount++;
                dirty = true;
            }
            if (dirty) {
                _db.update(_recid, this);
            }
            if (remove._value != null)
                for (RecordListener<K, V> l : recordListeners)
                    l.recordRemoved(key, remove._value);
            return remove._value;
        } finally {
            lock.writeLock().unlock();
        }
    }

    /**
     * Find the value associated with the given key.
     *
     * @param key Lookup key.
     * @return Value associated with the key, or null if not found.
     */
    public V get(K key)
            throws IOException {
        if (key == null) {
            throw new IllegalArgumentException("Argument 'key' is null");
        }
        try {
            lock.readLock().lock();
            BTreeNode<K, V> rootNode = getRoot();
            if (rootNode == null) {
                return null;
            }

            return rootNode.findValue(_height, key);
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * Find the value associated with the given key, or the entry immediately
     * following this key in the ordered BTree.
     *
     * @param key Lookup key.
     * @return Value associated with the key, or a greater entry, or null if no
     *         greater entry was found.
     */
    public BTreeTuple<K, V> findGreaterOrEqual(K key)
            throws IOException {
        BTreeTuple<K, V> tuple;
        BTreeTupleBrowser<K, V> browser;

        if (key == null) {
            // there can't be a key greater than or equal to "null"
            // because null is considered an infinite key.
            return null;
        }

        tuple = new BTreeTuple<K, V>(null, null);
        browser = browse(key,true);
        if (browser.getNext(tuple)) {
            return tuple;
        } else {
            return null;
        }
    }

    /**
     * Get a browser initially positioned at the beginning of the BTree.
     * <p><b>
     * WARNING: If you make structural modifications to the BTree during
     * browsing, you will get inconsistent browing results.
     * </b>
     *
     * @return Browser positionned at the beginning of the BTree.
     */
    @SuppressWarnings("unchecked")
    public BTreeTupleBrowser<K, V> browse()
            throws IOException {
        try {
            lock.readLock().lock();
            BTreeNode<K, V> rootNode = getRoot();
            if (rootNode == null) {
                return EMPTY_BROWSER;
            }
            return rootNode.findFirst();
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * Get a browser initially positioned just before the given key.
     * <p><b>
     * WARNING: If you make structural modifications to the BTree during
     * browsing, you will get inconsistent browing results.
     * </b>
     *
     * @param key Key used to position the browser. If null, the browser
     *            will be positionned after the last entry of the BTree.
     *            (Null is considered to be an "infinite" key)
     * @return Browser positionned just before the given key.
     */
    @SuppressWarnings("unchecked")
    public BTreeTupleBrowser<K, V> browse(final K key, final boolean inclusive)
            throws IOException {
        try {
            lock.readLock().lock();
            BTreeNode<K, V> rootNode = getRoot();
            if (rootNode == null) {
                return EMPTY_BROWSER;
            }
            BTreeTupleBrowser<K, V> browser = rootNode.find(_height, key, inclusive);
            return browser;
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * Return the persistent record identifier of the BTree.
     */
    public long getRecid() {
        return _recid;
    }

    /**
     * Return the root BTreeNode, or null if it doesn't exist.
     */
    BTreeNode<K, V> getRoot()
            throws IOException {
        if (_root == 0) {
            return null;
        }
        BTreeNode<K, V> root = _db.fetch(_root, _nodeSerializer);
        if (root != null) {
            root._recid = _root;
            root._btree = this;
        }
        return root;
    }

    static BTree readExternal(DataInput in, Serialization ser)
            throws IOException, ClassNotFoundException {
        BTree tree = new BTree();
        tree._db = ser.db;
        tree._height = in.readInt();
        tree._recid = in.readLong();
        tree._root = in.readLong();
        tree._entries = in.readLong();
        tree.hasValues = in.readBoolean();
        tree._comparator = (Comparator) ser.deserialize(in);
        tree.keySerializer = (Serializer) ser.deserialize(in);
        tree.valueSerializer = (Serializer) ser.deserialize(in);
        return tree;
    }

    public void writeExternal(DataOutput out)
            throws IOException {
        out.writeInt(_height);
        out.writeLong(_recid);
        out.writeLong(_root);
        out.writeLong(_entries);
        out.writeBoolean(hasValues);
        _db.defaultSerializer().serialize(out, _comparator);
        _db.defaultSerializer().serialize(out, keySerializer);
        _db.defaultSerializer().serialize(out, valueSerializer);
    }

    /**
     * Copyes tree from one db to other, defragmenting it allong the way
     * @param recid
     * @param r1
     * @param r2
     * @throws IOException
     */
    public static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
        try {
            byte[] data = r1.fetchRaw(recid);
            r2.forceInsert(recid, data);
            DataInput in = new DataInputOutput(data);
            BTree t = (BTree) r1.defaultSerializer().deserialize(in);
            t.loadValues = false;
            t._db = r1;
            t._nodeSerializer = new BTreeNode(t, false);

            BTreeNode p = t.getRoot();
            if (p != null) {
                r2.forceInsert(t._root, r1.fetchRaw(t._root));
                p.defrag(r1, r2);
            }

        } catch (ClassNotFoundException e) {
            throw new IOError(e);
        }
    }
||||
/**
|
||||
* Browser returning no element.
|
||||
*/
|
||||
private static final BTreeTupleBrowser EMPTY_BROWSER = new BTreeTupleBrowser() {
|
||||
|
||||
public boolean getNext(BTreeTuple tuple) {
|
||||
return false;
|
||||
}
|
||||
|
||||
public boolean getPrevious(BTreeTuple tuple) {
|
||||
return false;
|
||||
}
|
||||
|
||||
public void remove(Object key) {
|
||||
throw new IndexOutOfBoundsException();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* add RecordListener which is notified about record changes
|
||||
*
|
||||
* @param listener
|
||||
*/
|
||||
public void addRecordListener(RecordListener<K, V> listener) {
|
||||
recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1);
|
||||
recordListeners[recordListeners.length - 1] = listener;
|
||||
}
|
||||
|
||||
/**
|
||||
* remove RecordListener which is notified about record changes
|
||||
*
|
||||
* @param listener
|
||||
*/
|
||||
public void removeRecordListener(RecordListener<K, V> listener) {
|
||||
List l = Arrays.asList(recordListeners);
|
||||
l.remove(listener);
|
||||
recordListeners = (RecordListener[]) l.toArray(new RecordListener[1]);
|
||||
}
|
||||
|
||||
|
||||
public DBAbstract getRecordManager() {
|
||||
return _db;
|
||||
}
|
||||
|
||||
|
||||
public Comparator<K> getComparator() {
|
||||
return _comparator;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes all BTreeNodes in this BTree
|
||||
*/
|
||||
public void clear()
|
||||
throws IOException {
|
||||
try {
|
||||
lock.writeLock().lock();
|
||||
BTreeNode<K, V> rootNode = getRoot();
|
||||
if (rootNode != null)
|
||||
rootNode.delete();
|
||||
_entries = 0;
|
||||
modCount++;
|
||||
} finally {
|
||||
lock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Used for debugging and testing only. Populates the 'out' list with
|
||||
* the recids of all child nodes in the BTree.
|
||||
*
|
||||
* @param out
|
||||
* @throws IOException
|
||||
*/
|
||||
void dumpChildNodeRecIDs(List<Long> out) throws IOException {
|
||||
BTreeNode<K, V> root = getRoot();
|
||||
if (root != null) {
|
||||
out.add(root._recid);
|
||||
root.dumpChildNodeRecIDs(out, _height);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean hasValues() {
|
||||
return hasValues;
|
||||
}
|
||||
|
||||
/**
|
||||
* Browser to traverse a collection of tuples. The browser allows for
|
||||
* forward and reverse order traversal.
|
||||
*
|
||||
*
|
||||
*/
|
||||
static interface BTreeTupleBrowser<K, V> {
|
||||
|
||||
/**
|
||||
* Get the next tuple.
|
||||
*
|
||||
* @param tuple Tuple into which values are copied.
|
||||
* @return True if values have been copied in tuple, or false if there is no next tuple.
|
||||
*/
|
||||
boolean getNext(BTree.BTreeTuple<K, V> tuple) throws IOException;
|
||||
|
||||
/**
|
||||
* Get the previous tuple.
|
||||
*
|
||||
* @param tuple Tuple into which values are copied.
|
||||
* @return True if values have been copied in tuple, or false if there is no previous tuple.
|
||||
*/
|
||||
boolean getPrevious(BTree.BTreeTuple<K, V> tuple) throws IOException;
|
||||
|
||||
/**
|
||||
* Remove an entry with given key, and increases browsers expectedModCount
|
||||
* This method is here to support 'ConcurrentModificationException' on Map interface.
|
||||
*
|
||||
* @param key
|
||||
*/
|
||||
void remove(K key) throws IOException;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Tuple consisting of a key-value pair.
|
||||
*/
|
||||
static final class BTreeTuple<K, V> {
|
||||
|
||||
K key;
|
||||
|
||||
V value;
|
||||
|
||||
BTreeTuple() {
|
||||
// empty
|
||||
}
|
||||
|
||||
BTreeTuple(K key, V value) {
|
||||
this.key = key;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
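A minimal sketch of how this class is typically driven from inside the org.apache.jdbm package (the calls mirror how BTreeMap, later in this diff, uses the tree; the createInstance signature is taken from its usage in DBAbstract, and the db instance is a hypothetical DBAbstract obtained elsewhere):

    BTree<String, Long> tree = BTree.createInstance(db, null, null, null, true);
    tree.insert("alpha", 1L, true);          // replace == true: overwrite if present
    Long v = tree.get("alpha");              // -> 1
    BTree.BTreeTuple<String, Long> t = new BTree.BTreeTuple<String, Long>();
    BTree.BTreeTupleBrowser<String, Long> b = tree.browse("a", true);
    while (b.getNext(t)) {                   // walk entries >= "a" in key order
        System.out.println(t.key + " = " + t.value);
    }
    tree.remove("alpha");                    // returns 1, or null if absent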

@ -0,0 +1,97 @@
package org.apache.jdbm;

import java.io.*;

/**
 * A record lazily loaded from the store.
 * This is used in BTree/HTree to store big records outside of the index tree.
 *
 * @author Jan Kotek
 */
class BTreeLazyRecord<E> {

    private E value = null;
    private DBAbstract db;
    private Serializer<E> serializer;
    final long recid;

    BTreeLazyRecord(DBAbstract db, long recid, Serializer<E> serializer) {
        this.db = db;
        this.recid = recid;
        this.serializer = serializer;
    }


    E get() {
        if (value != null) return value;
        try {
            value = db.fetch(recid, serializer);
        } catch (IOException e) {
            throw new IOError(e);
        }
        return value;
    }

    void delete() {
        try {
            db.delete(recid);
        } catch (IOException e) {
            throw new IOError(e);
        }
        value = null;
        serializer = null;
        db = null;
    }

    /**
     * Serializer used to insert already serialized data into the store
     */
    static final Serializer FAKE_SERIALIZER = new Serializer() {

        public void serialize(DataOutput out, Object obj) throws IOException {
            byte[] data = (byte[]) obj;
            out.write(data);
        }

        public Object deserialize(DataInput in) throws IOException, ClassNotFoundException {
            throw new UnsupportedOperationException();
        }
    };


    static Object fastDeser(DataInputOutput in, Serializer serializer, int expectedSize) throws IOException, ClassNotFoundException {
        //We should probably copy the data into a separate buffer and pass that to the serializer,
        //but to make it faster the serializer operates directly on top of the buffer.
        //Afterwards we check that it read the correct number of bytes.
        int origAvail = in.available();
        if (origAvail == 0)
            throw new InternalError(); //input is backed by a byte[] buffer, so bytes should always be available
        Object ret = serializer.deserialize(in);
        //check that the serializer did not read too many bytes; if it did, it read into the next record
        int read = origAvail - in.available();
        if (read > expectedSize)
            throw new IOException("Serializer read more bytes than the record size.");
        else if (read != expectedSize) {
            //deserializer did not read all bytes; unusual but valid.
            //Skip the rest to get into the correct position.
            for (int ii = 0; ii < expectedSize - read; ii++)
                in.readUnsignedByte();
        }
        return ret;
    }


    /**
     * If a value serializes to more than this many bytes, it is stored as a
     * separate record outside of the tree.
     * This value must always be smaller than 250.
     */
    static final int MAX_INTREE_RECORD_SIZE = 32;

    static {
        if (MAX_INTREE_RECORD_SIZE > 250) throw new Error();
    }

    static final int NULL = 255;
    static final int LAZY_RECORD = 254;
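    // Interpretation of the constants above (inferred from their names and
    // from MAX_INTREE_RECORD_SIZE being capped below 250): when a node is
    // serialized, each value is preceded by a size byte; 255 marks a null
    // value, 254 marks a value stored outside the tree as a lazy record,
    // and smaller sizes mean the value bytes follow inline.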

}
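A minimal sketch of the intended access pattern (the db instance, recid and serializer are hypothetical; in practice they come from the surrounding node deserialization code, which is not part of this hunk):

    BTreeLazyRecord<byte[]> lazy = new BTreeLazyRecord<byte[]>(db, recid, someSerializer);
    // nothing is read from the store yet
    byte[] big = lazy.get();    // first call fetches and caches the record
    byte[] again = lazy.get();  // served from the cached value
    lazy.delete();              // removes the record and drops all references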

@ -0,0 +1,611 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package org.apache.jdbm;

import java.io.IOError;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;


/**
 * Wrapper for <code>BTree</code> which implements the <code>ConcurrentNavigableMap</code> interface
 *
 * @param <K> key type
 * @param <V> value type
 *
 * @author Jan Kotek
 */
class BTreeMap<K, V> extends AbstractMap<K, V> implements ConcurrentNavigableMap<K, V> {

    protected BTree<K, V> tree;

    protected final K fromKey;

    protected final K toKey;

    protected final boolean readonly;

    protected NavigableSet<K> keySet2;
    private final boolean toInclusive;
    private final boolean fromInclusive;

    public BTreeMap(BTree<K, V> tree, boolean readonly) {
        this(tree, readonly, null, false, null, false);
    }

    protected BTreeMap(BTree<K, V> tree, boolean readonly, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
        this.tree = tree;
        this.fromKey = fromKey;
        this.fromInclusive = fromInclusive;
        this.toKey = toKey;
        this.toInclusive = toInclusive;
        this.readonly = readonly;
    }

    @Override
    public Set<Entry<K, V>> entrySet() {
        return _entrySet;
    }


    private final Set<java.util.Map.Entry<K, V>> _entrySet = new AbstractSet<Entry<K, V>>() {

        protected Entry<K, V> newEntry(K k, V v) {
            return new SimpleEntry<K, V>(k, v) {
                private static final long serialVersionUID = 978651696969194154L;

                public V setValue(V arg0) {
                    BTreeMap.this.put(getKey(), arg0);
                    return super.setValue(arg0);
                }

            };
        }

        public boolean add(java.util.Map.Entry<K, V> e) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");

            try {
                if (e.getKey() == null)
                    throw new NullPointerException("Can not add null key");
                if (!inBounds(e.getKey()))
                    throw new IllegalArgumentException("key outside of bounds");
                return tree.insert(e.getKey(), e.getValue(), true) == null;
            } catch (IOException e1) {
                throw new IOError(e1);
            }
        }

        @SuppressWarnings("unchecked")
        public boolean contains(Object o) {

            if (o instanceof Entry) {
                Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
                try {
                    if (!inBounds(e.getKey()))
                        return false;
                    if (e.getKey() != null && tree.get(e.getKey()) != null)
                        return true;
                } catch (IOException e1) {
                    throw new IOError(e1);
                }
            }
            return false;
        }


        public Iterator<java.util.Map.Entry<K, V>> iterator() {
            try {
                final BTree.BTreeTupleBrowser<K, V> br = fromKey == null ?
                        tree.browse() : tree.browse(fromKey, fromInclusive);
                return new Iterator<Entry<K, V>>() {

                    private Entry<K, V> next;
                    private K lastKey;

                    void ensureNext() {
                        try {
                            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
                            if (br.getNext(t) && inBounds(t.key))
                                next = newEntry(t.key, t.value);
                            else
                                next = null;
                        } catch (IOException e1) {
                            throw new IOError(e1);
                        }
                    }

                    {
                        ensureNext();
                    }


                    public boolean hasNext() {
                        return next != null;
                    }

                    public java.util.Map.Entry<K, V> next() {
                        if (next == null)
                            throw new NoSuchElementException();
                        Entry<K, V> ret = next;
                        lastKey = ret.getKey();
                        //move to next position
                        ensureNext();
                        return ret;
                    }

                    public void remove() {
                        if (readonly)
                            throw new UnsupportedOperationException("readonly");

                        if (lastKey == null)
                            throw new IllegalStateException();
                        try {
                            br.remove(lastKey);
                            lastKey = null;
                        } catch (IOException e1) {
                            throw new IOError(e1);
                        }

                    }
                };

            } catch (IOException e) {
                throw new IOError(e);
            }

        }

        @SuppressWarnings("unchecked")
        public boolean remove(Object o) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");

            if (o instanceof Entry) {
                Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
                try {
                    //check for nulls
                    if (e.getKey() == null || e.getValue() == null)
                        return false;
                    if (!inBounds(e.getKey()))
                        throw new IllegalArgumentException("out of bounds");
                    //get the old value; it must be the same as the value in the entry
                    V v = get(e.getKey());
                    if (v == null || !e.getValue().equals(v))
                        return false;
                    V v2 = tree.remove(e.getKey());
                    return v2 != null;
                } catch (IOException e1) {
                    throw new IOError(e1);
                }
            }
            return false;

        }

        public int size() {
            return BTreeMap.this.size();
        }

        public void clear() {
            if (fromKey != null || toKey != null)
                super.clear();
            else
                try {
                    tree.clear();
                } catch (IOException e) {
                    throw new IOError(e);
                }
        }

    };


    public boolean inBounds(K e) {
        if (fromKey == null && toKey == null)
            return true;

        Comparator comp = comparator();
        if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;

        if (fromKey != null) {
            final int compare = comp.compare(e, fromKey);
            if (compare < 0) return false;
            if (!fromInclusive && compare == 0) return false;
        }
        if (toKey != null) {
            final int compare = comp.compare(e, toKey);
            if (compare > 0) return false;
            if (!toInclusive && compare == 0) return false;
        }
        return true;
    }
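    // Example (hypothetical bounds): for a submap over ["b", "f") with
    // fromInclusive = true and toInclusive = false, inBounds("b") and
    // inBounds("d") return true, while inBounds("f") and inBounds("g")
    // return false.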

    @SuppressWarnings("unchecked")
    @Override
    public V get(Object key) {
        try {
            if (key == null)
                return null;
            if (!inBounds((K) key))
                return null;
            return tree.get((K) key);
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public V remove(Object key) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");

        try {
            if (key == null || tree.get((K) key) == null)
                return null;
            if (!inBounds((K) key))
                throw new IllegalArgumentException("out of bounds");

            return tree.remove((K) key);
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public V put(K key, V value) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");

        try {
            if (key == null || value == null)
                throw new NullPointerException("Null key or value");
            if (!inBounds(key))
                throw new IllegalArgumentException("out of bounds");
            return tree.insert(key, value, true);
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public void clear() {
        entrySet().clear();
    }

    @SuppressWarnings("unchecked")
    @Override
    public boolean containsKey(Object key) {
        if (key == null)
            return false;
        try {
            if (!inBounds((K) key))
                return false;
            V v = tree.get((K) key);
            return v != null;
        } catch (IOException e) {
            throw new IOError(e);
        } catch (ClassCastException e) {
            return false;
        }
    }

    public Comparator<? super K> comparator() {
        return tree._comparator;
    }

    public K firstKey() {
        if (isEmpty())
            return null;
        try {

            BTree.BTreeTupleBrowser<K, V> b = fromKey == null ? tree.browse() : tree.browse(fromKey, fromInclusive);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getNext(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public K lastKey() {
        if (isEmpty())
            return null;
        try {
            BTree.BTreeTupleBrowser<K, V> b = toKey == null ? tree.browse(null, true) : tree.browse(toKey, false);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getPrevious(t);
            if (!toInclusive && toKey != null) {
                //make sure we won't return the excluded toKey
                Comparator c = comparator();
                if (c == null) c = Utils.COMPARABLE_COMPARATOR;
                if (c.compare(t.key, toKey) == 0)
                    b.getPrevious(t);
            }
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public ConcurrentNavigableMap<K, V> headMap(K toKey2, boolean inclusive) {
        K toKey3 = Utils.min(this.toKey, toKey2, comparator());
        //when the existing upper bound is the stricter one, keep its inclusiveness
        boolean inclusive2 = toKey3 == this.toKey ? toInclusive : inclusive;
        return new BTreeMap<K, V>(tree, readonly, this.fromKey, this.fromInclusive, toKey3, inclusive2);
    }


    public ConcurrentNavigableMap<K, V> headMap(K toKey) {
        return headMap(toKey, false);
    }


    public Entry<K, V> lowerEntry(K key) {
        K k = lowerKey(key);
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));
    }

    public K lowerKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.min(key, toKey, comparator());
        try {
            BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2, true);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getPrevious(t);

            return t.key;

        } catch (IOException e) {
            throw new IOError(e);
        }

    }

    public Entry<K, V> floorEntry(K key) {
        K k = floorKey(key);
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));

    }

    public K floorKey(K key) {
        if (isEmpty())
            return null;

        K key2 = Utils.max(key, fromKey, comparator());
        try {
            BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2, true);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getNext(t);
            Comparator comp = comparator();
            if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
            if (comp.compare(t.key, key2) == 0)
                return t.key;

            //getNext moved the browser past the first entry greater than key2;
            //the first getPrevious re-reads that entry, the second one yields
            //the entry before it, which is the floor
            b.getPrevious(t);
            b.getPrevious(t);
            return t.key;

        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry<K, V> ceilingEntry(K key) {
        K k = ceilingKey(key);
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));
    }

    public K ceilingKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.min(key, toKey, comparator());

        try {
            BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2, true);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getNext(t);
            return t.key;

        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry<K, V> higherEntry(K key) {
        K k = higherKey(key);
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));
    }

    public K higherKey(K key) {
        if (isEmpty())
            return null;

        K key2 = Utils.max(key, fromKey, comparator());

        try {
            BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2, false);
            BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
            b.getNext(t);
            return t.key;

        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry<K, V> firstEntry() {
        K k = firstKey();
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));
    }

    public Entry<K, V> lastEntry() {
        K k = lastKey();
        return k == null ? null : new SimpleEntry<K, V>(k, get(k));
    }

    public Entry<K, V> pollFirstEntry() {
        Entry<K, V> first = firstEntry();
        if (first != null)
            remove(first.getKey());
        return first;
    }

    public Entry<K, V> pollLastEntry() {
        Entry<K, V> last = lastEntry();
        if (last != null)
            remove(last.getKey());
        return last;
    }

    public ConcurrentNavigableMap<K, V> descendingMap() {
        throw new UnsupportedOperationException("not implemented yet");
        //TODO implement descending (reverse order) map
    }


    public NavigableSet<K> keySet() {
        return navigableKeySet();
    }

    public NavigableSet<K> navigableKeySet() {
        if (keySet2 == null)
            keySet2 = new BTreeSet<K>((BTreeMap<K, Object>) this);
        return keySet2;
    }

    public NavigableSet<K> descendingKeySet() {
        return descendingMap().navigableKeySet();
    }


    public ConcurrentNavigableMap<K, V> tailMap(K fromKey) {
        return tailMap(fromKey, true);
    }


    public ConcurrentNavigableMap<K, V> tailMap(K fromKey2, boolean inclusive) {
        K fromKey3 = Utils.max(this.fromKey, fromKey2, comparator());
        //when the existing lower bound is the stricter one, keep its inclusiveness
        boolean inclusive2 = fromKey3 == this.fromKey ? fromInclusive : inclusive;

        return new BTreeMap<K, V>(tree, readonly, fromKey3, inclusive2, toKey, toInclusive);
    }

    public ConcurrentNavigableMap<K, V> subMap(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
        Comparator comp = comparator();
        if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
        if (comp.compare(fromKey, toKey) > 0)
            throw new IllegalArgumentException("fromKey is bigger than toKey");
        return new BTreeMap<K, V>(tree, readonly, fromKey, fromInclusive, toKey, toInclusive);
    }

    public ConcurrentNavigableMap<K, V> subMap(K fromKey, K toKey) {
        return subMap(fromKey, true, toKey, false);
    }


    public BTree<K, V> getTree() {
        return tree;
    }


    public void addRecordListener(RecordListener<K, V> listener) {
        tree.addRecordListener(listener);
    }

    public DBAbstract getRecordManager() {
        return tree.getRecordManager();
    }

    public void removeRecordListener(RecordListener<K, V> listener) {
        tree.removeRecordListener(listener);
    }


    public int size() {
        if (fromKey == null && toKey == null)
            return (int) tree._entries; //use fast counter on tree if Map has no bounds
        else {
            //have to count items by iterating
            Iterator iter = keySet().iterator();
            int counter = 0;
            while (iter.hasNext()) {
                iter.next();
                counter++;
            }
            return counter;
        }

    }


    public V putIfAbsent(K key, V value) {
        tree.lock.writeLock().lock();
        try {
            if (!containsKey(key))
                return put(key, value);
            else
                return get(key);
        } finally {
            tree.lock.writeLock().unlock();
        }
    }

    public boolean remove(Object key, Object value) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(value)) {
                remove(key);
                return true;
            } else return false;
        } finally {
            tree.lock.writeLock().unlock();
        }

    }

    public boolean replace(K key, V oldValue, V newValue) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(oldValue)) {
                put(key, newValue);
                return true;
            } else return false;
        } finally {
            tree.lock.writeLock().unlock();
        }

    }

    public V replace(K key, V value) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key)) {
                return put(key, value);
            } else return null;
        } finally {
            tree.lock.writeLock().unlock();
        }
    }
}
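A short sketch of the view semantics implemented above (instances are normally obtained through the createTreeMap factory that appears later in this diff rather than constructed directly; db and the map contents are hypothetical):

    ConcurrentNavigableMap<String, Integer> m = db.createTreeMap("scores");
    m.put("alice", 1);
    m.put("bob", 2);
    m.put("carol", 3);

    // subMap returns another BTreeMap over the same BTree, with bounds
    // checked by inBounds() on every access
    ConcurrentNavigableMap<String, Integer> sub = m.subMap("alice", true, "carol", false);
    sub.size();               // 2 -- counted by iterating, since the view is bounded
    sub.firstKey();           // "alice"
    m.putIfAbsent("bob", 9);  // returns 2, the existing value wins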

File diff suppressed because it is too large

@ -0,0 +1,187 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.jdbm;

import java.util.*;

/**
 * Wrapper class for <code>SortedMap</code> to implement <code>NavigableSet</code>
 * <p/>
 * This code originally comes from Apache Harmony and was adapted by Jan Kotek for JDBM
 */
class BTreeSet<E> extends AbstractSet<E> implements NavigableSet<E> {


    /**
     * The key set is backed by this map
     */
    final BTreeMap<E, Object> map;

    BTreeSet(BTreeMap<E, Object> map) {
        this.map = map;
    }

    public boolean add(E object) {
        return map.put(object, Utils.EMPTY_STRING) == null;
    }


    public boolean addAll(Collection<? extends E> collection) {
        return super.addAll(collection);
    }


    public void clear() {
        map.clear();
    }

    public Comparator<? super E> comparator() {
        return map.comparator();
    }


    public boolean contains(Object object) {
        return map.containsKey(object);
    }


    public boolean isEmpty() {
        return map.isEmpty();
    }


    public E lower(E e) {
        return map.lowerKey(e);
    }

    public E floor(E e) {
        return map.floorKey(e);
    }

    public E ceiling(E e) {
        return map.ceilingKey(e);
    }

    public E higher(E e) {
        return map.higherKey(e);
    }

    public E pollFirst() {
        Map.Entry<E, Object> e = map.pollFirstEntry();
        return e != null ? e.getKey() : null;
    }

    public E pollLast() {
        Map.Entry<E, Object> e = map.pollLastEntry();
        return e != null ? e.getKey() : null;
    }

    public Iterator<E> iterator() {
        final Iterator<Map.Entry<E, Object>> iter = map.entrySet().iterator();
        return new Iterator<E>() {
            public boolean hasNext() {
                return iter.hasNext();
            }

            public E next() {
                Map.Entry<E, Object> e = iter.next();
                return e != null ? e.getKey() : null;
            }

            public void remove() {
                iter.remove();
            }
        };
    }

    public NavigableSet<E> descendingSet() {
        return map.descendingKeySet();
    }

    public Iterator<E> descendingIterator() {
        return map.descendingKeySet().iterator();
    }

    public NavigableSet<E> subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) {
        return map.subMap(fromElement, fromInclusive, toElement, toInclusive).navigableKeySet();
    }

    public NavigableSet<E> headSet(E toElement, boolean inclusive) {
        return map.headMap(toElement, inclusive).navigableKeySet();
    }

    public NavigableSet<E> tailSet(E fromElement, boolean inclusive) {
        return map.tailMap(fromElement, inclusive).navigableKeySet();
    }


    public boolean remove(Object object) {
        return map.remove(object) != null;
    }

    public int size() {
        return map.size();
    }


    public E first() {
        return map.firstKey();
    }


    public E last() {
        return map.lastKey();
    }


    public SortedSet<E> subSet(E start, E end) {
        Comparator<? super E> c = map.comparator();
        int compare = (c == null) ? ((Comparable<E>) start).compareTo(end) : c
                .compare(start, end);
        if (compare <= 0) {
            return new BTreeSet<E>((BTreeMap<E, Object>) map.subMap(start, true, end, false));
        }
        throw new IllegalArgumentException();
    }


    public SortedSet<E> headSet(E end) {
        // Check for errors
        Comparator<? super E> c = map.comparator();
        if (c == null) {
            ((Comparable<E>) end).compareTo(end);
        } else {
            c.compare(end, end);
        }
        return new BTreeSet<E>((BTreeMap<E, Object>) map.headMap(end, false));
    }


    public SortedSet<E> tailSet(E start) {
        // Check for errors
        Comparator<? super E> c = map.comparator();
        if (c == null) {
            ((Comparable<E>) start).compareTo(start);
        } else {
            c.compare(start, start);
        }
        return new BTreeSet<E>((BTreeMap<E, Object>) map.tailMap(start, true));
    }


}
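A minimal sketch of how this adapter behaves (sets are normally obtained via the createTreeSet factory later in this diff, which wraps a value-less BTree in a BTreeMap and then in this class; db is hypothetical):

    NavigableSet<String> s = db.createTreeSet("tags");
    s.add("b");                // stored as map.put("b", Utils.EMPTY_STRING)
    s.add("d");
    s.contains("b");           // true, via map.containsKey
    s.floor("c");              // "b"
    SortedSet<String> head = s.headSet("d");  // view excluding "d", so only "b"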

@ -0,0 +1,173 @@
package org.apache.jdbm;

import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentNavigableMap;

/**
 * DB is the root interface for creating and loading persistent collections. It also contains
 * transaction operations.
 * //TODO just write some readme
 * <p/>
 *
 * @author Jan Kotek
 * @author Alex Boisvert
 * @author Cees de Groot
 */
public interface DB {

    /**
     * Closes the DB and releases resources.
     * The DB can not be used after it has been closed.
     */
    void close();

    /** @return true if db was already closed */
    boolean isClosed();

    /**
     * Clears the cache and removes all entries it contains.
     * This may help garbage collection when a reference cache is used.
     */
    void clearCache();

    /**
     * Defragments the storage so it consumes less space.
     * It basically copies all records into a different store and then renames it, replacing the original store.
     * <p/>
     * Defrag has two steps: in the first, collections are rearranged so that records in a collection are close to each other
     * and read speed is improved. In the second step all records are sequentially transferred, reclaiming all unused space.
     * The first step is optional and may slow down defragmentation significantly, as it requires many random-access reads.
     * The second step reads and writes data sequentially and is very fast, comparable to copying files to a new location.
     *
     * <p/>
     * This commits any uncommitted data. Defrag also requires free space, as the store is basically recreated at a new location.
     *
     * @param sortCollections if collection records should be rearranged during defragment; this takes some extra time
     */
    void defrag(boolean sortCollections);

    /**
     * Commit (make persistent) all changes since the beginning of the transaction.
     * JDBM supports only a single transaction.
     */
    void commit();

    /**
     * Rollback (cancel) all changes since the beginning of the transaction.
     * JDBM supports only a single transaction.
     * This operation affects all maps created or loaded by this DB.
     */
    void rollback();

    /**
     * Calculates some database statistics such as collection sizes and record distributions.
     * Can be useful for performance optimization and troubleshooting.
     * This method can run for a very long time.
     *
     * @return statistics contained in string
     */
    String calculateStatistics();


    /**
     * Copies database content into a ZIP file
     * @param zipFile
     */
    void copyToZip(String zipFile);


    /**
     * Get a <code>Map</code> which was already created and saved in DB.
     * This map uses a disk-based H*Tree and should have similar performance
     * to <code>HashMap</code>.
     *
     * @param name of hash map
     *
     * @return map
     */
    <K, V> ConcurrentMap<K, V> getHashMap(String name);

    /**
     * Creates a Map which persists data into DB.
     *
     * @param name record name
     * @return newly created map
     */
    <K, V> ConcurrentMap<K, V> createHashMap(String name);


    /**
     * Creates a Hash Map which persists data into DB.
     * The map will use custom serializers for keys and values.
     * Leave keySerializer null to use the default serializer for keys.
     *
     * @param <K> Key type
     * @param <V> Value type
     * @param name record name
     * @param keySerializer serializer to be used for keys, leave null to use the default serializer
     * @param valueSerializer serializer to be used for values
     * @return newly created map
     */
    <K, V> ConcurrentMap<K, V> createHashMap(String name, Serializer<K> keySerializer, Serializer<V> valueSerializer);

    <K> Set<K> createHashSet(String name);

    <K> Set<K> getHashSet(String name);

    <K> Set<K> createHashSet(String name, Serializer<K> keySerializer);

    <K, V> ConcurrentNavigableMap<K, V> getTreeMap(String name);

    /**
     * Creates a TreeMap which persists data into DB.
     *
     * @param <K> Key type
     * @param <V> Value type
     * @param name record name
     * @return newly created map
     */
    <K extends Comparable, V> NavigableMap<K, V> createTreeMap(String name);

    /**
     * Creates a TreeMap which persists data into DB.
     *
     * @param <K> Key type
     * @param <V> Value type
     * @param name record name
     * @param keyComparator Comparator used to sort keys
     * @param keySerializer Serializer used for keys. This may reduce disk space usage.
     * @param valueSerializer Serializer used for values. This may reduce disk space usage.
     * @return newly created map
     */
    <K, V> ConcurrentNavigableMap<K, V> createTreeMap(String name,
            Comparator<K> keyComparator, Serializer<K> keySerializer, Serializer<V> valueSerializer);

    <K> NavigableSet<K> getTreeSet(String name);

    <K> NavigableSet<K> createTreeSet(String name);

    <K> NavigableSet<K> createTreeSet(String name, Comparator<K> keyComparator, Serializer<K> keySerializer);

    <K> List<K> createLinkedList(String name);

    <K> List<K> createLinkedList(String name, Serializer<K> serializer);

    <K> List<K> getLinkedList(String name);

    /** Returns an unmodifiable map which contains all collection names and the collections themselves */
    Map<String, Object> getCollections();

    /** Completely removes a collection from the store */
    void deleteCollection(String name);

    /** Java collections return their size as an int, which may not be enough for JDBM collections.
     * This method returns the number of elements in a JDBM collection as a long.
     *
     * @param collection created by JDBM
     * @return number of elements in the collection as long
     */
    long collectionSize(Object collection);

}
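A sketch of a typical session against this interface (how a DB instance is obtained is outside this hunk; openDatabase is a hypothetical factory helper standing in for whatever DBMaker-style entry point the library provides):

    DB db = openDatabase();  // hypothetical factory, not part of this diff
    ConcurrentNavigableMap<String, Long> m = db.createTreeMap("events", null, null, null);
    m.put("first", 1L);
    db.commit();                    // make the insert persistent
    m.put("second", 2L);
    db.rollback();                  // discard everything since the last commit
    long n = db.collectionSize(m);  // only the committed entry remains
    db.defrag(true);                // optionally rearrange, then compact the store
    db.close();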
|
|
@ -0,0 +1,590 @@
|
|||
/*******************************************************************************
|
||||
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
******************************************************************************/
|
||||
package org.apache.jdbm;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOError;
|
||||
import java.io.IOException;
|
||||
import java.lang.ref.WeakReference;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.ConcurrentNavigableMap;
|
||||
|
||||
/**
|
||||
* An abstract class implementing most of DB.
|
||||
* It also has some JDBM package protected stuff (getNamedRecord)
|
||||
*/
|
||||
abstract class DBAbstract implements DB {
|
||||
|
||||
|
||||
/**
|
||||
* Reserved slot for name directory recid.
|
||||
*/
|
||||
static final byte NAME_DIRECTORY_ROOT = 0;
|
||||
|
||||
|
||||
/**
|
||||
* Reserved slot for version number
|
||||
*/
|
||||
static final byte STORE_VERSION_NUMBER_ROOT = 1;
|
||||
|
||||
/**
|
||||
* Reserved slot for recid where Serial class info is stored
|
||||
*
|
||||
* NOTE when introducing more roots, do not forget to update defrag
|
||||
*/
|
||||
static final byte SERIAL_CLASS_INFO_RECID_ROOT = 2;
|
||||
|
||||
/** to prevent double instances of the same collection, we use weak value map
|
||||
*
|
||||
* //TODO what to do when there is rollback?
|
||||
* //TODO clear on close
|
||||
*/
|
||||
final private Map<String,WeakReference<Object>> collections = new HashMap<String,WeakReference<Object>>();
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Inserts a new record using a custom serializer.
|
||||
*
|
||||
* @param obj the object for the new record.
|
||||
* @param serializer a custom serializer
|
||||
* @return the rowid for the new record.
|
||||
* @throws java.io.IOException when one of the underlying I/O operations fails.
|
||||
*/
|
||||
abstract <A> long insert(A obj, Serializer<A> serializer,boolean disableCache) throws IOException;
|
||||
|
||||
/**
|
||||
* Deletes a record.
|
||||
*
|
||||
* @param recid the rowid for the record that should be deleted.
|
||||
* @throws java.io.IOException when one of the underlying I/O operations fails.
|
||||
*/
|
||||
abstract void delete(long recid) throws IOException;
|
||||
|
||||
|
||||
/**
|
||||
* Updates a record using a custom serializer.
|
||||
* If given recid does not exist, IOException will be thrown before/during commit (cache).
|
||||
*
|
||||
* @param recid the recid for the record that is to be updated.
|
||||
* @param obj the new object for the record.
|
||||
* @param serializer a custom serializer
|
||||
* @throws java.io.IOException when one of the underlying I/O operations fails
|
||||
*/
|
||||
abstract <A> void update(long recid, A obj, Serializer<A> serializer)
|
||||
throws IOException;
|
||||
|
||||
|
||||
/**
|
||||
* Fetches a record using a custom serializer.
|
||||
*
|
||||
* @param recid the recid for the record that must be fetched.
|
||||
* @param serializer a custom serializer
|
||||
* @return the object contained in the record, null if given recid does not exist
|
||||
* @throws java.io.IOException when one of the underlying I/O operations fails.
|
||||
*/
|
||||
abstract <A> A fetch(long recid, Serializer<A> serializer)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Fetches a record using a custom serializer and optionaly disabled cache
|
||||
*
|
||||
* @param recid the recid for the record that must be fetched.
|
||||
* @param serializer a custom serializer
|
||||
* @param disableCache true to disable any caching mechanism
|
||||
* @return the object contained in the record, null if given recid does not exist
|
||||
* @throws java.io.IOException when one of the underlying I/O operations fails.
|
||||
*/
|
||||
abstract <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache)
|
||||
throws IOException;
|
||||
|
||||
|
||||
public long insert(Object obj) throws IOException {
|
||||
return insert(obj, defaultSerializer(),false);
|
||||
}
|
||||
|
||||
|
||||
public void update(long recid, Object obj) throws IOException {
|
||||
update(recid, obj, defaultSerializer());
|
||||
}
|
||||
|
||||
|
||||
synchronized public <A> A fetch(long recid) throws IOException {
|
||||
return (A) fetch(recid, defaultSerializer());
|
||||
}
|
||||
|
||||
synchronized public <K, V> ConcurrentMap<K, V> getHashMap(String name) {
|
||||
Object o = getCollectionInstance(name);
|
||||
if(o!=null)
|
||||
return (ConcurrentMap<K, V>) o;
|
||||
|
||||
try {
|
||||
long recid = getNamedObject(name);
|
||||
if(recid == 0) return null;
|
||||
|
||||
HTree tree = fetch(recid);
|
||||
tree.setPersistenceContext(this);
|
||||
if(!tree.hasValues()){
|
||||
throw new ClassCastException("HashSet is not HashMap");
|
||||
}
|
||||
collections.put(name,new WeakReference<Object>(tree));
|
||||
return tree;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized public <K, V> ConcurrentMap<K, V> createHashMap(String name) {
|
||||
return createHashMap(name, null, null);
|
||||
}
|
||||
|
||||
|
||||
public synchronized <K, V> ConcurrentMap<K, V> createHashMap(String name, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
|
||||
try {
|
||||
assertNameNotExist(name);
|
||||
|
||||
HTree<K, V> tree = new HTree(this, keySerializer, valueSerializer,true);
|
||||
long recid = insert(tree);
|
||||
setNamedObject(name, recid);
|
||||
collections.put(name,new WeakReference<Object>(tree));
|
||||
return tree;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized <K> Set<K> getHashSet(String name) {
|
||||
Object o = getCollectionInstance(name);
|
||||
if(o!=null)
|
||||
return (Set<K>) o;
|
||||
|
||||
try {
|
||||
long recid = getNamedObject(name);
|
||||
if(recid == 0) return null;
|
||||
|
||||
HTree tree = fetch(recid);
|
||||
tree.setPersistenceContext(this);
|
||||
if(tree.hasValues()){
|
||||
throw new ClassCastException("HashMap is not HashSet");
|
||||
}
|
||||
Set<K> ret = new HTreeSet(tree);
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized <K> Set<K> createHashSet(String name) {
|
||||
return createHashSet(name, null);
|
||||
}
|
||||
|
||||
public synchronized <K> Set<K> createHashSet(String name, Serializer<K> keySerializer) {
|
||||
try {
|
||||
assertNameNotExist(name);
|
||||
|
||||
HTree<K, Object> tree = new HTree(this, keySerializer, null,false);
|
||||
long recid = insert(tree);
|
||||
setNamedObject(name, recid);
|
||||
|
||||
Set<K> ret = new HTreeSet<K>(tree);
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized public <K, V> ConcurrentNavigableMap<K, V> getTreeMap(String name) {
|
||||
Object o = getCollectionInstance(name);
|
||||
if(o!=null)
|
||||
return (ConcurrentNavigableMap<K, V> ) o;
|
||||
|
||||
try {
|
||||
long recid = getNamedObject(name);
|
||||
if(recid == 0) return null;
|
||||
|
||||
BTree t = BTree.<K, V>load(this, recid);
|
||||
if(!t.hasValues())
|
||||
throw new ClassCastException("TreeSet is not TreeMap");
|
||||
ConcurrentNavigableMap<K,V> ret = new BTreeMap<K, V>(t,false); //TODO put readonly flag here
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized public <K extends Comparable, V> ConcurrentNavigableMap<K, V> createTreeMap(String name) {
|
||||
return createTreeMap(name, null, null, null);
|
||||
}
|
||||
|
||||
|
||||
public synchronized <K, V> ConcurrentNavigableMap<K, V> createTreeMap(String name,
|
||||
Comparator<K> keyComparator,
|
||||
Serializer<K> keySerializer,
|
||||
Serializer<V> valueSerializer) {
|
||||
try {
|
||||
assertNameNotExist(name);
|
||||
BTree<K, V> tree = BTree.createInstance(this, keyComparator, keySerializer, valueSerializer,true);
|
||||
setNamedObject(name, tree.getRecid());
|
||||
ConcurrentNavigableMap<K,V> ret = new BTreeMap<K, V>(tree,false); //TODO put readonly flag here
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public synchronized <K> NavigableSet<K> getTreeSet(String name) {
|
||||
Object o = getCollectionInstance(name);
|
||||
if(o!=null)
|
||||
return (NavigableSet<K> ) o;
|
||||
|
||||
try {
|
||||
long recid = getNamedObject(name);
|
||||
if(recid == 0) return null;
|
||||
|
||||
BTree t = BTree.<K, Object>load(this, recid);
|
||||
if(t.hasValues())
|
||||
throw new ClassCastException("TreeMap is not TreeSet");
|
||||
BTreeSet<K> ret = new BTreeSet<K>(new BTreeMap(t,false));
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized <K> NavigableSet<K> createTreeSet(String name) {
|
||||
return createTreeSet(name, null, null);
|
||||
}
|
||||
|
||||
|
||||
public synchronized <K> NavigableSet<K> createTreeSet(String name, Comparator<K> keyComparator, Serializer<K> keySerializer) {
|
||||
try {
|
||||
assertNameNotExist(name);
|
||||
BTree<K, Object> tree = BTree.createInstance(this, keyComparator, keySerializer, null,false);
|
||||
setNamedObject(name, tree.getRecid());
|
||||
BTreeSet<K> ret = new BTreeSet<K>(new BTreeMap(tree,false));
|
||||
collections.put(name,new WeakReference<Object>(ret));
|
||||
return ret;
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
synchronized public <K> List<K> createLinkedList(String name) {
|
||||
return createLinkedList(name, null);
|
||||
}
|
||||
|
||||
synchronized public <K> List<K> createLinkedList(String name, Serializer<K> serializer) {
|
||||
try {
|
||||
assertNameNotExist(name);
|
||||
|
||||
//allocate record and overwrite it
|
||||
|
||||
LinkedList2<K> list = new LinkedList2<K>(this, serializer);
|
||||
long recid = insert(list);
|
||||
setNamedObject(name, recid);
|
||||
|
||||
collections.put(name,new WeakReference<Object>(list));
|
||||
|
||||
return list;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized public <K> List<K> getLinkedList(String name) {
|
||||
Object o = getCollectionInstance(name);
|
||||
if(o!=null)
|
||||
return (List<K> ) o;
|
||||
|
||||
try {
|
||||
long recid = getNamedObject(name);
|
||||
if(recid == 0) return null;
|
||||
LinkedList2<K> list = (LinkedList2<K>) fetch(recid);
|
||||
list.setPersistenceContext(this);
|
||||
collections.put(name,new WeakReference<Object>(list));
|
||||
return list;
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized Object getCollectionInstance(String name){
|
||||
WeakReference ref = collections.get(name);
|
||||
if(ref==null)return null;
|
||||
Object o = ref.get();
|
||||
if(o != null) return o;
|
||||
//already GCed
|
||||
collections.remove(name);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
private void assertNameNotExist(String name) throws IOException {
|
||||
if (getNamedObject(name) != 0)
|
||||
throw new IllegalArgumentException("Object with name '" + name + "' already exists");
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Obtain the record id of a named object. Returns 0 if named object
|
||||
* doesn't exist.
|
||||
* Named objects are used to store Map views and other well known objects.
|
||||
*/
|
||||
synchronized protected long getNamedObject(String name) throws IOException{
|
||||
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
|
||||
if(nameDirectory_recid == 0){
|
||||
return 0;
|
||||
}
|
||||
HTree<String,Long> m = fetch(nameDirectory_recid);
|
||||
Long res = m.get(name);
|
||||
if(res == null)
|
||||
return 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Set the record id of a named object.
|
||||
* Named objects are used to store Map views and other well known objects.
|
||||
*/
|
||||
synchronized protected void setNamedObject(String name, long recid) throws IOException{
|
||||
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
|
||||
HTree<String,Long> m = null;
|
||||
if(nameDirectory_recid == 0){
|
||||
//does not exists, create it
|
||||
m = new HTree<String, Long>(this,null,null,true);
|
||||
nameDirectory_recid = insert(m);
|
||||
setRoot(NAME_DIRECTORY_ROOT,nameDirectory_recid);
|
||||
}else{
|
||||
//fetch it
|
||||
m = fetch(nameDirectory_recid);
|
||||
}
|
||||
m.put(name,recid);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
synchronized public Map<String,Object> getCollections(){
|
||||
try{
|
||||
Map<String,Object> ret = new LinkedHashMap<String, Object>();
|
||||
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
|
||||
if(nameDirectory_recid==0)
|
||||
return ret;
|
||||
HTree<String,Long> m = fetch(nameDirectory_recid);
|
||||
|
||||
for(Map.Entry<String,Long> e:m.entrySet()){
|
||||
Object o = fetch(e.getValue());
|
||||
if(o instanceof BTree){
|
||||
if(((BTree) o).hasValues)
|
||||
o = getTreeMap(e.getKey());
|
||||
else
|
||||
o = getTreeSet(e.getKey());
|
||||
}
|
||||
else if( o instanceof HTree){
|
||||
if(((HTree) o).hasValues)
|
||||
o = getHashMap(e.getKey());
|
||||
else
|
||||
o = getHashSet(e.getKey());
|
||||
}
|
||||
|
||||
ret.put(e.getKey(), o);
|
||||
}
|
||||
return Collections.unmodifiableMap(ret);
|
||||
}catch(IOException e){
|
||||
throw new IOError(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
synchronized public void deleteCollection(String name){
|
||||
try{
|
||||
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
|
||||
if(nameDirectory_recid==0)
|
||||
throw new IOException("Collection not found");
|
||||
HTree<String,Long> dir = fetch(nameDirectory_recid);
|
||||
|
||||
Long recid = dir.get(name);
|
||||
if(recid == null) throw new IOException("Collection not found");
|
||||
|
||||
Object o = fetch(recid);
|
||||
//we can not use O instance since it is not correctly initialized
|
||||
if(o instanceof LinkedList2){
|
||||
LinkedList2 l = (LinkedList2) o;
|
||||
l.clear();
|
||||
delete(l.rootRecid);
|
||||
}else if(o instanceof BTree){
|
||||
((BTree) o).clear();
|
||||
} else if( o instanceof HTree){
|
||||
HTree t = (HTree) o;
|
||||
t.clear();
|
||||
HTreeDirectory n = (HTreeDirectory) fetch(t.rootRecid,t.SERIALIZER);
|
||||
n.deleteAllChildren();
|
||||
delete(t.rootRecid);
|
||||
}else{
|
||||
throw new InternalError("unknown collection type: "+(o==null?null:o.getClass()));
|
||||
}
|
||||
delete(recid);
|
||||
collections.remove(name);
|
||||
|
||||
|
||||
dir.remove(name);
|
||||
|
||||
}catch(IOException e){
|
||||
throw new IOError(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
/** we need to set reference to this DB instance, so serializer needs to be here*/
|
||||
final Serializer<Serialization> defaultSerializationSerializer = new Serializer<Serialization>(){
|
||||
|
||||
public void serialize(DataOutput out, Serialization obj) throws IOException {
|
||||
LongPacker.packLong(out,obj.serialClassInfoRecid);
|
||||
SerialClassInfo.serializer.serialize(out,obj.registered);
|
||||
}
|
||||
|
||||
public Serialization deserialize(DataInput in) throws IOException, ClassNotFoundException {
|
||||
final long recid = LongPacker.unpackLong(in);
|
||||
final ArrayList<SerialClassInfo.ClassInfo> classes = SerialClassInfo.serializer.deserialize(in);
|
||||
return new Serialization(DBAbstract.this,recid,classes);
|
||||
}
|
||||
};
|
||||
|
||||
public synchronized Serializer defaultSerializer() {
|
||||
|
||||
try{
|
||||
long serialClassInfoRecid = getRoot(SERIAL_CLASS_INFO_RECID_ROOT);
|
||||
if (serialClassInfoRecid == 0) {
|
||||
//allocate new recid
|
||||
serialClassInfoRecid = insert(null,Utils.NULL_SERIALIZER,false);
|
||||
//and insert new serializer
|
||||
Serialization ser = new Serialization(this,serialClassInfoRecid,new ArrayList<SerialClassInfo.ClassInfo>());
|
||||
|
||||
update(serialClassInfoRecid,ser, defaultSerializationSerializer);
|
||||
setRoot(SERIAL_CLASS_INFO_RECID_ROOT, serialClassInfoRecid);
|
||||
return ser;
|
||||
}else{
|
||||
return fetch(serialClassInfoRecid,defaultSerializationSerializer);
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new IOError(e);
|
||||
}
|
||||
|
||||
}

    final protected void checkNotClosed(){
        if(isClosed()) throw new IllegalStateException("db was closed");
    }

    protected abstract void setRoot(byte root, long recid);
    protected abstract long getRoot(byte root);


    synchronized public long collectionSize(Object collection){
        if(collection instanceof BTreeMap){
            BTreeMap t = (BTreeMap) collection;
            if(t.fromKey!=null || t.toKey!=null) throw new IllegalArgumentException("collectionSize does not work on BTree submap");
            return t.tree._entries;
        }else if(collection instanceof HTree){
            return ((HTree)collection).getRoot().size;
        }else if(collection instanceof HTreeSet){
            return collectionSize(((HTreeSet) collection).map);
        }else if(collection instanceof BTreeSet){
            return collectionSize(((BTreeSet) collection).map);
        }else if(collection instanceof LinkedList2){
            return ((LinkedList2)collection).getRoot().size;
        }else{
            throw new IllegalArgumentException("Not a JDBM collection");
        }
    }


    void addShutdownHook(){
        //register the hook only once, and remember which db it should close
        if(shutdownCloseThread==null){
            shutdownCloseThread = new ShutdownCloseThread();
            shutdownCloseThread.dbToClose = this;
            Runtime.getRuntime().addShutdownHook(shutdownCloseThread);
        }
    }

    public void close(){
        if(shutdownCloseThread!=null){
            Runtime.getRuntime().removeShutdownHook(shutdownCloseThread);
            shutdownCloseThread.dbToClose = null;
            shutdownCloseThread = null;
        }
    }


    ShutdownCloseThread shutdownCloseThread = null;

    private static class ShutdownCloseThread extends Thread{

        DBAbstract dbToClose = null;

        ShutdownCloseThread(){
            super("JDBM shutdown");
        }

        public void run(){
            if(dbToClose!=null && !dbToClose.isClosed()){
                dbToClose.shutdownCloseThread = null;
                dbToClose.close();
            }
        }
    }
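
    // A minimal sketch of the shutdown-hook pattern above (names illustrative,
    // not JDBM API). The hook must be deregistered on a normal close(), both to
    // avoid double-closing and so the JVM does not keep a strong reference to
    // the db alive through the registered thread:
    //
    //   Thread hook = new Thread("demo shutdown") {
    //       public void run() { /* close the resource if still open */ }
    //   };
    //   Runtime.getRuntime().addShutdownHook(hook);     // on open
    //   Runtime.getRuntime().removeShutdownHook(hook);  // on normal close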

    synchronized public void rollback() {
        try {
            for(WeakReference<Object> o : collections.values()){
                Object c = o.get();
                if(c instanceof BTreeMap){
                    //reload tree
                    BTreeMap m = (BTreeMap) c;
                    m.tree = fetch(m.tree.getRecid());
                }
                if(c instanceof BTreeSet){
                    //reload tree
                    BTreeSet m = (BTreeSet) c;
                    m.map.tree = fetch(m.map.tree.getRecid());
                }
            }
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
}

@ -0,0 +1,162 @@
package org.apache.jdbm;

import javax.crypto.Cipher;
import java.io.IOError;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;

/**
 * Abstract class with common cache functionality
 */
abstract class DBCache extends DBStore{

    static final int NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT = 1024;

    static final byte NONE = 1;
    static final byte MRU = 2;
    static final byte WEAK = 3;
    static final byte SOFT = 4;
    static final byte HARD = 5;

    static final class DirtyCacheEntry {
        long _recid; //TODO recid is already part of _hashDirties, so this field could be removed to save memory
        Object _obj;
        Serializer _serializer;
    }


    /**
     * Dirty status of _hash CacheEntry Values
     */
    final protected LongHashMap<DirtyCacheEntry> _hashDirties = new LongHashMap<DirtyCacheEntry>();

    private Serializer cachedDefaultSerializer = null;


    /**
     * Construct a CacheRecordManager wrapping another DB and
     * using a given cache policy.
     */
    public DBCache(String filename, boolean readonly, boolean transactionDisabled,
                   Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
                   boolean deleteFilesAfterClose, boolean lockingDisabled){

        super(filename, readonly, transactionDisabled,
                cipherIn, cipherOut, useRandomAccessFile,
                deleteFilesAfterClose, lockingDisabled);
    }


    @Override
    public synchronized Serializer defaultSerializer(){
        if(cachedDefaultSerializer==null)
            cachedDefaultSerializer = super.defaultSerializer();
        return cachedDefaultSerializer;
    }

    @Override
    boolean needsAutoCommit() {
        return super.needsAutoCommit() ||
                (transactionsDisabled && !commitInProgress && _hashDirties.size() > NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT);
    }


    public synchronized <A> long insert(final A obj, final Serializer<A> serializer, final boolean disableCache)
            throws IOException {
        checkNotClosed();

        if(super.needsAutoCommit())
            commit();

        if(disableCache)
            return super.insert(obj, serializer, disableCache);


        //preallocate a recid so we have something to return
        final long recid = super.insert(PREALOCATE_OBJ, null, disableCache);

//        super.update(recid, obj,serializer);

//        return super.insert(obj,serializer,disableCache);

        //and create a new dirty record for a future update
        final DirtyCacheEntry e = new DirtyCacheEntry();
        e._recid = recid;
        e._obj = obj;
        e._serializer = serializer;
        _hashDirties.put(recid,e);

        return recid;
    }
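
    // The insert above is deliberately lazy: it reserves a recid on disk with a
    // placeholder (PREALOCATE_OBJ) and parks the real object in _hashDirties.
    // Serialization happens only when updateCacheEntries() flushes the dirties
    // on commit, so repeated in-memory updates before a commit cost nothing.
    // Rough lifecycle sketch (illustrative):
    //
    //   long recid = insert(obj, ser, false); // placeholder on disk, obj in dirty map
    //   update(recid, obj2, ser);             // replaces the dirty entry in memory only
    //   commit();                             // updateCacheEntries() -> super.update()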

    public synchronized void commit() {
        try{
            commitInProgress = true;
            updateCacheEntries();
            super.commit();
        }finally {
            commitInProgress = false;
        }
    }

    public synchronized void rollback(){
        cachedDefaultSerializer = null;
        _hashDirties.clear();
        super.rollback();
    }


    private static final Comparator<DirtyCacheEntry> DIRTY_COMPARATOR = new Comparator<DirtyCacheEntry>() {
        public int compare(DirtyCacheEntry o1, DirtyCacheEntry o2) {
            //compare explicitly; casting (o1._recid - o2._recid) to int could overflow
            return o1._recid < o2._recid ? -1 : (o1._recid == o2._recid ? 0 : 1);
        }
    };


    /**
     * Update all dirty cache objects to the underlying DB.
     */
    protected void updateCacheEntries() {
        try {
            synchronized(_hashDirties){

                while(!_hashDirties.isEmpty()){
                    //make a defensive copy of the values, as _db.update() may trigger changes in the db
                    //and that would modify the dirties again
                    DirtyCacheEntry[] vals = new DirtyCacheEntry[_hashDirties.size()];
                    Iterator<DirtyCacheEntry> iter = _hashDirties.valuesIterator();

                    for(int i = 0; i < vals.length; i++){
                        vals[i] = iter.next();
                    }
                    iter = null;

                    java.util.Arrays.sort(vals, DIRTY_COMPARATOR);

                    for(int i = 0; i < vals.length; i++){
                        final DirtyCacheEntry entry = vals[i];
                        vals[i] = null;
                        super.update(entry._recid, entry._obj, entry._serializer);
                        _hashDirties.remove(entry._recid);
                    }

                    //update may have triggered more records to be added into dirties, so repeat until all records are written
                }
            }
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
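
    // Why the dance above: super.update() may itself dirty more records (for
    // example while internal page structures grow), so the snapshot array keeps
    // the iterator away from concurrent modification, and the outer while loop
    // drains any newcomers on the next pass. Sorting the snapshot by recid makes
    // the write-back touch the page file in roughly ascending order rather than
    // hash-map order, which tends to be friendlier to sequential I/O.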
}

@ -0,0 +1,350 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import javax.crypto.Cipher;
import java.io.IOException;

/**
 * A DB wrapping and caching another DB.
 *
 * @author Jan Kotek
 * @author Alex Boisvert
 * @author Cees de Groot
 *
 * TODO add 'cache miss' statistics
 */
class DBCacheMRU
        extends DBCache {


    private static final boolean debug = false;


    /**
     * Cached object hashtable
     */
    protected LongHashMap<CacheEntry> _hash;

    /**
     * Maximum number of objects in the cache.
     */
    protected int _max;

    /**
     * Beginning of the linked list of cache elements. The first entry is the
     * element which has been used least recently.
     */
    protected CacheEntry _first;

    /**
     * End of the linked list of cache elements. The last entry is the element
     * which has been used most recently.
     */
    protected CacheEntry _last;


    /**
     * Construct a CacheRecordManager wrapping another DB and
     * using a given cache policy.
     */
    public DBCacheMRU(String filename, boolean readonly, boolean transactionDisabled,
                      Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
                      boolean deleteFilesAfterClose, int cacheMaxRecords, boolean lockingDisabled) {
        super(filename, readonly, transactionDisabled,
                cipherIn, cipherOut, useRandomAccessFile,
                deleteFilesAfterClose, lockingDisabled);

        _hash = new LongHashMap<CacheEntry>(cacheMaxRecords);
        _max = cacheMaxRecords;
    }


    public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
        if (disableCache)
            return super.fetch(recid, serializer, disableCache);
        else
            return fetch(recid, serializer);
    }


    public synchronized void delete(long recid)
            throws IOException {
        checkNotClosed();

        super.delete(recid);
        synchronized (_hash){
            CacheEntry entry = _hash.get(recid);
            if (entry != null) {
                removeEntry(entry);
                _hash.remove(entry._recid);
            }
            _hashDirties.remove(recid);
        }

        if(super.needsAutoCommit())
            commit();
    }

    public synchronized <A> void update(final long recid, final A obj, final Serializer<A> serializer) throws IOException {
        checkNotClosed();

        synchronized (_hash){

            //remove entry if it already exists
            CacheEntry entry = cacheGet(recid);
            if (entry != null) {
                _hash.remove(recid);
                removeEntry(entry);
            }

            //check if the entry is in dirties; in that case just update its object
            DirtyCacheEntry e = _hashDirties.get(recid);
            if(e!=null){
                if(recid!=e._recid) throw new Error();
                e._obj = obj;
                e._serializer = serializer;
                return;
            }

            //create new dirty entry
            e = new DirtyCacheEntry();
            e._recid = recid;
            e._obj = obj;
            e._serializer = serializer;
            _hashDirties.put(recid,e);
        }

        if(super.needsAutoCommit())
            commit();
    }


    public synchronized <A> A fetch(long recid, Serializer<A> serializer)
            throws IOException {

        checkNotClosed();

        final CacheEntry entry = cacheGet(recid);
        if (entry != null) {
            return (A) entry._obj;
        }

        //check dirties
        final DirtyCacheEntry entry2 = _hashDirties.get(recid);
        if(entry2!=null){
            return (A) entry2._obj;
        }

        A value = super.fetch(recid, serializer);

        if(super.needsAutoCommit())
            commit();

        //put record into MRU cache
        cachePut(recid, value);

        return value;
    }


    public synchronized void close() {
        if(isClosed())
            return;

        updateCacheEntries();
        super.close();
        _hash = null;
    }


    public synchronized void rollback() {
        // discard all cache entries, since we don't know which entries
        // were part of the transaction
        synchronized (_hash){
            _hash.clear();
            _first = null;
            _last = null;
        }

        super.rollback();
    }


    /**
     * Obtain an object from the cache
     */
    protected CacheEntry cacheGet(long key) {
        synchronized (_hash){
            CacheEntry entry = _hash.get(key);
            if (entry != null && _last != entry) {
                //touch entry
                removeEntry(entry);
                addEntry(entry);
            }
            return entry;
        }
    }


    /**
     * Place an object in the cache.
     *
     * @throws IOException
     */
    protected void cachePut(final long recid, final Object value) throws IOException {
        synchronized (_hash){
            CacheEntry entry = _hash.get(recid);
            if (entry != null) {
                entry._obj = value;
                //touch entry
                if (_last != entry) {
                    removeEntry(entry);
                    addEntry(entry);
                }
            } else {

                if (_hash.size() >= _max) {
                    // purge and recycle entry
                    entry = purgeEntry();
                    entry._recid = recid;
                    entry._obj = value;
                } else {
                    entry = new CacheEntry(recid, value);
                }
                addEntry(entry);
                _hash.put(entry._recid, entry);
            }
        }
    }

    /**
     * Add a CacheEntry. The entry goes at the end of the list.
     */
    protected void addEntry(CacheEntry entry) {
        synchronized (_hash){
            if (_first == null) {
                _first = entry;
                _last = entry;
            } else {
                _last._next = entry;
                entry._previous = _last;
                _last = entry;
            }
        }
    }


    /**
     * Remove a CacheEntry from the linked list
     */
    protected void removeEntry(CacheEntry entry) {
        synchronized (_hash){
            if (entry == _first) {
                _first = entry._next;
            }
            if (_last == entry) {
                _last = entry._previous;
            }
            CacheEntry previous = entry._previous;
            CacheEntry next = entry._next;
            if (previous != null) {
                previous._next = next;
            }
            if (next != null) {
                next._previous = previous;
            }
            entry._previous = null;
            entry._next = null;
        }
    }

    /**
     * Purge the least recently used object from the cache
     *
     * @return recyclable CacheEntry
     */
    protected CacheEntry purgeEntry() {
        synchronized (_hash){
            CacheEntry entry = _first;
            if (entry == null)
                return new CacheEntry(-1, null);

            removeEntry(entry);
            _hash.remove(entry._recid);
            entry._obj = null;
            return entry;
        }
    }
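
    // A minimal sketch of the same most-recently-used eviction policy using
    // java.util.LinkedHashMap in access order (illustrative only; JDBM keeps
    // its own linked list above so that purged entries can be recycled without
    // allocating new objects):
    static final class LruSketch<K, V> extends java.util.LinkedHashMap<K, V> {
        private final int max;

        LruSketch(int max) {
            super(16, 0.75f, true); //true = access order, so iteration is LRU-first
            this.max = max;
        }

        @Override
        protected boolean removeEldestEntry(java.util.Map.Entry<K, V> eldest) {
            return size() > max; //evict the least recently used entry once over capacity
        }
    }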

    @SuppressWarnings("unchecked")
    static final class CacheEntry {

        protected long _recid;
        protected Object _obj;

        protected CacheEntry _previous;
        protected CacheEntry _next;

        CacheEntry(long recid, Object obj) {
            _recid = recid;
            _obj = obj;
        }
    }


    public void clearCache() {
        if(debug)
            System.err.println("DBCache: Clear cache");

        // discard all cache entries, since we don't know which entries
        // were part of the transaction
        synchronized (_hash){
            _hash.clear();
            _first = null;
            _last = null;

            //clear dirties
            updateCacheEntries();
        }
    }

}

@ -0,0 +1,401 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import javax.crypto.Cipher;
import java.io.IOException;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * A DB wrapping and caching another DB.
 *
 * @author Jan Kotek
 * @author Alex Boisvert
 * @author Cees de Groot
 *
 * TODO add 'cache miss' statistics
 */
public class DBCacheRef
        extends DBCache {


    private static final boolean debug = false;


    /**
     * If the reference cache is enabled, this contains softly (or weakly) referenced clean entries.
     * If an entry becomes dirty, it is moved to _hashDirties, which has limited size.
     * This map is accessed from the SoftCache Disposer thread, so all access must be
     * synchronized.
     */
    protected LongHashMap _softHash;

    /**
     * Reference queue used to collect Soft Cache entries
     */
    protected ReferenceQueue<ReferenceCacheEntry> _refQueue;


    /**
     * Thread in which Soft Cache references are disposed
     */
    protected Thread _softRefThread;

    protected static AtomicInteger threadCounter = new AtomicInteger(0);


    /** counter which counts the number of inserts since the last 'action' */
    protected int insertCounter = 0;

    private final boolean _autoClearReferenceCacheOnLowMem;
    private final byte _cacheType;


    /**
     * Construct a CacheRecordManager wrapping another DB and
     * using a given cache policy.
     */
    public DBCacheRef(String filename, boolean readonly, boolean transactionDisabled,
                      Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
                      boolean deleteFilesAfterClose,
                      byte cacheType, boolean cacheAutoClearOnLowMem, boolean lockingDisabled) {

        super(filename, readonly, transactionDisabled,
                cipherIn, cipherOut, useRandomAccessFile,
                deleteFilesAfterClose, lockingDisabled);

        this._cacheType = cacheType;
        _autoClearReferenceCacheOnLowMem = cacheAutoClearOnLowMem;

        _softHash = new LongHashMap<ReferenceCacheEntry>();
        _refQueue = new ReferenceQueue<ReferenceCacheEntry>();
        _softRefThread = new Thread(
                new SoftRunnable(this, _refQueue),
                "JDBM Soft Cache Disposer " + (threadCounter.incrementAndGet()));
        _softRefThread.setDaemon(true);
        _softRefThread.start();
    }


    void clearCacheIfLowOnMem() {

        insertCounter = 0;

        if(!_autoClearReferenceCacheOnLowMem)
            return;

        Runtime r = Runtime.getRuntime();
        long max = r.maxMemory();
        if(max == Long.MAX_VALUE)
            return;

        double free = r.freeMemory();
        double total = r.totalMemory();
        //freeMemory() is relative to the current heap (total), not to max;
        //the JVM may still grow the heap, so count the ungrown part as free as well
        free = free + (max-total);

        if(debug)
            System.err.println("DBCache: freemem = " + free + " = " + (free/max) + "%");

        if(free < 1e7 || free*4 < max)
            clearCache();
    }
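
    // Worked example for the heuristic above (illustrative numbers): with
    // -Xmx1024m, max = 1024 MB. If the heap has grown to total = 512 MB, of
    // which freeMemory() reports 64 MB, the effective headroom is
    //   free = 64 + (1024 - 512) = 576 MB,
    // because the JVM can still expand the heap toward max. The reference cache
    // is cleared once headroom drops below ~10 MB (1e7 bytes) or below 25% of
    // max (free*4 < max).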

    public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
        if (disableCache)
            return super.fetch(recid, serializer, disableCache);
        else
            return fetch(recid, serializer);
    }


    public synchronized void delete(long recid)
            throws IOException {
        checkNotClosed();

        super.delete(recid);
        synchronized (_hashDirties){
            _hashDirties.remove(recid);
        }
        synchronized (_softHash) {
            Object e = _softHash.remove(recid);
            if (e instanceof ReferenceCacheEntry) {
                ((ReferenceCacheEntry)e).clear();
            }
        }

        if(needsAutoCommit())
            commit();
    }

    public synchronized <A> void update(final long recid, A obj, Serializer<A> serializer) throws IOException {
        checkNotClosed();

        synchronized (_softHash) {
            //the soft cache cannot contain dirty objects
            Object e = _softHash.remove(recid);
            if (e instanceof ReferenceCacheEntry) {
                ((ReferenceCacheEntry)e).clear();
            }
        }
        synchronized (_hashDirties){
            //put into dirty cache
            final DirtyCacheEntry e = new DirtyCacheEntry();
            e._recid = recid;
            e._obj = obj;
            e._serializer = serializer;
            _hashDirties.put(recid,e);
        }

        if(needsAutoCommit())
            commit();
    }


    public synchronized <A> A fetch(long recid, Serializer<A> serializer)
            throws IOException {
        checkNotClosed();

        synchronized (_softHash) {
            Object e = _softHash.get(recid);
            if (e != null) {
                if(e instanceof ReferenceCacheEntry)
                    e = ((ReferenceCacheEntry)e).get();
                if (e != null) {
                    return (A) e;
                }
            }
        }

        synchronized (_hashDirties){
            DirtyCacheEntry e2 = _hashDirties.get(recid);
            if(e2!=null){
                return (A) e2._obj;
            }
        }

        A value = super.fetch(recid, serializer);

        if(needsAutoCommit())
            commit();

        synchronized (_softHash) {
            if (_cacheType == SOFT)
                _softHash.put(recid, new SoftCacheEntry(recid, value, _refQueue));
            else if (_cacheType == WEAK)
                _softHash.put(recid, new WeakCacheEntry(recid, value, _refQueue));
            else
                _softHash.put(recid, value);
        }

        return value;
    }


    public synchronized void close() {
        checkNotClosed();

        updateCacheEntries();
        super.close();
        _softHash = null;
        _softRefThread.interrupt();
    }


    public synchronized void rollback() {
        checkNotClosed();

        // discard all cache entries, since we don't know which entries
        // were part of the transaction
        synchronized (_softHash) {
            Iterator<ReferenceCacheEntry> iter = _softHash.valuesIterator();
            while (iter.hasNext()) {
                ReferenceCacheEntry e = iter.next();
                e.clear();
            }
            _softHash.clear();
        }

        super.rollback();
    }


    protected boolean isCacheEntryDirty(DirtyCacheEntry entry) {
        return _hashDirties.get(entry._recid) != null;
    }

    protected void setCacheEntryDirty(DirtyCacheEntry entry, boolean dirty) {
        if (dirty) {
            _hashDirties.put(entry._recid, entry);
        } else {
            _hashDirties.remove(entry._recid);
        }
    }


    interface ReferenceCacheEntry {
        long getRecid();

        void clear();

        Object get();
    }

    @SuppressWarnings("unchecked")
    static final class SoftCacheEntry extends SoftReference implements ReferenceCacheEntry {
        protected final long _recid;

        public long getRecid() {
            return _recid;
        }

        SoftCacheEntry(long recid, Object obj, ReferenceQueue queue) {
            super(obj, queue);
            _recid = recid;
        }
    }

    @SuppressWarnings("unchecked")
    static final class WeakCacheEntry extends WeakReference implements ReferenceCacheEntry {
        protected final long _recid;

        public long getRecid() {
            return _recid;
        }

        WeakCacheEntry(long recid, Object obj, ReferenceQueue queue) {
            super(obj, queue);
            _recid = recid;
        }
    }


    /**
     * Runs in a separate thread and cleans the SoftCache.
     * The runnable exits automatically when the CacheRecordManager is GCed.
     *
     * @author Jan Kotek
     */
    static final class SoftRunnable implements Runnable {

        private ReferenceQueue<ReferenceCacheEntry> entryQueue;
        private WeakReference<DBCacheRef> db2;

        public SoftRunnable(DBCacheRef db,
                            ReferenceQueue<ReferenceCacheEntry> entryQueue) {
            this.db2 = new WeakReference<DBCacheRef>(db);
            this.entryQueue = entryQueue;
        }

        public void run() {
            while (true) try {

                //collect the next item from the cache;
                //the 10000 ms limit keeps periodically checking whether the db was GCed
                ReferenceCacheEntry e = (ReferenceCacheEntry) entryQueue.remove(10000);

                //check if the db was GCed, cancel in that case
                DBCacheRef db = db2.get();
                if (db == null)
                    return;

                if (e != null) {

                    synchronized (db._softHash) {
                        int counter = 0;
                        while (e != null) {
                            db._softHash.remove(e.getRecid());
                            //cast to the interface; with the WEAK cache the queue holds WeakCacheEntry
                            e = (ReferenceCacheEntry) entryQueue.poll();
                            if(debug)
                                counter++;
                        }
                        if(debug)
                            System.err.println("DBCache: " + counter + " objects released from ref cache.");
                    }
                }else{
                    //check memory consumption every 10 seconds
                    db.clearCacheIfLowOnMem();
                }

            } catch (InterruptedException e) {
                return;
            } catch (Throwable e) {
                //this thread must keep spinning,
                //otherwise SoftCacheEntries would not be disposed
                e.printStackTrace();
            }
        }
    }
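
    // A minimal sketch of the ReferenceQueue mechanism SoftRunnable relies on
    // (plain java.lang.ref, nothing JDBM-specific): the GC enqueues a reference
    // after clearing it, and a consumer thread drains the queue to drop the
    // matching cache entries.
    //
    //   ReferenceQueue<Object> q = new ReferenceQueue<Object>();
    //   SoftReference<Object> ref = new SoftReference<Object>(new Object(), q);
    //   java.lang.ref.Reference<?> cleared = q.remove(10000); // blocks up to 10 s, may be null
    //   if (cleared != null) { /* remove the corresponding cache entry */ }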

    public void clearCache() {
        if(debug)
            System.err.println("DBCache: Clear cache");

        synchronized (_softHash) {
            if(_cacheType!=HARD){
                Iterator<ReferenceCacheEntry> iter = _softHash.valuesIterator();
                while (iter.hasNext()) {
                    ReferenceCacheEntry e = iter.next();
                    e.clear();
                }
            }
            _softHash.clear();
        }
    }

}

@ -0,0 +1,351 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOError;
import java.security.spec.KeySpec;

/**
 * Class used to configure and create a DB. It uses the builder pattern.
 */
public class DBMaker {

    private byte cacheType = DBCacheRef.MRU;
    private int mruCacheSize = 2048;

    private String location = null;

    private boolean disableTransactions = false;
    private boolean lockingDisabled = false;
    private boolean readonly = false;
    private String password = null;
    private boolean useAES256Bit = true;
    private boolean useRandomAccessFile = false;
    private boolean autoClearRefCacheOnLowMem = true;
    private boolean closeOnJVMExit = false;
    private boolean deleteFilesAfterCloseFlag = false;


    private DBMaker(){}

    /**
     * Creates a new DBMaker and sets the file to load data from.
     * @param file to load data from
     * @return new DBMaker
     */
    public static DBMaker openFile(String file){
        DBMaker m = new DBMaker();
        m.location = file;
        return m;
    }

    /**
     * Creates a new DBMaker which uses an in-memory store. Data will be lost after the JVM exits.
     * @return new DBMaker
     */
    public static DBMaker openMemory(){
        return new DBMaker();
    }

    /**
     * Open a store in a zip file
     *
     * @param zip file
     * @return new DBMaker
     */
    public static DBMaker openZip(String zip) {
        DBMaker m = new DBMaker();
        m.location = "$$ZIP$$://" + zip;
        return m;
    }

    static String isZipFileLocation(String location){
        String match = "$$ZIP$$://";
        if(location.startsWith(match)){
            return location.substring(match.length());
        }
        return null;
    }

    /**
     * Use WeakReference for the cache.
     * This cache does not improve performance much,
     * but it prevents JDBM from creating multiple instances of the same object.
     *
     * @return this builder
     */
    public DBMaker enableWeakCache() {
        cacheType = DBCacheRef.WEAK;
        return this;
    }

    /**
     * Use SoftReference for the cache.
     * This cache greatly improves performance if you have enough memory.
     * Instances in the cache are garbage collected when memory gets low.
     *
     * @return this builder
     */
    public DBMaker enableSoftCache() {
        cacheType = DBCacheRef.SOFT;
        return this;
    }

    /**
     * Use hard references for the cache.
     * This greatly improves performance if there is enough memory.
     * A hard cache has smaller memory overhead than Soft or Weak, because
     * reference objects and a reference queue do not have to be maintained.
     *
     * @return this builder
     */
    public DBMaker enableHardCache() {
        cacheType = DBCacheRef.HARD;
        return this;
    }


    /**
     * Use a 'Most Recently Used' cache with limited size.
     * The oldest instances are released from the cache when new instances are fetched.
     * This cache is not cleared by GC. It is good for systems with limited memory.
     * <p/>
     * Default size for the MRU cache is 2048 records.
     *
     * @return this builder
     */
    public DBMaker enableMRUCache() {
        cacheType = DBCacheRef.MRU;
        return this;
    }

    /**
     * Sets the 'Most Recently Used' cache size. This cache is activated by default with size 2048.
     *
     * @param cacheSize number of instances which will be kept in cache.
     * @return this builder
     */
    public DBMaker setMRUCacheSize(int cacheSize) {
        if (cacheSize < 0) throw new IllegalArgumentException("Cache size is smaller than zero");
        cacheType = DBCacheRef.MRU;
        mruCacheSize = cacheSize;
        return this;
    }

    /**
     * If a reference (soft, weak or hard) cache is enabled,
     * GC may not release references fast enough (or not at all in the case of a hard cache).
     * So JDBM periodically checks the amount of free heap memory.
     * If free memory is less than 25% or 10MB,
     * JDBM completely clears its reference cache to prevent possible memory issues.
     * <p>
     * Calling this method disables the automatic cache clearing when memory is low,
     * which can of course lead to out-of-memory errors.
     *
     * @return this builder
     */
    public DBMaker disableCacheAutoClear(){
        this.autoClearRefCacheOnLowMem = false;
        return this;
    }


    /**
     * Enables storage encryption using the AES cipher. JDBM supports both 128 bit and 256 bit encryption if the JRE provides it.
     * There are some restrictions on AES 256 bit, and not all JREs have it by default.
     * <p/>
     * The storage cannot be read (decrypted) unless the key is provided the next time it is opened.
     *
     * @param password used to encrypt the store
     * @param useAES256Bit if true, strong AES 256 bit encryption is used. Otherwise the more common AES 128 bit is used.
     * @return this builder
     */
    public DBMaker enableEncryption(String password, boolean useAES256Bit) {
        this.password = password;
        this.useAES256Bit = useAES256Bit;
        return this;
    }


    /**
     * Make the DB readonly.
     * Update/delete/insert operations will throw 'UnsupportedOperationException'.
     *
     * @return this builder
     */
    public DBMaker readonly() {
        readonly = true;
        return this;
    }


    /**
     * Disable the cache completely
     *
     * @return this builder
     */
    public DBMaker disableCache() {
        cacheType = DBCacheRef.NONE;
        return this;
    }


    /**
     * Option to disable transactions (to increase performance at the cost of potential data loss).
     * Transactions are enabled by default.
     * <p/>
     * Switches off transactioning for the record manager. This means
     * that a) a transaction log is not kept, and b) writes aren't
     * synch'ed after every update. Writes are cached in memory and then flushed
     * to disk every N writes. You may also flush writes manually by calling commit().
     * This is useful when batch inserting into a new database.
     * <p/>
     * When using this, the database must be properly closed before JVM shutdown.
     * Failing to do so may and WILL corrupt the store.
     *
     * @return this builder
     */
    public DBMaker disableTransactions() {
        this.disableTransactions = true;
        return this;
    }

    /**
     * Disable file system based locking (for file systems that do not support it).
     *
     * Locking is not supported by many remote or distributed file systems, such
     * as Lustre and NFS. Attempts to perform locks will result in an
     * IOException with the message "Function not implemented".
     *
     * Disabling locking will avoid this issue, though of course it comes with
     * all the issues of uncontrolled file access.
     *
     * @return this builder
     */
    public DBMaker disableLocking(){
        this.lockingDisabled = true;
        return this;
    }

    /**
     * By default JDBM uses mapped memory buffers to read from files.
     * But this may behave strangely on some platforms.
     * A safe alternative is to use a plain old RandomAccessFile rather than a mapped ByteBuffer.
     * This is typically slower (pages need to be copied into memory on every write).
     *
     * @return this builder
     */
    public DBMaker useRandomAccessFile(){
        this.useRandomAccessFile = true;
        return this;
    }


    /**
     * Registers a shutdown hook and closes the database on JVM exit, if it was not already closed.
     *
     * @return this builder
     */
    public DBMaker closeOnExit(){
        this.closeOnJVMExit = true;
        return this;
    }

    /**
     * Delete all storage files after the DB is closed
     *
     * @return this builder
     */
    public DBMaker deleteFilesAfterClose(){
        this.deleteFilesAfterCloseFlag = true;
        return this;
    }

    /**
     * Opens the database with the settings specified earlier in this builder.
     *
     * @return new DB
     * @throws java.io.IOError if the db could not be opened
     */
    public DB make() {

        Cipher cipherIn = null;
        Cipher cipherOut = null;
        if (password != null) try {
            //initialize ciphers
            //this code comes from Stack Overflow:
            //http://stackoverflow.com/questions/992019/java-256bit-aes-encryption/992413#992413
            byte[] salt = new byte[]{3, -34, 123, 53, 78, 121, -12, -1, 45, -12, -48, 89, 11, 100, 99, 8};

            SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
            KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, 1024, useAES256Bit ? 256 : 128);
            SecretKey tmp = factory.generateSecret(spec);
            SecretKey secret = new SecretKeySpec(tmp.getEncoded(), "AES");

            String transform = "AES/CBC/NoPadding";
            IvParameterSpec params = new IvParameterSpec(salt);

            cipherIn = Cipher.getInstance(transform);
            cipherIn.init(Cipher.ENCRYPT_MODE, secret, params);

            cipherOut = Cipher.getInstance(transform);
            cipherOut.init(Cipher.DECRYPT_MODE, secret, params);

            //sanity check, try with page size
            byte[] data = new byte[Storage.PAGE_SIZE];
            byte[] encData = cipherIn.doFinal(data);
            if (encData.length != Storage.PAGE_SIZE)
                throw new Error("Page size changed after encryption, make sure you use '/NoPadding'");
            byte[] data2 = cipherOut.doFinal(encData);
            for (int i = 0; i < data.length; i++) {
                if (data[i] != data2[i]) throw new Error("Encryption provided by JRE does not work");
            }

        } catch (Exception e) {
            throw new IOError(e);
        }

        DBAbstract db = null;

        if (cacheType == DBCacheRef.MRU){
            db = new DBCacheMRU(location, readonly, disableTransactions, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, mruCacheSize, lockingDisabled);
        }else if(cacheType == DBCacheRef.SOFT || cacheType == DBCacheRef.HARD || cacheType == DBCacheRef.WEAK) {
            db = new DBCacheRef(location, readonly, disableTransactions, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, cacheType, autoClearRefCacheOnLowMem, lockingDisabled);
        } else if (cacheType == DBCacheRef.NONE) {
            db = new DBStore(location, readonly, disableTransactions, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, lockingDisabled);
        } else {
            throw new IllegalArgumentException("Unknown cache type: " + cacheType);
        }

        if(closeOnJVMExit){
            db.addShutdownHook();
        }

        return db;
    }

}
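
// Usage sketch (illustrative, not part of this changeset): a typical builder
// chain. The file path is a placeholder; DB is the public interface returned
// by make().
class DBMakerUsageSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("/tmp/exampledb")
                .enableMRUCache()       //bounded cache, not cleared by GC
                .setMRUCacheSize(4096)  //default would be 2048 records
                .closeOnExit()          //register the shutdown hook from DBAbstract
                .make();
        try {
            //... obtain and use named collections from db here ...
            db.commit();                //mandatory before close when transactions are disabled
        } finally {
            db.close();
        }
    }
}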

@ -0,0 +1,928 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;


import javax.crypto.Cipher;
import java.io.*;
import java.util.*;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

/**
 * This class manages records, which are uninterpreted blobs of data. The
 * set of operations is simple and straightforward: you communicate with
 * the class using long "rowids" and byte[] data blocks. Rowids are returned
 * on inserts and you can stash them away someplace safe to be able to get
 * back to them. Data blocks can be as long as you wish, and may have
 * lengths different from the original when updating.
 * <p/>
 * Operations are synchronized, so that only one of them will happen
 * concurrently even if you hammer away from multiple threads. Operations
 * are made atomic by keeping a transaction log which is recovered after
 * a crash, so the operations specified by this interface all have ACID
 * properties.
 * <p/>
 * You identify a file by just the name. The package attaches <tt>.db</tt>
 * for the database file, and <tt>.lg</tt> for the transaction log. The
 * transaction log is synchronized regularly and then restarted, so don't
 * worry if you see the size going up and down.
 *
 * @author Alex Boisvert
 * @author Cees de Groot
 */
class DBStore
        extends DBAbstract {


    /**
     * Version of the storage format. It should be safe to open lower versions, but the engine should throw an exception
     * when opening newer versions (as they may contain unsupported features or serialization).
     */
    static final long STORE_FORMAT_VERSION = 1L;

    /**
     * Underlying file for store records.
     */
    private PageFile _file;

    /**
     * Page manager for physical manager.
     */
    private PageManager _pageman;

    /**
     * Physical row identifier manager.
     */
    private PhysicalRowIdManager _physMgr;

    /**
     * Indicates that the store is opened for readonly operations.
     * If true, the store will throw UnsupportedOperationException when an update/insert/delete operation is called.
     */
    private final boolean readonly;
    final boolean transactionsDisabled;
    private final boolean deleteFilesAfterClose;

    private static final int AUTOCOMMIT_AFTER_N_PAGES = 1024 * 5;


    boolean commitInProgress = false;


    /**
     * cipher used for decryption, may be null
     */
    private Cipher cipherOut;
    /**
     * cipher used for encryption, may be null
     */
    private Cipher cipherIn;
    private boolean useRandomAccessFile;
    private boolean lockingDisabled;

    void checkCanWrite() {
        if (readonly)
            throw new UnsupportedOperationException("Could not write, store is opened as read-only");
    }


    /**
     * Logical to physical row identifier manager.
     */
    private LogicalRowIdManager _logicMgr;


    /**
     * Static debugging flag
     */
    public static final boolean DEBUG = false;


    static final long PREALOCATE_PHYS_RECID = Short.MIN_VALUE;

    static final Object PREALOCATE_OBJ = new Object();


    private final DataInputOutput buffer = new DataInputOutput();
    private boolean bufferInUse = false;

    private final String _filename;

    public DBStore(String filename, boolean readonly, boolean transactionDisabled, boolean lockingDisabled) throws IOException {
        this(filename, readonly, transactionDisabled, null, null, false, false, false);
    }


    /**
     * Creates a record manager for the indicated file
     *
     * @throws IOException when the file cannot be opened or is not
     *                     a valid file content-wise.
     */
    public DBStore(String filename, boolean readonly, boolean transactionDisabled,
                   Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
                   boolean deleteFilesAfterClose, boolean lockingDisabled){
        _filename = filename;
        this.readonly = readonly;
        this.transactionsDisabled = transactionDisabled;
        this.cipherIn = cipherIn;
        this.cipherOut = cipherOut;
        this.useRandomAccessFile = useRandomAccessFile;
        this.deleteFilesAfterClose = deleteFilesAfterClose;
        this.lockingDisabled = lockingDisabled;
        reopen();
    }


    private void reopen() {
        try{
            _file = new PageFile(_filename, readonly, transactionsDisabled, cipherIn, cipherOut, useRandomAccessFile, lockingDisabled);
            _pageman = new PageManager(_file);
            _physMgr = new PhysicalRowIdManager(_file, _pageman);

            _logicMgr = new LogicalRowIdManager(_file, _pageman);

            long versionNumber = getRoot(STORE_VERSION_NUMBER_ROOT);
            if (versionNumber > STORE_FORMAT_VERSION)
                throw new IOException("Unsupported version of store. Please update JDBM. Minimal supported ver:" + STORE_FORMAT_VERSION + ", store ver:" + versionNumber);
            if (!readonly)
                setRoot(STORE_VERSION_NUMBER_ROOT, STORE_FORMAT_VERSION);
        }catch(IOException e){
            throw new IOError(e);
        }
    }


    /**
     * Closes the record manager.
     *
     * @throws IOException when one of the underlying I/O operations fails.
     */
    public synchronized void close() {
        checkNotClosed();
        try {
            super.close();
            _pageman.close();
            _file.close();
            if(deleteFilesAfterClose)
                _file.storage.deleteAllFiles();

            _pageman = null;
            _file = null;

        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public boolean isClosed() {
        return _pageman==null;
    }


    public synchronized <A> long insert(final A obj, final Serializer<A> serializer, final boolean disableCache)
            throws IOException {
        checkNotClosed();
        checkCanWrite();

        if (needsAutoCommit()) {
            commit();
        }

        if (bufferInUse) {
            //the current reusable buffer is in use, fall back to creating a new instance
            DataInputOutput buffer2 = new DataInputOutput();
            return insert2(obj, serializer, buffer2);
        }

        try {
            bufferInUse = true;
            return insert2(obj, serializer, buffer);
        } finally {
            bufferInUse = false;
        }
    }

    boolean needsAutoCommit() {
        return transactionsDisabled && !commitInProgress &&
                (_file.getDirtyPageCount() >= AUTOCOMMIT_AFTER_N_PAGES);
    }


    private <A> long insert2(A obj, Serializer<A> serializer, DataInputOutput buf)
            throws IOException {
        buf.reset();

        long physRowId;
        if(obj==PREALOCATE_OBJ){
            //if the inserted record is PREALOCATE_OBJ, it gets special handling:
            //it is inserted only into _logicMgr with a special value to indicate null.
            //this is used to preallocate a recid for lazy inserts in the cache
            physRowId = PREALOCATE_PHYS_RECID;
        }else{
            serializer.serialize(buf, obj);
            if(buf.getPos()>RecordHeader.MAX_RECORD_SIZE){
                throw new IllegalArgumentException("Too big record. JDBM only supports record size up to: "+RecordHeader.MAX_RECORD_SIZE+" bytes. Record size was: "+buf.getPos());
            }
            physRowId = _physMgr.insert(buf.getBuf(), 0, buf.getPos());
        }
        final long recid = _logicMgr.insert(physRowId);

        if (DEBUG) {
            System.out.println("BaseRecordManager.insert() recid " + recid + " length " + buf.getPos());
        }

        return compressRecid(recid);
    }


    public synchronized void delete(long logRowId)
            throws IOException {

        checkNotClosed();
        checkCanWrite();
        if (logRowId <= 0) {
            throw new IllegalArgumentException("Argument 'recid' is invalid: "
                    + logRowId);
        }

        if (needsAutoCommit()) {
            commit();
        }

        if (DEBUG) {
            System.out.println("BaseRecordManager.delete() recid " + logRowId);
        }

        logRowId = decompressRecid(logRowId);

        long physRowId = _logicMgr.fetch(logRowId);
        _logicMgr.delete(logRowId);
        if(physRowId!=PREALOCATE_PHYS_RECID){
            _physMgr.free(physRowId);
        }
    }


    public synchronized <A> void update(long recid, A obj, Serializer<A> serializer)
            throws IOException {
        checkNotClosed();
        checkCanWrite();
        if (recid <= 0) {
            throw new IllegalArgumentException("Argument 'recid' is invalid: "
                    + recid);
        }

        if (needsAutoCommit()) {
            commit();
        }

        if (bufferInUse) {
            //the current reusable buffer is in use, have to create a new instance
            DataInputOutput buffer2 = new DataInputOutput();
            update2(recid, obj, serializer, buffer2);
            return;
        }

        try {
            bufferInUse = true;
            update2(recid, obj, serializer, buffer);
        } finally {
            bufferInUse = false;
        }
    }


    private <A> void update2(long logRecid, final A obj, final Serializer<A> serializer, final DataInputOutput buf)
            throws IOException {

        logRecid = decompressRecid(logRecid);

        long physRecid = _logicMgr.fetch(logRecid);
        if (physRecid == 0)
            throw new IOException("Can not update, recid does not exist: " + logRecid);
        buf.reset();
        serializer.serialize(buf, obj);

        if (DEBUG) {
            System.out.println("BaseRecordManager.update() recid " + logRecid + " length " + buf.getPos());
        }

        long newRecid =
                physRecid != PREALOCATE_PHYS_RECID ?
                        _physMgr.update(physRecid, buf.getBuf(), 0, buf.getPos()) :
                        //the previous record was only virtual and does not actually exist, so make a new insert
                        _physMgr.insert(buf.getBuf(), 0, buf.getPos());

        _logicMgr.update(logRecid, newRecid);
    }


    public synchronized <A> A fetch(final long recid, final Serializer<A> serializer)
            throws IOException {

        checkNotClosed();
        if (recid <= 0) {
            throw new IllegalArgumentException("Argument 'recid' is invalid: " + recid);
        }

        if (bufferInUse) {
            //the current reusable buffer is in use, have to create a new instance
            DataInputOutput buffer2 = new DataInputOutput();
            return fetch2(recid, serializer, buffer2);
        }
        try {
            bufferInUse = true;
            return fetch2(recid, serializer, buffer);
        } finally {
            bufferInUse = false;
        }
    }

    public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
        //we don't have any cache, so we can ignore the disableCache parameter
        return fetch(recid, serializer);
    }


    private <A> A fetch2(long recid, final Serializer<A> serializer, final DataInputOutput buf)
            throws IOException {

        recid = decompressRecid(recid);

        buf.reset();
        long physLocation = _logicMgr.fetch(recid);
        if (physLocation == 0) {
            //throw new IOException("Record not found, recid: "+recid);
            return null;
        }
        if(physLocation == PREALOCATE_PHYS_RECID){
            throw new InternalError("cache should prevent this!");
        }

        _physMgr.fetch(buf, physLocation);

        if (DEBUG) {
            System.out.println("BaseRecordManager.fetch() recid " + recid + " length " + buf.getPos());
        }
        buf.resetForReading();
        try {
            return serializer.deserialize(buf); //TODO there should be a write limit to throw EOFException
        } catch (ClassNotFoundException e) {
            throw new IOError(e);
        }
    }
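
    // Design note on the bufferInUse guard shared by insert/update/fetch above:
    // all public entry points are synchronized, so the only way to re-enter the
    // store is from within a Serializer callback. The flag lets the common case
    // reuse the one preallocated DataInputOutput, falling back to a throwaway
    // instance only for such reentrant calls:
    //
    //   if (bufferInUse) return fetch2(recid, serializer, new DataInputOutput());
    //   try { bufferInUse = true; return fetch2(recid, serializer, buffer); }
    //   finally { bufferInUse = false; }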

    byte[] fetchRaw(long recid) throws IOException {
        recid = decompressRecid(recid);
        long physLocation = _logicMgr.fetch(recid);
        if (physLocation == 0) {
            //throw new IOException("Record not found, recid: "+recid);
            return null;
        }
        DataInputOutput i = new DataInputOutput();
        _physMgr.fetch(i, physLocation);
        return i.toByteArray();
    }


    public synchronized long getRoot(final byte id){
        checkNotClosed();

        return _pageman.getFileHeader().fileHeaderGetRoot(id);
    }


    public synchronized void setRoot(final byte id, final long rowid){
        checkNotClosed();
        checkCanWrite();

        _pageman.getFileHeader().fileHeaderSetRoot(id, rowid);
    }


    public synchronized void commit() {
        try {
            commitInProgress = true;
            checkNotClosed();
            checkCanWrite();
            /** flush free phys rows into pages */
            _physMgr.commit();
            _logicMgr.commit();

            /** commit pages */
            _pageman.commit();

        } catch (IOException e) {
            throw new IOError(e);
        }finally {
            commitInProgress = false;
        }
    }


    public synchronized void rollback() {
        if (transactionsDisabled)
            throw new IllegalAccessError("Transactions are disabled, cannot rollback");

        try {
            checkNotClosed();
            _physMgr.rollback();
            _logicMgr.rollback();
            _pageman.rollback();

            super.rollback();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public void copyToZip(String zipFile) {
        try {
            String zip = zipFile;
            String zip2 = "db";
            ZipOutputStream z = new ZipOutputStream(new FileOutputStream(zip));

            //copy zero pages
            {
                String file = zip2 + 0;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, _pageman.getHeaderBufData()));
                z.closeEntry();
            }

            //iterate over pages and create a new zip entry for each
            for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo page = _file.get(pageid);
                String file = zip2 + pageid;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, page.getData()));
                z.closeEntry();
                _file.release(page);
            }
            for (long pageid = _pageman.getFirst(Magic.FREELOGIDS_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo page = _file.get(pageid);
                String file = zip2 + pageid;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, page.getData()));
                z.closeEntry();
                _file.release(page);
            }

            for (long pageid = _pageman.getFirst(Magic.USED_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo page = _file.get(pageid);
                String file = zip2 + pageid;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, page.getData()));
                z.closeEntry();
                _file.release(page);
            }
            for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo page = _file.get(pageid);
                String file = zip2 + pageid;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, page.getData()));
                z.closeEntry();
                _file.release(page);
            }
            for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo page = _file.get(pageid);
                String file = zip2 + pageid;
                z.putNextEntry(new ZipEntry(file));
                z.write(Utils.encrypt(cipherIn, page.getData()));
                z.closeEntry();
                _file.release(page);
            }

            z.close();

        } catch (IOException e) {
            throw new IOError(e);
        }
    }
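
    // Hypothetical refactoring sketch (not in the original source): the five
    // near-identical loops in copyToZip() could share one helper built from the
    // same calls used above.
    private void copyPagesToZip(ZipOutputStream z, String prefix, short pageType) throws IOException {
        for (long pageid = _pageman.getFirst(pageType);
             pageid != 0;
             pageid = _pageman.getNext(pageid)) {
            PageIo page = _file.get(pageid);
            z.putNextEntry(new ZipEntry(prefix + pageid));    //one zip entry per page
            z.write(Utils.encrypt(cipherIn, page.getData()));
            z.closeEntry();
            _file.release(page);
        }
    }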


    public synchronized void clearCache() {
        // no cache
    }


    private long statisticsCountPages(short pageType) throws IOException {
        long pageCounter = 0;

        for (long pageid = _pageman.getFirst(pageType);
             pageid != 0;
             pageid = _pageman.getNext(pageid)
                ) {
            pageCounter++;
        }

        return pageCounter;
    }

    public synchronized String calculateStatistics() {
        checkNotClosed();

        try {

            final StringBuilder b = new StringBuilder();

            // count pages
            {

                b.append("PAGES:\n");
                long total = 0;
                long pages = statisticsCountPages(Magic.USED_PAGE);
                total += pages;
                b.append(" " + pages + " used pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
                pages = statisticsCountPages(Magic.TRANSLATION_PAGE);
                total += pages;
                b.append(" " + pages + " record translation pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
                pages = statisticsCountPages(Magic.FREE_PAGE);
                total += pages;
                b.append(" " + pages + " free (unused) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
                pages = statisticsCountPages(Magic.FREEPHYSIDS_PAGE);
                total += pages;
                b.append(" " + pages + " free (phys) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
                pages = statisticsCountPages(Magic.FREELOGIDS_PAGE);
                total += pages;
                b.append(" " + pages + " free (logical) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
                b.append(" Total number of pages is " + total + " with size " + Utils.formatSpaceUsage(total * Storage.PAGE_SIZE) + "\n");

            }
            {
                b.append("RECORDS:\n");

                long recordCount = 0;
                long freeRecordCount = 0;
                long maximalRecordSize = 0;
                long maximalAvailSizeDiff = 0;
                long totalRecordSize = 0;
                long totalAvailDiff = 0;

                // count records
                for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
                     pageid != 0;
                     pageid = _pageman.getNext(pageid)
                        ) {
                    PageIo io = _file.get(pageid);

                    for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
                        final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
                        final long physLoc = io.pageHeaderGetLocation((short) pos);

                        if (physLoc == 0) {
                            freeRecordCount++;
                            continue;
                        }

                        if (physLoc == PREALOCATE_PHYS_RECID) {
                            continue;
                        }

                        recordCount++;

                        // get size
                        PageIo page = _file.get(physLoc >>> Storage.PAGE_SIZE_SHIFT);
                        final short physOffset = (short) (physLoc & Storage.OFFSET_MASK);
                        int availSize = RecordHeader.getAvailableSize(page, physOffset);
                        int currentSize = RecordHeader.getCurrentSize(page, physOffset);
                        _file.release(page);

                        maximalAvailSizeDiff = Math.max(maximalAvailSizeDiff, availSize - currentSize);
                        maximalRecordSize = Math.max(maximalRecordSize, currentSize);
                        totalAvailDiff += availSize - currentSize;
                        totalRecordSize += currentSize;

                    }
                    _file.release(io);
                }

                b.append(" Contains " + recordCount + " records and " + freeRecordCount + " free slots.\n");
                b.append(" Total space occupied by data is " + Utils.formatSpaceUsage(totalRecordSize) + "\n");
                b.append(" Average data size in record is " + Utils.formatSpaceUsage(Math.round(1D * totalRecordSize / recordCount)) + "\n");
                b.append(" Maximal data size in record is " + Utils.formatSpaceUsage(maximalRecordSize) + "\n");
                b.append(" Space wasted in record fragmentation is " + Utils.formatSpaceUsage(totalAvailDiff) + "\n");
                b.append(" Maximal space wasted in single record fragmentation is " + Utils.formatSpaceUsage(maximalAvailSizeDiff) + "\n");
            }

            return b.toString();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
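
    // Shape of the returned report (reconstructed from the appends above; counts and
    // the exact Utils.formatSpaceUsage rendering are illustrative):
    //   PAGES:
    //    12 used pages with size 48KB
    //    2 record translation pages with size 8KB
    //    ...
    //    Total number of pages is 15 with size 60KB
    //   RECORDS:
    //    Contains 1200 records and 8 free slots.
    //    ...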

    public synchronized void defrag(boolean sortCollections) {

        try {
            checkNotClosed();
            checkCanWrite();
            commit();
            final String filename2 = _filename + "_defrag" + System.currentTimeMillis();
            final String filename1 = _filename;
            DBStore db2 = new DBStore(filename2, false, true, cipherIn, cipherOut, false, false, false);

            // recreate logical file with original page layout
            {
                // find minimal logical pageid (logical pageids are negative)
                LongHashMap<String> logicalPages = new LongHashMap<String>();
                long minpageid = 0;
                for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
                     pageid != 0;
                     pageid = _pageman.getNext(pageid)
                        ) {
                    minpageid = Math.min(minpageid, pageid);
                    logicalPages.put(pageid, Utils.EMPTY_STRING);
                }

                // fill second db with logical pages
                long pageCounter = 0;
                for (
                        long pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE);
                        pageid >= minpageid;
                        pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE)
                        ) {
                    pageCounter++;
                    if (pageCounter % 1000 == 0)
                        db2.commit();
                }

                logicalPages = null;
            }

            // reinsert collections so physical records are located near each other;
            // iterate over named object recids, sorted with a TreeSet
            if (sortCollections) {
                long nameRecid = getRoot(NAME_DIRECTORY_ROOT);
                Collection<Long> recids = new TreeSet<Long>();
                if (nameRecid != 0) {
                    HTree<String, Long> m = fetch(nameRecid);
                    recids.addAll(m.values());
                }

                for (Long namedRecid : recids) {
                    Object obj = fetch(namedRecid);
                    if (obj instanceof LinkedList) {
                        LinkedList2.defrag(namedRecid, this, db2);
                    } else if (obj instanceof HTree) {
                        HTree.defrag(namedRecid, this, db2);
                    } else if (obj instanceof BTree) {
                        BTree.defrag(namedRecid, this, db2);
                    }
                }
            }

            for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
                 pageid != 0;
                 pageid = _pageman.getNext(pageid)
                    ) {
                PageIo io = _file.get(pageid);

                for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
                    final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
                    if (pos > Short.MAX_VALUE)
                        throw new Error();

                    // write to new file
                    final long logicalRowId = ((-pageid) << Storage.PAGE_SIZE_SHIFT) + (long) pos;

                    // read from logical location in second db,
                    // check if record was already inserted as part of collections
                    if (db2._pageman.getLast(Magic.TRANSLATION_PAGE) <= pageid &&
                            db2._logicMgr.fetch(logicalRowId) != 0) {
                        // yes, this record already exists in second db
                        continue;
                    }

                    // get physical location in this db
                    final long physRowId = io.pageHeaderGetLocation((short) pos);

                    if (physRowId == 0)
                        continue;

                    if (physRowId == PREALOCATE_PHYS_RECID) {
                        db2._logicMgr.forceInsert(logicalRowId, physRowId);
                        continue;
                    }

                    // read from physical location in this db
                    DataInputOutput b = new DataInputOutput();
                    _physMgr.fetch(b, physRowId);
                    byte[] bb = b.toByteArray();

                    // force insert into other file, without decompressing logical id to external form
                    long physLoc = db2._physMgr.insert(bb, 0, bb.length);
                    db2._logicMgr.forceInsert(logicalRowId, physLoc);

                }
                _file.release(io);
                db2.commit();
            }
            for (byte b = 0; b < Magic.FILE_HEADER_NROOTS; b++) {
                db2.setRoot(b, getRoot(b));
            }

            db2.close();
            _pageman.close();
            _file.close();

            List<File> filesToDelete = new ArrayList<File>();
            // now rename old files
            String[] exts = {StorageDiskMapped.IDR, StorageDiskMapped.DBR};
            for (String ext : exts) {
                String f1 = filename1 + ext;
                String f2 = filename2 + "_OLD" + ext;

                // first rename transaction log
                File f1t = new File(f1 + StorageDisk.transaction_log_file_extension);
                File f2t = new File(f2 + StorageDisk.transaction_log_file_extension);
                f1t.renameTo(f2t);
                filesToDelete.add(f2t);

                // rename data files, iterating while the numbered files exist
                for (int i = 0; ; i++) {
                    File f1d = new File(f1 + "." + i);
                    if (!f1d.exists()) break;
                    File f2d = new File(f2 + "." + i);
                    f1d.renameTo(f2d);
                    filesToDelete.add(f2d);
                }
            }

            // rename new files
            for (String ext : exts) {
                String f1 = filename2 + ext;
                String f2 = filename1 + ext;

                // first rename transaction log
                File f1t = new File(f1 + StorageDisk.transaction_log_file_extension);
                File f2t = new File(f2 + StorageDisk.transaction_log_file_extension);
                f1t.renameTo(f2t);

                // rename data files, iterating while the numbered files exist
                for (int i = 0; ; i++) {
                    File f1d = new File(f1 + "." + i);
                    if (!f1d.exists()) break;
                    File f2d = new File(f2 + "." + i);
                    f1d.renameTo(f2d);
                }
            }

            for (File d : filesToDelete) {
                d.delete();
            }

            reopen();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
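
    // Usage sketch (hypothetical): compact a fragmented store in place.
    //   store.defrag(true);  // true additionally reinserts named collections first,
    //                        // so their physical records end up clustered together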

    /**
     * Insert data at a forced logicalRowId; use only for defragmentation!
     *
     * @param logicalRowId
     * @param data
     * @throws IOException
     */
    void forceInsert(long logicalRowId, byte[] data) throws IOException {
        logicalRowId = decompressRecid(logicalRowId);

        if (needsAutoCommit()) {
            commit();
        }

        long physLoc = _physMgr.insert(data, 0, data.length);
        _logicMgr.forceInsert(logicalRowId, physLoc);
    }
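
    // The recid handed to forceInsert arrives in the compressed external form, hence
    // the decompressRecid call above; compressRecid/decompressRecid are defined below.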


    /**
     * Returns the number of records stored in the database.
     * Used by unit tests.
     */
    long countRecords() throws IOException {
        long counter = 0;

        long page = _pageman.getFirst(Magic.TRANSLATION_PAGE);
        while (page != 0) {
            PageIo io = _file.get(page);
            for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
                int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
                if (pos > Short.MAX_VALUE)
                    throw new Error();

                // get physical location
                long physRowId = io.pageHeaderGetLocation((short) pos);

                if (physRowId != 0)
                    counter += 1;
            }
            _file.release(io);
            page = _pageman.getNext(page);
        }
        return counter;
    }


    private static int COMPRESS_RECID_PAGE_SHIFT = Integer.MIN_VALUE;

    static {
        int shift = 1;
        while ((1 << shift) < LogicalRowIdManager.ELEMS_PER_PAGE)
            shift++;
        COMPRESS_RECID_PAGE_SHIFT = shift;
    }


    private final static long COMPRESS_RECID_OFFSET_MASK = 0xFFFFFFFFFFFFFFFFL >>> (64 - COMPRESS_RECID_PAGE_SHIFT);


    /**
     * Compress a recid from physical form (page - offset) to (page - slot).
     * The resulting number is smaller and can be packed more easily with LongPacker.
     */
    static long compressRecid(final long recid) {
        final long page = recid >>> Storage.PAGE_SIZE_SHIFT;
        short offset = (short) (recid & Storage.OFFSET_MASK);

        offset = (short) (offset - Magic.PAGE_HEADER_SIZE);
        if (offset % Magic.PhysicalRowId_SIZE != 0)
            throw new InternalError("recid not divisible " + Magic.PhysicalRowId_SIZE);
        long slot = offset / Magic.PhysicalRowId_SIZE;

        return (page << COMPRESS_RECID_PAGE_SHIFT) + slot;
    }

    static long decompressRecid(final long recid) {
        final long page = recid >>> COMPRESS_RECID_PAGE_SHIFT;
        final short offset = (short) ((recid & COMPRESS_RECID_OFFSET_MASK) * Magic.PhysicalRowId_SIZE + Magic.PAGE_HEADER_SIZE);
        return (page << Storage.PAGE_SIZE_SHIFT) + (long) offset;
    }
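
    // Worked example (illustrative constants only -- the real values live in Storage,
    // Magic and LogicalRowIdManager and may differ): with PAGE_SIZE_SHIFT = 12,
    // PAGE_HEADER_SIZE = 18 and PhysicalRowId_SIZE = 6, the physical recid for
    // page 5 / offset 30 is (5 << 12) + 30. compressRecid maps offset 30 to slot
    // (30 - 18) / 6 = 2 and returns (5 << COMPRESS_RECID_PAGE_SHIFT) + 2, a much
    // smaller number for LongPacker; decompressRecid inverts the mapping exactly.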

}

@@ -0,0 +1,297 @@
package org.apache.jdbm;

import java.io.*;
import java.nio.ByteBuffer;
import java.util.Arrays;

/**
 * Utility class which implements DataInput and DataOutput on top of a byte[] buffer
 * with minimal overhead
 *
 * @author Jan Kotek
 */
class DataInputOutput implements DataInput, DataOutput, ObjectInput, ObjectOutput {

    private int pos = 0;
    private int count = 0;
    private byte[] buf;


    public DataInputOutput() {
        buf = new byte[8];
    }

    public DataInputOutput(byte[] data) {
        buf = data;
        count = data.length;
    }

    public byte[] getBuf() {
        return buf;
    }

    public int getPos() {
        return pos;
    }


    public void reset() {
        pos = 0;
        count = 0;
    }


    public void resetForReading() {
        count = pos;
        pos = 0;
    }

    public void reset(byte[] b) {
        pos = 0;
        buf = b;
        count = b.length;
    }

    public byte[] toByteArray() {
        byte[] d = new byte[pos];
        System.arraycopy(buf, 0, d, 0, pos);
        return d;
    }

    public int available() {
        return count - pos;
    }


    public void readFully(byte[] b) throws IOException {
        readFully(b, 0, b.length);
    }

    public void readFully(byte[] b, int off, int len) throws IOException {
        System.arraycopy(buf, pos, b, off, len);
        pos += len;
    }

    public int skipBytes(int n) throws IOException {
        pos += n;
        return n;
    }

    public boolean readBoolean() throws IOException {
        return buf[pos++] == 1;
    }

    public byte readByte() throws IOException {
        return buf[pos++];
    }

    public int readUnsignedByte() throws IOException {
        return buf[pos++] & 0xff;
    }

    public short readShort() throws IOException {
        return (short)
                (((short) (buf[pos++] & 0xff) << 8) |
                        ((short) (buf[pos++] & 0xff) << 0));
    }

    public int readUnsignedShort() throws IOException {
        return (((int) (buf[pos++] & 0xff) << 8) |
                ((int) (buf[pos++] & 0xff) << 0));
    }

    public char readChar() throws IOException {
        return (char) readInt();
    }

    public int readInt() throws IOException {
        return
                (((buf[pos++] & 0xff) << 24) |
                        ((buf[pos++] & 0xff) << 16) |
                        ((buf[pos++] & 0xff) << 8) |
                        ((buf[pos++] & 0xff) << 0));
    }

    public long readLong() throws IOException {
        return
                (((long) (buf[pos++] & 0xff) << 56) |
                        ((long) (buf[pos++] & 0xff) << 48) |
                        ((long) (buf[pos++] & 0xff) << 40) |
                        ((long) (buf[pos++] & 0xff) << 32) |
                        ((long) (buf[pos++] & 0xff) << 24) |
                        ((long) (buf[pos++] & 0xff) << 16) |
                        ((long) (buf[pos++] & 0xff) << 8) |
                        ((long) (buf[pos++] & 0xff) << 0));
    }

    public float readFloat() throws IOException {
        return Float.intBitsToFloat(readInt());
    }

    public double readDouble() throws IOException {
        return Double.longBitsToDouble(readLong());
    }

    public String readLine() throws IOException {
        return readUTF();
    }

    public String readUTF() throws IOException {
        return Serialization.deserializeString(this);
    }

    /**
     * make sure there will be enough space in the buffer to write N bytes
     */
    private void ensureAvail(int n) {
        if (pos + n >= buf.length) {
            int newSize = Math.max(pos + n, buf.length * 2);
            buf = Arrays.copyOf(buf, newSize);
        }
    }


    public void write(int b) throws IOException {
        ensureAvail(1);
        buf[pos++] = (byte) b;
    }

    public void write(byte[] b) throws IOException {
        write(b, 0, b.length);
    }

    public void write(byte[] b, int off, int len) throws IOException {
        ensureAvail(len);
        System.arraycopy(b, off, buf, pos, len);
        pos += len;
    }

    public void writeBoolean(boolean v) throws IOException {
        ensureAvail(1);
        buf[pos++] = (byte) (v ? 1 : 0);
    }

    public void writeByte(int v) throws IOException {
        ensureAvail(1);
        buf[pos++] = (byte) (v);
    }

    public void writeShort(int v) throws IOException {
        ensureAvail(2);
        buf[pos++] = (byte) (0xff & (v >> 8));
        buf[pos++] = (byte) (0xff & (v >> 0));
    }

    public void writeChar(int v) throws IOException {
        writeInt(v);
    }

    public void writeInt(int v) throws IOException {
        ensureAvail(4);
        buf[pos++] = (byte) (0xff & (v >> 24));
        buf[pos++] = (byte) (0xff & (v >> 16));
        buf[pos++] = (byte) (0xff & (v >> 8));
        buf[pos++] = (byte) (0xff & (v >> 0));
    }

    public void writeLong(long v) throws IOException {
        ensureAvail(8);
        buf[pos++] = (byte) (0xff & (v >> 56));
        buf[pos++] = (byte) (0xff & (v >> 48));
        buf[pos++] = (byte) (0xff & (v >> 40));
        buf[pos++] = (byte) (0xff & (v >> 32));
        buf[pos++] = (byte) (0xff & (v >> 24));
        buf[pos++] = (byte) (0xff & (v >> 16));
        buf[pos++] = (byte) (0xff & (v >> 8));
        buf[pos++] = (byte) (0xff & (v >> 0));
    }

    public void writeFloat(float v) throws IOException {
        ensureAvail(4);
        writeInt(Float.floatToIntBits(v));
    }

    public void writeDouble(double v) throws IOException {
        ensureAvail(8);
        writeLong(Double.doubleToLongBits(v));
    }

    public void writeBytes(String s) throws IOException {
        writeUTF(s);
    }

    public void writeChars(String s) throws IOException {
        writeUTF(s);
    }

    public void writeUTF(String s) throws IOException {
        Serialization.serializeString(this, s);
    }

    /** helper method to write data directly from PageIo */
    public void writeFromByteBuffer(ByteBuffer b, int offset, int length) {
        ensureAvail(length);
        b.position(offset);
        b.get(buf, pos, length);
        pos += length;
    }


    // temp var used for Externalizable
    SerialClassInfo serializer;
    // temp var used for Externalizable
    Serialization.FastArrayList objectStack;

    public Object readObject() throws ClassNotFoundException, IOException {
        // is here just to implement ObjectInput.
        // Fake method which reads data from serializer.
        // We could probably implement a separate wrapper for this, but I want to save class space.
        return serializer.deserialize(this, objectStack);
    }

    public int read() throws IOException {
        // is here just to implement ObjectInput
        return readUnsignedByte();
    }

    public int read(byte[] b) throws IOException {
        // is here just to implement ObjectInput
        readFully(b);
        return b.length;
    }

    public int read(byte[] b, int off, int len) throws IOException {
        // is here just to implement ObjectInput
        readFully(b, off, len);
        return len;
    }

    public long skip(long n) throws IOException {
        // is here just to implement ObjectInput
        pos += n;
        return n;
    }

    public void close() throws IOException {
        // is here just to implement ObjectInput
        // do nothing
    }

    public void writeObject(Object obj) throws IOException {
        // is here just to implement ObjectOutput
        serializer.serialize(this, obj, objectStack);
    }


    public void flush() throws IOException {
        // is here just to implement ObjectOutput
        // do nothing
    }

}
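
// Round-trip sketch (hypothetical usage): the same instance is written to, then
// flipped for reading with resetForReading(), which sets count = pos and pos = 0.
//   DataInputOutput b = new DataInputOutput();
//   b.writeInt(42);
//   b.writeLong(7L);
//   b.resetForReading();
//   int i = b.readInt();    // 42
//   long l = b.readLong();  // 7L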

@@ -0,0 +1,215 @@
///*
//package org.apache.jdbm;
//
//import java.io.DataInput;
//import java.io.DataOutput;
//import java.io.IOException;
//import java.nio.Buffer;
//import java.nio.ByteBuffer;
//import java.util.Arrays;
//
//*/
///**
// * Utility class which implements DataInput and DataOutput on top of ByteBuffer
// * with minimal overhead
// * This class is not used, is left here in case we would ever need it.
// *
// * @author Jan Kotek
// *//*
//
//class DataInputOutput2 implements DataInput, DataOutput {
//
//    private ByteBuffer buf;
//
//
//    public DataInputOutput2() {
//        buf = ByteBuffer.allocate(8);
//    }
//
//    public DataInputOutput2(ByteBuffer data) {
//        buf = data;
//    }
//
//    public DataInputOutput2(byte[] data) {
//        buf = ByteBuffer.wrap(data);
//    }
//
//
//    public int getPos() {
//        return buf.position();
//    }
//
//
//    public void reset() {
//        buf.rewind();
//    }
//
//
//    public void reset(byte[] b) {
//        buf = ByteBuffer.wrap(b);
//    }
//
//    public void resetForReading() {
//        buf.flip();
//    }
//
//
//    public byte[] toByteArray() {
//        byte[] d = new byte[buf.position()];
//        buf.position(0);
//        buf.get(d); // reading N bytes restores the current position
//
//        return d;
//    }
//
//    public int available() {
//        return buf.remaining();
//    }
//
//
//    public void readFully(byte[] b) throws IOException {
//        readFully(b, 0, b.length);
//    }
//
//    public void readFully(byte[] b, int off, int len) throws IOException {
//        buf.get(b, off, len);
//    }
//
//    public int skipBytes(int n) throws IOException {
//        buf.position(buf.position() + n);
//        return n;
//    }
//
//    public boolean readBoolean() throws IOException {
//        return buf.get() == 1;
//    }
//
//    public byte readByte() throws IOException {
//        return buf.get();
//    }
//
//    public int readUnsignedByte() throws IOException {
//        return buf.get() & 0xff;
//    }
//
//    public short readShort() throws IOException {
//        return buf.getShort();
//    }
//
//    public int readUnsignedShort() throws IOException {
//        return (((int) (buf.get() & 0xff) << 8) |
//                ((int) (buf.get() & 0xff) << 0));
//    }
//
//    public char readChar() throws IOException {
//        return (char) readInt();
//    }
//
//    public int readInt() throws IOException {
//        return buf.getInt();
//    }
//
//    public long readLong() throws IOException {
//        return buf.getLong();
//    }
//
//    public float readFloat() throws IOException {
//        return buf.getFloat();
//    }
//
//    public double readDouble() throws IOException {
//        return buf.getDouble();
//    }
//
//    public String readLine() throws IOException {
//        return readUTF();
//    }
//
//    public String readUTF() throws IOException {
//        return Serialization.deserializeString(this);
//    }
//
//    */
///**
// * make sure there will be enough space in buffer to write N bytes
// *//*
//
//    private void ensureAvail(int n) {
//        int pos = buf.position();
//        if (pos + n >= buf.limit()) {
//            int newSize = Math.max(pos + n, buf.limit() * 2);
//            byte[] b = new byte[newSize];
//            buf.get(b);
//            buf = ByteBuffer.wrap(b);
//            buf.position(pos);
//        }
//    }
//
//
//    public void write(final int b) throws IOException {
//        ensureAvail(1);
//        buf.put((byte) b);
//    }
//
//    public void write(final byte[] b) throws IOException {
//        write(b, 0, b.length);
//    }
//
//    public void write(final byte[] b, final int off, final int len) throws IOException {
//        ensureAvail(len);
//        buf.put(b, off, len);
//    }
//
//    public void writeBoolean(final boolean v) throws IOException {
//        ensureAvail(1);
//        buf.put((byte) (v ? 1 : 0));
//    }
//
//    public void writeByte(final int v) throws IOException {
//        ensureAvail(1);
//        buf.put((byte) v);
//    }
//
//    public void writeShort(final short v) throws IOException {
//        ensureAvail(2);
//        buf.putShort(v);
//    }
//
//    public void writeChar(final int v) throws IOException {
//        writeInt(v);
//    }
//
//    public void writeInt(final int v) throws IOException {
//        ensureAvail(4);
//        buf.putInt(v);
//    }
//
//    public void writeLong(final long v) throws IOException {
//        ensureAvail(8);
//        buf.putLong(v);
//    }
//
//    public void writeFloat(final float v) throws IOException {
//        ensureAvail(4);
//        buf.putFloat(v);
//    }
//
//    public void writeDouble(final double v) throws IOException {
//        ensureAvail(8);
//        buf.putDouble(v);
//    }
//
//    public void writeBytes(String s) throws IOException {
//        writeUTF(s);
//    }
//
//    public void writeChars(String s) throws IOException {
//        writeUTF(s);
//    }
//
//    public void writeUTF(String s) throws IOException {
//        Serialization.serializeString(this, s);
//    }
//
//}
//*/

@@ -0,0 +1,542 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import java.io.*;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Persistent HashMap implementation for DB.
 * Implemented as an H*Tree structure.
 *
 * @author Alex Boisvert
 * @author Jan Kotek
 */
class HTree<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {

    final Serializer SERIALIZER = new Serializer<Object>() {

        public Object deserialize(DataInput ds2) throws IOException {
            DataInputOutput ds = (DataInputOutput) ds2;
            try {
                int i = ds.readUnsignedByte();
                if (i == SerializationHeader.HTREE_BUCKET) { // is HashBucket?
                    HTreeBucket ret = new HTreeBucket(HTree.this);
                    if (loadValues)
                        ret.readExternal(ds);

                    if (loadValues && ds.available() != 0)
                        throw new InternalError("bytes left: " + ds.available());
                    return ret;
                } else if (i == SerializationHeader.HTREE_DIRECTORY) {
                    HTreeDirectory ret = new HTreeDirectory(HTree.this);
                    ret.readExternal(ds);
                    if (loadValues && ds.available() != 0)
                        throw new InternalError("bytes left: " + ds.available());
                    return ret;
                } else {
                    throw new InternalError("Wrong HTree header: " + i);
                }
            } catch (ClassNotFoundException e) {
                throw new IOException(e);
            }

        }

        public void serialize(DataOutput out, Object obj) throws IOException {
            if (obj instanceof HTreeBucket) {
                out.write(SerializationHeader.HTREE_BUCKET);
                HTreeBucket b = (HTreeBucket) obj;
                b.writeExternal(out);
            } else {
                out.write(SerializationHeader.HTREE_DIRECTORY);
                HTreeDirectory n = (HTreeDirectory) obj;
                n.writeExternal(out);
            }
        }
    };
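
    // The serializer above dispatches on a one-byte header: records written with
    // SerializationHeader.HTREE_BUCKET deserialize into an HTreeBucket, those with
    // HTREE_DIRECTORY into an HTreeDirectory; any other header means a corrupted record.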

    protected final ReadWriteLock lock = new ReentrantReadWriteLock();

    /**
     * Listeners which are notified about changes in records
     */
    protected RecordListener[] recordListeners = new RecordListener[0];

    /**
     * Serializer used to serialize index keys (optional)
     */
    protected Serializer<K> keySerializer;


    /**
     * Serializer used to serialize index values (optional)
     */
    protected Serializer<V> valueSerializer;
    protected boolean readonly = false;
    final long rootRecid;
    DBAbstract db;
    /** if false the map contains only keys; used for sets */
    boolean hasValues = true;

    /**
     * Counts structural changes in the tree at runtime. Used to support fail-fast behaviour.
     */
    int modCount;

    /**
     * indicates if values should be loaded during deserialization; set to false during defragmentation
     */
    private boolean loadValues = true;

    public Serializer<K> getKeySerializer() {
        return keySerializer;
    }

    public Serializer<V> getValueSerializer() {
        return valueSerializer;
    }


    /**
     * cache the writing buffer, so it does not have to be allocated on each write
     */
    AtomicReference<DataInputOutput> writeBufferCache = new AtomicReference<DataInputOutput>();


    /**
     * Create a persistent hashtable.
     */
    public HTree(DBAbstract db, Serializer<K> keySerializer, Serializer<V> valueSerializer, boolean hasValues)
            throws IOException {
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        this.db = db;
        this.hasValues = hasValues;

        HTreeDirectory<K, V> root = new HTreeDirectory<K, V>(this, (byte) 0);
        root.setPersistenceContext(0);
        this.rootRecid = db.insert(root, this.SERIALIZER, false);
    }


    /**
     * Load a persistent hashtable
     */
    public HTree(DBAbstract db, long rootRecid, Serializer<K> keySerializer, Serializer<V> valueSerializer, boolean hasValues)
            throws IOException {
        this.db = db;
        this.rootRecid = rootRecid;
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        this.hasValues = hasValues;
    }

    void setPersistenceContext(DBAbstract db) {
        this.db = db;
    }


    public V put(K key, V value) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");
        lock.writeLock().lock();

        try {
            if (key == null || value == null)
                throw new NullPointerException("Null key or value");

            V oldVal = (V) getRoot().put(key, value);
            if (oldVal == null) {
                modCount++;

                // increase size
                HTreeDirectory root = getRoot();
                root.size++;
                db.update(rootRecid, root, SERIALIZER);

                for (RecordListener<K, V> r : recordListeners)
                    r.recordInserted(key, value);
            } else {

                // notify listeners
                for (RecordListener<K, V> r : recordListeners)
                    r.recordUpdated(key, oldVal, value);
            }

            return oldVal;
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.writeLock().unlock();
        }
    }


    public V get(Object key) {
        if (key == null)
            return null;
        lock.readLock().lock();
        try {
            return getRoot().get((K) key);
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.readLock().unlock();
        }
    }


    public V remove(Object key) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");

        lock.writeLock().lock();
        try {
            if (key == null)
                return null;

            V val = (V) getRoot().remove(key);
            modCount++;

            if (val != null) {
                // decrease size
                HTreeDirectory root = getRoot();
                root.size--;
                db.update(rootRecid, root, SERIALIZER);

                for (RecordListener r : recordListeners)
                    r.recordRemoved(key, val);
            }

            return val;
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.writeLock().unlock();
        }
    }

    public boolean containsKey(Object key) {
        if (key == null)
            return false;
        // no need for locking, get is already locked
        V v = get((K) key);
        return v != null;
    }

    public void clear() {
        lock.writeLock().lock();
        try {
            Iterator<K> keyIter = keys();
            while (keyIter.hasNext()) {
                keyIter.next();
                keyIter.remove();
            }
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.writeLock().unlock();
        }
    }


    /**
     * Returns an iterator over the keys contained in this map
     */
    public Iterator<K> keys()
            throws IOException {
        lock.readLock().lock();
        try {
            return getRoot().keys();
        } finally {
            lock.readLock().unlock();
        }
    }


    public DBAbstract getRecordManager() {
        return db;
    }

    /**
     * add a RecordListener which is notified about record changes
     *
     * @param listener
     */
    public void addRecordListener(RecordListener<K, V> listener) {
        recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1);
        recordListeners[recordListeners.length - 1] = listener;
    }

    /**
     * remove a RecordListener which is notified about record changes
     *
     * @param listener
     */
    public void removeRecordListener(RecordListener<K, V> listener) {
        // copy into a mutable list first; Arrays.asList alone is fixed-size
        // and would throw UnsupportedOperationException on remove()
        List<RecordListener<K, V>> l = new ArrayList<RecordListener<K, V>>(Arrays.asList(recordListeners));
        l.remove(listener);
        recordListeners = l.toArray(new RecordListener[l.size()]);
    }


    public Set<Entry<K, V>> entrySet() {
        return _entrySet;
    }

    private Set<Entry<K, V>> _entrySet = new AbstractSet<Entry<K, V>>() {

        protected Entry<K, V> newEntry(K k, V v) {
            return new SimpleEntry<K, V>(k, v) {
                private static final long serialVersionUID = 978651696969194154L;

                public V setValue(V arg0) {
                    // put is already locked
                    HTree.this.put(getKey(), arg0);
                    return super.setValue(arg0);
                }

            };
        }

        public boolean add(java.util.Map.Entry<K, V> e) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");
            if (e.getKey() == null)
                throw new NullPointerException("Can not add null key");
            lock.writeLock().lock();
            try {
                if (e.getValue().equals(get(e.getKey())))
                    return false;
                HTree.this.put(e.getKey(), e.getValue());
                return true;
            } finally {
                lock.writeLock().unlock();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean contains(Object o) {
            if (o instanceof Entry) {
                Entry<K, V> e = (java.util.Map.Entry<K, V>) o;

                // get is already locked
                if (e.getKey() != null && HTree.this.get(e.getKey()) != null)
                    return true;
            }
            return false;
        }


        public Iterator<java.util.Map.Entry<K, V>> iterator() {
            try {
                final Iterator<K> br = keys();
                return new Iterator<Entry<K, V>>() {

                    public boolean hasNext() {
                        return br.hasNext();
                    }

                    public java.util.Map.Entry<K, V> next() {
                        K k = br.next();
                        return newEntry(k, get(k));
                    }

                    public void remove() {
                        if (readonly)
                            throw new UnsupportedOperationException("readonly");
                        br.remove();
                    }
                };

            } catch (IOException e) {
                throw new IOError(e);
            }
        }

        @SuppressWarnings("unchecked")
        public boolean remove(Object o) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");

            if (o instanceof Entry) {
                Entry<K, V> e = (java.util.Map.Entry<K, V>) o;

                // check for nulls
                if (e.getKey() == null || e.getValue() == null)
                    return false;
                lock.writeLock().lock();
                try {
                    // get old value, must be same as item in entry
                    V v = get(e.getKey());
                    if (v == null || !e.getValue().equals(v))
                        return false;
                    HTree.this.remove(e.getKey());
                    return true;
                } finally {
                    lock.writeLock().unlock();
                }
            }
            return false;
        }

        @Override
        public int size() {
            lock.readLock().lock();
            try {
                int counter = 0;
                Iterator<K> it = keys();
                while (it.hasNext()) {
                    it.next();
                    counter++;
                }
                return counter;
            } catch (IOException e) {
                throw new IOError(e);
            } finally {
                lock.readLock().unlock();
            }
        }

    };


    HTreeDirectory<K, V> getRoot() {
        // assumes that caller already holds read or write lock
        try {
            HTreeDirectory<K, V> root = (HTreeDirectory<K, V>) db.fetch(rootRecid, this.SERIALIZER);
            root.setPersistenceContext(rootRecid);
            return root;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public static HTree deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException {
        long rootRecid = LongPacker.unpackLong(is);
        boolean hasValues = is.readBoolean();
        Serializer keySerializer = (Serializer) ser.deserialize(is);
        Serializer valueSerializer = (Serializer) ser.deserialize(is);

        return new HTree(ser.db, rootRecid, keySerializer, valueSerializer, hasValues);
    }

    void serialize(DataOutput out) throws IOException {
        LongPacker.packLong(out, rootRecid);
        out.writeBoolean(hasValues);
        db.defaultSerializer().serialize(out, keySerializer);
        db.defaultSerializer().serialize(out, valueSerializer);
    }


    static void defrag(Long recid, DBStore r1, DBStore r2) throws IOException {
        // TODO should modCount be increased after defrag, revert or commit?
        try {
            byte[] data = r1.fetchRaw(recid);
            r2.forceInsert(recid, data);
            DataInput in = new DataInputStream(new ByteArrayInputStream(data));
            HTree t = (HTree) r1.defaultSerializer().deserialize(in);
            t.db = r1;
            t.loadValues = false;

            HTreeDirectory d = t.getRoot();
            if (d != null) {
                r2.forceInsert(t.rootRecid, r1.fetchRaw(t.rootRecid));
                d.defrag(r1, r2);
            }

        } catch (ClassNotFoundException e) {
            throw new IOError(e);
        }
    }

    public int size() {
        return (int) getRoot().size;
    }

    public boolean hasValues() {
        return hasValues;
    }

    public V putIfAbsent(K key, V value) {
        lock.writeLock().lock();
        try {
            if (!containsKey(key))
                return put(key, value);
            else
                return get(key);
        } finally {
            lock.writeLock().unlock();
        }
    }

    public boolean remove(Object key, Object value) {
        lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(value)) {
                remove(key);
                return true;
            } else return false;
        } finally {
            lock.writeLock().unlock();
        }
    }

    public boolean replace(K key, V oldValue, V newValue) {
        lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(oldValue)) {
                put(key, newValue);
                return true;
            } else return false;
        } finally {
            lock.writeLock().unlock();
        }
    }

    public V replace(K key, V value) {
        lock.writeLock().lock();
        try {
            if (containsKey(key)) {
                return put(key, value);
            } else return null;
        } finally {
            lock.writeLock().unlock();
        }
    }

}
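
// Usage sketch (hypothetical; in application code the map is normally obtained
// through the DB front-end rather than constructed directly):
//   HTree<String, Long> map = new HTree<String, Long>(db, null, null, true);
//   map.put("answer", 42L);       // serialized through SERIALIZER above
//   Long v = map.get("answer");   // null key/value serializers fall back to
//                                 // db.defaultSerializer() (see HTreeBucket)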

@@ -0,0 +1,352 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import java.io.*;
import java.util.ArrayList;

/**
 * A bucket is a placeholder for multiple (key, value) pairs. Buckets
 * are used to store collisions (same hash value) at all levels of an
 * H*tree.
 * <p/>
 * There are two types of buckets: leaf and non-leaf.
 * <p/>
 * Non-leaf buckets are buckets which hold collisions which happen
 * when the H*tree is not fully expanded. Keys in a non-leaf bucket
 * can have different hash codes. Non-leaf buckets are limited to an
 * arbitrary size. When this limit is reached, the H*tree should create
 * a new HTreeDirectory node and distribute keys of the non-leaf buckets into
 * the newly created HTreeDirectory.
 * <p/>
 * A leaf bucket is a bucket which contains keys which all have
 * the same <code>hashCode()</code>. Leaf buckets stand at the
 * bottom of an H*tree because the hashing algorithm cannot further
 * discriminate between different keys based on their hash code.
 *
 * @author Alex Boisvert
 */
final class HTreeBucket<K, V> {

    /**
     * The maximum number of elements (key, value) a non-leaf bucket
     * can contain.
     */
    public static final int OVERFLOW_SIZE = 16;


    /**
     * Depth of this bucket.
     */
    private byte _depth;


    /**
     * Keys and values in this bucket. The value for the key at index i
     * is stored at index i + OVERFLOW_SIZE.
     */
    private Object[] _keysAndValues;

    private byte size = 0;


    private final HTree<K, V> tree;


    /**
     * Public constructor for serialization.
     */
    public HTreeBucket(HTree<K, V> tree) {
        this.tree = tree;
    }


    /**
     * Construct a bucket with a given depth level. Depth level is the
     * number of <code>HashDirectory</code> above this bucket.
     */
    public HTreeBucket(HTree<K, V> tree, byte level) {
        this.tree = tree;
        if (level > HTreeDirectory.MAX_DEPTH + 1) {
            throw new IllegalArgumentException(
                    "Cannot create bucket with depth > MAX_DEPTH+1. "
                            + "Depth=" + level);
        }
        _depth = level;
        _keysAndValues = new Object[OVERFLOW_SIZE * 2];
    }


    /**
     * Returns the number of elements contained in this bucket.
     */
    public int getElementCount() {
        return size;
    }


    /**
     * Returns whether or not this bucket is a "leaf bucket".
     */
    public boolean isLeaf() {
        return (_depth > HTreeDirectory.MAX_DEPTH);
    }


    /**
     * Returns true if the bucket can accept at least one more element.
     */
    public boolean hasRoom() {
        if (isLeaf()) {
            return true; // leaf buckets are never full
        } else {
            // non-leaf bucket
            return (size < OVERFLOW_SIZE);
        }
    }


    /**
     * Add an element (key, value) to this bucket. If an existing element
     * has the same key, it is replaced silently.
     *
     * @return Object which was previously associated with the given key
     *         or <code>null</code> if no association existed.
     */
    public V addElement(K key, V value) {
        // find entry
        byte existing = -1;
        for (byte i = 0; i < size; i++) {
            if (key.equals(_keysAndValues[i])) {
                existing = i;
                break;
            }
        }

        if (existing != -1) {
            // replace existing element
            Object before = _keysAndValues[existing + OVERFLOW_SIZE];
            if (before instanceof BTreeLazyRecord) {
                BTreeLazyRecord<V> rec = (BTreeLazyRecord<V>) before;
                before = rec.get();
                rec.delete();
            }
            _keysAndValues[existing + OVERFLOW_SIZE] = value;
            return (V) before;
        } else {
            // add new (key, value) pair
            _keysAndValues[size] = key;
            _keysAndValues[size + OVERFLOW_SIZE] = value;
            size++;
            return null;
        }
    }


    /**
     * Remove an element, given a specific key.
     *
     * @param key Key of the element to remove
     * @return Removed element value, or <code>null</code> if not found
     */
    public V removeElement(K key) {
        // find entry
        byte existing = -1;
        for (byte i = 0; i < size; i++) {
            if (key.equals(_keysAndValues[i])) {
                existing = i;
                break;
            }
        }

        if (existing != -1) {
            Object o = _keysAndValues[existing + OVERFLOW_SIZE];
            if (o instanceof BTreeLazyRecord) {
                BTreeLazyRecord<V> rec = (BTreeLazyRecord<V>) o;
                o = rec.get();
                rec.delete();
            }

            // move last element to existing
            size--;
            _keysAndValues[existing] = _keysAndValues[size];
            _keysAndValues[existing + OVERFLOW_SIZE] = _keysAndValues[size + OVERFLOW_SIZE];

            // and unset last element
            _keysAndValues[size] = null;
            _keysAndValues[size + OVERFLOW_SIZE] = null;

            return (V) o;
        } else {
            // not found
            return null;
        }
    }


    /**
     * Returns the value associated with a given key. If the given key
     * is not found in this bucket, returns <code>null</code>.
     */
    public V getValue(K key) {
        // find entry
        byte existing = -1;
        for (byte i = 0; i < size; i++) {
            if (key.equals(_keysAndValues[i])) {
                existing = i;
                break;
            }
        }

        if (existing != -1) {
            Object o = _keysAndValues[existing + OVERFLOW_SIZE];
            if (o instanceof BTreeLazyRecord)
                return ((BTreeLazyRecord<V>) o).get();
            else
                return (V) o;
        } else {
            // key not found
            return null;
        }
    }


    /**
     * Obtain the keys contained in this bucket. Keys are ordered to match
     * their values, which can be obtained by calling <code>getValues()</code>.
     * <p/>
     * Note: the returned list is rebuilt on each call.
     */
    ArrayList<K> getKeys() {
        ArrayList<K> ret = new ArrayList<K>();
        for (byte i = 0; i < size; i++) {
            ret.add((K) _keysAndValues[i]);
        }
        return ret;
    }


    /**
     * Obtain the values contained in this bucket. Values are ordered to match
     * their keys, which can be obtained by calling <code>getKeys()</code>.
     * <p/>
     * Note: the returned list is rebuilt on each call.
     */
    ArrayList<V> getValues() {
        ArrayList<V> ret = new ArrayList<V>();
        for (byte i = 0; i < size; i++) {
            ret.add((V) _keysAndValues[i + OVERFLOW_SIZE]);
        }
        return ret;
    }


    public void writeExternal(DataOutput out)
            throws IOException {
        out.write(_depth);
        out.write(size);


        DataInputOutput out3 = tree.writeBufferCache.getAndSet(null);
        if (out3 == null)
            out3 = new DataInputOutput();
        else
            out3.reset();

        Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer();
        for (byte i = 0; i < size; i++) {
            out3.reset();
            keySerializer.serialize(out3, _keysAndValues[i]);
            LongPacker.packInt(out, out3.getPos());
            out.write(out3.getBuf(), 0, out3.getPos());

        }

        // write values
        if (tree.hasValues()) {
            Serializer valSerializer = tree.valueSerializer != null ? tree.valueSerializer : tree.getRecordManager().defaultSerializer();

            for (byte i = 0; i < size; i++) {
                Object value = _keysAndValues[i + OVERFLOW_SIZE];
                if (value == null) {
                    out.write(BTreeLazyRecord.NULL);
                } else if (value instanceof BTreeLazyRecord) {
                    out.write(BTreeLazyRecord.LAZY_RECORD);
                    LongPacker.packLong(out, ((BTreeLazyRecord) value).recid);
                } else {
                    // transform to byte array
                    out3.reset();
                    valSerializer.serialize(out3, value);

                    if (out3.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) {
                        // store as separate record
                        long recid = tree.getRecordManager().insert(out3.toByteArray(), BTreeLazyRecord.FAKE_SERIALIZER, true);
                        out.write(BTreeLazyRecord.LAZY_RECORD);
                        LongPacker.packLong(out, recid);
                    } else {
                        out.write(out3.getPos());
                        out.write(out3.getBuf(), 0, out3.getPos());
                    }
                }
            }
        }
        tree.writeBufferCache.set(out3);

    }


    public void readExternal(DataInputOutput in) throws IOException, ClassNotFoundException {
        _depth = in.readByte();
        size = in.readByte();

        // read keys
        Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer();
        _keysAndValues = new Object[OVERFLOW_SIZE * 2];
        for (byte i = 0; i < size; i++) {
            int expectedSize = LongPacker.unpackInt(in);
            K key = (K) BTreeLazyRecord.fastDeser(in, keySerializer, expectedSize);
            _keysAndValues[i] = key;
        }

        // read values
        if (tree.hasValues()) {
            Serializer<V> valSerializer = tree.valueSerializer != null ? tree.valueSerializer : (Serializer<V>) tree.getRecordManager().defaultSerializer();
            for (byte i = 0; i < size; i++) {
                int header = in.readUnsignedByte();
                if (header == BTreeLazyRecord.NULL) {
                    _keysAndValues[i + OVERFLOW_SIZE] = null;
                } else if (header == BTreeLazyRecord.LAZY_RECORD) {
                    long recid = LongPacker.unpackLong(in);
                    _keysAndValues[i + OVERFLOW_SIZE] = (new BTreeLazyRecord(tree.getRecordManager(), recid, valSerializer));
                } else {
                    _keysAndValues[i + OVERFLOW_SIZE] = BTreeLazyRecord.fastDeser(in, valSerializer, header);
                }
            }
        } else {
            for (byte i = 0; i < size; i++) {
                if (_keysAndValues[i] != null)
                    _keysAndValues[i + OVERFLOW_SIZE] = Utils.EMPTY_STRING;
            }
        }
    }
}
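
// Layout sketch of _keysAndValues with OVERFLOW_SIZE = 16 (from the constant above):
//   index  0..15 -> keys (only the first `size` slots are in use)
//   index 16..31 -> values, where the value for key i sits at i + OVERFLOW_SIZE
// so a bucket holding {"a"->1, "b"->2} stores "a","b" at 0,1 and 1L,2L at 16,17.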

@@ -0,0 +1,618 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import java.io.*;
import java.util.*;

/**
 * Hashtable directory page.
 *
 * @author Alex Boisvert
 */
final class HTreeDirectory<K, V> {

    /**
     * Maximum number of children in a directory.
     * <p/>
     * (Must be a power of 2 -- if you update this value, you must also
     * update BIT_SIZE and MAX_DEPTH.)
     * <p/>
     * !!!! do not change this, it affects the storage format; there are also magic numbers which rely on 255 !!!
     */
    static final int MAX_CHILDREN = 256;


    /**
     * Number of significant bits per directory level.
     */
    static final int BIT_SIZE = 8; // log2(256) = 8


    /**
     * Maximum number of levels (zero-based)
     * <p/>
     * (4 * 8 bits = 32 bits, which is the size of an "int", and as
     * you know, hashcodes in Java are "ints")
     */
    static final int MAX_DEPTH = 3; // 4 levels
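
    // Illustrative note (assumption, not verbatim from this file): with BIT_SIZE = 8
    // each directory level consumes one 8-bit slice of the 32-bit hash computed by
    // hashCode(key), so a key passes through at most 4 directory levels
    // (MAX_DEPTH = 3, zero-based) before any remaining collisions must be absorbed
    // by a leaf HTreeBucket.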


    /**
     * Record ids of children nodes.
     * It is saved in a matrix to save memory; some subarrays may be null.
     */
    private long[][] _children;


    /**
     * Depth of this directory page, zero-based
     */
    private byte _depth;

    /**
     * This directory's record ID in the DB. (transient)
     */
    private long _recid;

    /** if this is the root (depth=0), it contains the size, otherwise -1 */
    long size;

    protected final HTree<K, V> tree;

    /**
     * Public constructor used by serialization
     */
    public HTreeDirectory(HTree<K, V> tree) {
        this.tree = tree;
    }

    /**
     * Construct a HashDirectory
     *
     * @param depth Depth of this directory node.
     */
    HTreeDirectory(HTree<K, V> tree, byte depth) {
        this.tree = tree;
        _depth = depth;
        _children = new long[32][];
    }


    /**
     * Sets persistence context. This method must be called before any
     * persistence-related operation.
     *
     * @param recid Record id of this directory.
     */
    void setPersistenceContext(long recid) {
        this._recid = recid;
    }


    /**
     * Get the record identifier used to load this hashtable.
     */
    long getRecid() {
        return _recid;
    }


    /**
     * Returns whether or not this directory is empty. A directory
     * is empty when it no longer contains buckets or sub-directories.
     */
    boolean isEmpty() {
        for (int i = 0; i < _children.length; i++) {
            long[] sub = _children[i];
            if (sub != null) {
                for (int j = 0; j < 8; j++) {
                    if (sub[j] != 0) {
                        return false;
                    }
                }
            }
        }
        return true;
    }

    /**
     * Returns the value which is associated with the given key. Returns
     * <code>null</code> if there is no association for this key.
     *
     * @param key key whose associated value is to be returned
     */
    V get(K key)
            throws IOException {
        int hash = hashCode(key);
        long child_recid = getRecid(hash);
        if (child_recid == 0) {
            // not bucket/node --> not found
            return null;
        } else {
            Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
            // System.out.println("HashDirectory.get() child is : "+node);

            if (node instanceof HTreeDirectory) {
                // recurse into next directory level
                HTreeDirectory<K, V> dir = (HTreeDirectory<K, V>) node;
                dir.setPersistenceContext(child_recid);
                return dir.get(key);
            } else {
                // node is a bucket
                HTreeBucket<K, V> bucket = (HTreeBucket) node;
                return bucket.getValue(key);
            }
        }
    }

    private long getRecid(int hash) {
        long[] sub = _children[hash >>> 3];
        return sub == null ? 0 : sub[hash % 8];
    }

    private void putRecid(int hash, long recid) {
        long[] sub = _children[hash >>> 3];
        if (sub == null) {
            sub = new long[8];
            _children[hash >>> 3] = sub;
        }
        sub[hash % 8] = recid;
    }


    /**
     * Associates the specified value with the specified key.
     *
     * @param key   key with which the specified value is to be associated.
     * @param value value to be associated with the specified key.
     * @return object which was previously associated with the given key,
     *         or <code>null</code> if no association existed.
     */
    Object put(final Object key, final Object value)
            throws IOException {
        if (value == null) {
            return remove(key);
        }
        int hash = hashCode(key);
        long child_recid = getRecid(hash);
        if (child_recid == 0) {
            // no bucket/node here yet, let's create a bucket
            HTreeBucket bucket = new HTreeBucket(tree, (byte) (_depth + 1));

            // insert (key,value) pair in bucket
            Object existing = bucket.addElement(key, value);

            long b_recid = tree.db.insert(bucket, tree.SERIALIZER, false);
            putRecid(hash, b_recid);

            tree.db.update(_recid, this, tree.SERIALIZER);

            // System.out.println("Added: "+bucket);
            return existing;
        } else {
            Object node = tree.db.fetch(child_recid, tree.SERIALIZER);

            if (node instanceof HTreeDirectory) {
                // recursive insert in next directory level
                HTreeDirectory dir = (HTreeDirectory) node;
                dir.setPersistenceContext(child_recid);
                return dir.put(key, value);
            } else {
                // node is a bucket
                HTreeBucket bucket = (HTreeBucket) node;
                if (bucket.hasRoom()) {
                    Object existing = bucket.addElement(key, value);
                    tree.db.update(child_recid, bucket, tree.SERIALIZER);
                    // System.out.println("Added: "+bucket);
                    return existing;
                } else {
                    // overflow, so create a new directory
|
||||
if (_depth == MAX_DEPTH) {
|
||||
throw new RuntimeException("Cannot create deeper directory. "
|
||||
+ "Depth=" + _depth);
|
||||
}
|
||||
HTreeDirectory dir = new HTreeDirectory(tree, (byte) (_depth + 1));
|
||||
long dir_recid = tree.db.insert(dir, tree.SERIALIZER,false);
|
||||
dir.setPersistenceContext(dir_recid);
|
||||
|
||||
putRecid(hash, dir_recid);
|
||||
tree.db.update(_recid, this, tree.SERIALIZER);
|
||||
|
||||
// discard overflown bucket
|
||||
tree.db.delete(child_recid);
|
||||
|
||||
// migrate existing bucket elements
|
||||
ArrayList keys = bucket.getKeys();
|
||||
ArrayList values = bucket.getValues();
|
||||
int entries = keys.size();
|
||||
for (int i = 0; i < entries; i++) {
|
||||
dir.put(keys.get(i), values.get(i));
|
||||
}
|
||||
|
||||
// (finally!) insert new element
|
||||
return dir.put(key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Remove the value which is associated with the given key. If the
|
||||
* key does not exist, this method simply ignores the operation.
|
||||
*
|
||||
* @param key key whose associated value is to be removed
|
||||
* @return object which was associated with the given key, or
|
||||
* <code>null</code> if no association existed with given key.
|
||||
*/
|
||||
Object remove(Object key) throws IOException {
|
||||
int hash = hashCode(key);
|
||||
long child_recid = getRecid(hash);
|
||||
if (child_recid == 0) {
|
||||
// not bucket/node --> not found
|
||||
return null;
|
||||
} else {
|
||||
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
|
||||
// System.out.println("HashDirectory.remove() child is : "+node);
|
||||
|
||||
if (node instanceof HTreeDirectory) {
|
||||
// recurse into next directory level
|
||||
HTreeDirectory dir = (HTreeDirectory) node;
|
||||
dir.setPersistenceContext(child_recid);
|
||||
Object existing = dir.remove(key);
|
||||
if (existing != null) {
|
||||
if (dir.isEmpty()) {
|
||||
// delete empty directory
|
||||
tree.db.delete(child_recid);
|
||||
putRecid(hash, 0);
|
||||
tree.db.update(_recid, this, tree.SERIALIZER);
|
||||
}
|
||||
}
|
||||
return existing;
|
||||
} else {
|
||||
// node is a bucket
|
||||
HTreeBucket bucket = (HTreeBucket) node;
|
||||
Object existing = bucket.removeElement(key);
|
||||
if (existing != null) {
|
||||
if (bucket.getElementCount() >= 1) {
|
||||
tree.db.update(child_recid, bucket, tree.SERIALIZER);
|
||||
} else {
|
||||
// delete bucket, it's empty
|
||||
tree.db.delete(child_recid);
|
||||
putRecid(hash, 0);
|
||||
tree.db.update(_recid, this, tree.SERIALIZER);
|
||||
}
|
||||
}
|
||||
return existing;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the hashcode of a key, based on the current directory
|
||||
* depth.
|
||||
*/
|
||||
private int hashCode(Object key) {
|
||||
int hashMask = hashMask();
|
||||
int hash = key.hashCode();
|
||||
hash = hash & hashMask;
|
||||
hash = hash >>> ((MAX_DEPTH - _depth) * BIT_SIZE);
|
||||
hash = hash % MAX_CHILDREN;
|
||||
/*
|
||||
System.out.println("HashDirectory.hashCode() is: 0x"
|
||||
+Integer.toHexString(hash)
|
||||
+" for object hashCode() 0x"
|
||||
+Integer.toHexString(key.hashCode()));
|
||||
*/
|
||||
return hash;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the hashmask of this directory. The hashmask is the
|
||||
* bit mask applied to a hashcode to retain only bits that are
|
||||
* relevant to this directory level.
|
||||
*/
|
||||
int hashMask() {
|
||||
int bits = MAX_CHILDREN - 1;
|
||||
int hashMask = bits << ((MAX_DEPTH - _depth) * BIT_SIZE);
|
||||
/*
|
||||
System.out.println("HashDirectory.hashMask() is: 0x"
|
||||
+Integer.toHexString(hashMask));
|
||||
*/
|
||||
return hashMask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an enumeration of the keys contained in this
|
||||
*/
|
||||
Iterator<K> keys()
|
||||
throws IOException {
|
||||
return new HDIterator(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an enumeration of the values contained in this
|
||||
*/
|
||||
Iterator<V> values()
|
||||
throws IOException {
|
||||
return new HDIterator(false);
|
||||
}
|
||||
|
||||
|
||||
public void writeExternal(DataOutput out)
|
||||
throws IOException {
|
||||
out.writeByte(_depth);
|
||||
if(_depth==0){
|
||||
LongPacker.packLong(out,size);
|
||||
}
|
||||
|
||||
int zeroStart = 0;
|
||||
for (int i = 0; i < MAX_CHILDREN; i++) {
|
||||
if (getRecid(i) != 0) {
|
||||
zeroStart = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out.write(zeroStart);
|
||||
if (zeroStart == MAX_CHILDREN)
|
||||
return;
|
||||
|
||||
int zeroEnd = 0;
|
||||
for (int i = MAX_CHILDREN - 1; i >= 0; i--) {
|
||||
if (getRecid(i) != 0) {
|
||||
zeroEnd = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
out.write(zeroEnd);
|
||||
|
||||
for (int i = zeroStart; i <= zeroEnd; i++) {
|
||||
LongPacker.packLong(out, getRecid(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void readExternal(DataInputOutput in)
|
||||
throws IOException, ClassNotFoundException {
|
||||
_depth = in.readByte();
|
||||
if(_depth==0)
|
||||
size = LongPacker.unpackLong(in);
|
||||
else
|
||||
size = -1;
|
||||
|
||||
_children = new long[32][];
|
||||
int zeroStart = in.readUnsignedByte();
|
||||
int zeroEnd = in.readUnsignedByte();
|
||||
|
||||
for (int i = zeroStart; i <= zeroEnd; i++) {
|
||||
long recid = LongPacker.unpackLong(in);
|
||||
if(recid!=0)
|
||||
putRecid(i,recid);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void defrag(DBStore r1, DBStore r2) throws IOException, ClassNotFoundException {
|
||||
for (long[] sub: _children) {
|
||||
if(sub==null) continue;
|
||||
for (long child : sub) {
|
||||
if (child == 0) continue;
|
||||
byte[] data = r1.fetchRaw(child);
|
||||
r2.forceInsert(child, data);
|
||||
Object t = tree.SERIALIZER.deserialize(new DataInputOutput(data));
|
||||
if (t instanceof HTreeDirectory) {
|
||||
((HTreeDirectory) t).defrag(r1, r2);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void deleteAllChildren() throws IOException {
|
||||
for(long[] ll : _children){
|
||||
if(ll!=null){
|
||||
for(long l:ll ){
|
||||
if(l!=0){
|
||||
tree.db.delete(l);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// INNER CLASS
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Utility class to enumerate keys/values in a HTree
|
||||
*/
|
||||
class HDIterator<A> implements Iterator<A> {
|
||||
|
||||
/**
|
||||
* True if we're iterating on keys, False if enumerating on values.
|
||||
*/
|
||||
private boolean _iterateKeys;
|
||||
|
||||
/**
|
||||
* Stacks of directories & last enumerated child position
|
||||
*/
|
||||
private ArrayList _dirStack;
|
||||
private ArrayList _childStack;
|
||||
|
||||
/**
|
||||
* Current HashDirectory in the hierarchy
|
||||
*/
|
||||
private HTreeDirectory _dir;
|
||||
|
||||
/**
|
||||
* Current child position
|
||||
*/
|
||||
private int _child;
|
||||
|
||||
/**
|
||||
* Current bucket iterator
|
||||
*/
|
||||
private Iterator<A> _iter;
|
||||
|
||||
private A next;
|
||||
|
||||
/**
|
||||
* last item returned in next(), is used to remove() last item
|
||||
*/
|
||||
private A last;
|
||||
|
||||
private int expectedModCount;
|
||||
|
||||
/**
|
||||
* Construct an iterator on this directory.
|
||||
*
|
||||
* @param iterateKeys True if iteration supplies keys, False
|
||||
* if iterateKeys supplies values.
|
||||
*/
|
||||
HDIterator(boolean iterateKeys)
|
||||
throws IOException {
|
||||
_dirStack = new ArrayList();
|
||||
_childStack = new ArrayList();
|
||||
_dir = HTreeDirectory.this;
|
||||
_child = -1;
|
||||
_iterateKeys = iterateKeys;
|
||||
expectedModCount = tree.modCount;
|
||||
|
||||
prepareNext();
|
||||
next = next2();
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns the next object.
|
||||
*/
|
||||
public A next2() {
|
||||
A next = null;
|
||||
if (_iter != null && _iter.hasNext()) {
|
||||
next = _iter.next();
|
||||
} else {
|
||||
try {
|
||||
prepareNext();
|
||||
} catch (IOException except) {
|
||||
throw new IOError(except);
|
||||
}
|
||||
if (_iter != null && _iter.hasNext()) {
|
||||
return next2();
|
||||
}
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Prepare internal state so we can answer <code>hasMoreElements</code>
|
||||
* <p/>
|
||||
* Actually, this code prepares an Enumeration on the next
|
||||
* Bucket to enumerate. If no following bucket is found,
|
||||
* the next Enumeration is set to <code>null</code>.
|
||||
*/
|
||||
private void prepareNext() throws IOException {
|
||||
long child_recid = 0;
|
||||
|
||||
// get next bucket/directory to enumerate
|
||||
do {
|
||||
_child++;
|
||||
if (_child >= MAX_CHILDREN) {
|
||||
|
||||
if (_dirStack.isEmpty()) {
|
||||
// no more directory in the stack, we're finished
|
||||
return;
|
||||
}
|
||||
|
||||
// try next node
|
||||
_dir = (HTreeDirectory) _dirStack.remove(_dirStack.size() - 1);
|
||||
_child = ((Integer) _childStack.remove(_childStack.size() - 1)).intValue();
|
||||
continue;
|
||||
}
|
||||
child_recid = _dir.getRecid(_child);
|
||||
} while (child_recid == 0);
|
||||
|
||||
if (child_recid == 0) {
|
||||
throw new Error("child_recid cannot be 0");
|
||||
}
|
||||
|
||||
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
|
||||
// System.out.println("HDEnumeration.get() child is : "+node);
|
||||
|
||||
if (node instanceof HTreeDirectory) {
|
||||
// save current position
|
||||
_dirStack.add(_dir);
|
||||
_childStack.add(new Integer(_child));
|
||||
|
||||
_dir = (HTreeDirectory) node;
|
||||
_child = -1;
|
||||
|
||||
// recurse into
|
||||
_dir.setPersistenceContext(child_recid);
|
||||
prepareNext();
|
||||
} else {
|
||||
// node is a bucket
|
||||
HTreeBucket bucket = (HTreeBucket) node;
|
||||
if (_iterateKeys) {
|
||||
ArrayList keys2 = bucket.getKeys();
|
||||
_iter = keys2.iterator();
|
||||
} else {
|
||||
_iter = bucket.getValues().iterator();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public boolean hasNext() {
|
||||
return next != null;
|
||||
}
|
||||
|
||||
|
||||
public A next() {
|
||||
if (next == null) throw new NoSuchElementException();
|
||||
if (expectedModCount != tree.modCount)
|
||||
throw new ConcurrentModificationException();
|
||||
last = next;
|
||||
next = next2();
|
||||
return last;
|
||||
}
|
||||
|
||||
|
||||
public void remove() {
|
||||
if (last == null) throw new IllegalStateException();
|
||||
|
||||
if (expectedModCount != tree.modCount)
|
||||
throw new ConcurrentModificationException();
|
||||
|
||||
//TODO current delete behaviour may change node layout. INVESTIGATE if this can happen!
|
||||
tree.remove(last);
|
||||
last = null;
|
||||
expectedModCount++;
|
||||
}
|
||||
}
|
||||
|
||||
}
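To make the index arithmetic above concrete: at each depth the directory keeps only the 8 bits of the key's hashcode that belong to its level, then splits that index as (i >>> 3, i % 8) into its 32x8 child matrix. A small stand-alone sketch (my own illustration under those definitions, not part of the patch) reproduces the hashCode()/hashMask() slicing:

import java.util.Locale;

public class HashSliceDemo {
    static final int MAX_CHILDREN = 256, BIT_SIZE = 8, MAX_DEPTH = 3;

    // Same arithmetic as HTreeDirectory.hashCode() combined with hashMask().
    static int slice(int hash, int depth) {
        int mask = (MAX_CHILDREN - 1) << ((MAX_DEPTH - depth) * BIT_SIZE);
        return ((hash & mask) >>> ((MAX_DEPTH - depth) * BIT_SIZE)) % MAX_CHILDREN;
    }

    public static void main(String[] args) {
        int h = 0xCAFEBABE;
        // depth 0 takes the top byte (0xCA), depth 1 the next (0xFE), and so on
        for (int depth = 0; depth <= MAX_DEPTH; depth++)
            System.out.printf(Locale.ROOT, "depth %d -> index 0x%02X%n", depth, slice(h, depth));
    }
}

With MAX_DEPTH = 3 the four levels consume all 32 bits of the hashcode, which is why put() throws a RuntimeException rather than creating a deeper directory.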
@@ -0,0 +1,47 @@
package org.apache.jdbm;

import java.util.AbstractSet;
import java.util.Iterator;

/**
 * Wrapper for HTree to implement the java.util.Set interface
 */
class HTreeSet<E> extends AbstractSet<E> {

    final HTree<E, Object> map;

    HTreeSet(HTree map) {
        this.map = map;
    }

    public Iterator<E> iterator() {
        return map.keySet().iterator();
    }

    public int size() {
        return map.size();
    }

    public boolean isEmpty() {
        return map.isEmpty();
    }

    public boolean contains(Object o) {
        return map.containsKey(o);
    }

    public boolean add(E e) {
        return map.put(e, Utils.EMPTY_STRING) == null;
    }

    public boolean remove(Object o) {
        return map.remove(o) == Utils.EMPTY_STRING;
    }

    public void clear() {
        map.clear();
    }

}
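The class above is the classic "set as a map with a dummy value" adapter over AbstractSet. A self-contained sketch of the same idea, using an in-memory HashMap in place of the persistent HTree (all names below are illustrative, not part of JDBM):

import java.util.AbstractSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

class MapBackedSet<E> extends AbstractSet<E> {
    private static final Object DUMMY = new Object(); // plays the role of Utils.EMPTY_STRING
    private final Map<E, Object> map = new HashMap<E, Object>();

    public Iterator<E> iterator()   { return map.keySet().iterator(); }
    public int size()               { return map.size(); }
    public boolean add(E e)         { return map.put(e, DUMMY) == null; }
    public boolean remove(Object o) { return map.remove(o) == DUMMY; }
}

Note that remove() relies on reference equality against the shared dummy value, exactly as HTreeSet.remove() compares against Utils.EMPTY_STRING.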
@@ -0,0 +1,480 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.jdbm;

import java.io.*;
import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * LinkedList2 which stores its nodes on disk.
 *
 * @author Jan Kotek
 */
class LinkedList2<E> extends AbstractSequentialList<E> {

    private DBAbstract db;

    final long rootRecid;
    /** Size limit; not currently used, but kept for future compatibility.
     *  Zero means no limit.
     */
    long sizeLimit = 0;

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    static final class Root {
        long first;
        long last;
        long size;
    }

    private static final Serializer<Root> ROOT_SERIALIZER = new Serializer<Root>() {

        public void serialize(DataOutput out, Root obj) throws IOException {
            LongPacker.packLong(out, obj.first);
            LongPacker.packLong(out, obj.last);
            LongPacker.packLong(out, obj.size);
        }

        public Root deserialize(DataInput in) throws IOException, ClassNotFoundException {
            Root r = new Root();
            r.first = LongPacker.unpackLong(in);
            r.last = LongPacker.unpackLong(in);
            r.size = LongPacker.unpackLong(in);
            return r;
        }
    };

    private Serializer<E> valueSerializer;

    /**
     * Indicates that entry values should not be loaded during deserialization; used during defragmentation.
     */
    protected boolean loadValues = true;

    /** constructor used for deserialization */
    LinkedList2(DBAbstract db, long rootRecid, Serializer<E> valueSerializer) {
        this.db = db;
        this.rootRecid = rootRecid;
        this.valueSerializer = valueSerializer;
    }

    /** constructor used to create a new empty list */
    LinkedList2(DBAbstract db, Serializer<E> valueSerializer) throws IOException {
        this.db = db;
        if (valueSerializer != null && !(valueSerializer instanceof Serializable))
            throw new IllegalArgumentException("Serializer does not implement Serializable");
        this.valueSerializer = valueSerializer;
        // create root
        this.rootRecid = db.insert(new Root(), ROOT_SERIALIZER, false);
    }

    void setPersistenceContext(DBAbstract db) {
        this.db = db;
    }

    public ListIterator<E> listIterator(int index) {
        lock.readLock().lock();
        try {
            Root r = getRoot();
            if (index < 0 || index > r.size)
                throw new IndexOutOfBoundsException();

            Iter iter = new Iter();
            iter.next = r.first;

            // scroll to the requested position
            // TODO scroll from the end if the index is beyond the half-way point
            for (int i = 0; i < index; i++) {
                iter.next();
            }
            return iter;
        } finally {
            lock.readLock().unlock();
        }
    }

    Root getRoot() {
        // expects that the caller already holds the lock
        try {
            return db.fetch(rootRecid, ROOT_SERIALIZER);
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public int size() {
        lock.readLock().lock();
        try {
            return (int) getRoot().size;
        } finally {
            lock.readLock().unlock();
        }
    }

    public Iterator<E> descendingIterator() {
        return null; // TODO: not implemented
    }

    public boolean add(Object value) {
        lock.writeLock().lock();
        try {
            Root r = getRoot();
            Entry e = new Entry(r.last, 0, value);
            long recid = db.insert(e, entrySerializer, false);

            // update the old last Entry to point to the new record
            if (r.last != 0) {
                Entry oldLast = db.fetch(r.last, entrySerializer);
                if (oldLast.next != 0) throw new Error();
                oldLast.next = recid;
                db.update(r.last, oldLast, entrySerializer);
            }

            // update linked list
            r.last = recid;
            if (r.first == 0) r.first = recid;
            r.size++;
            db.update(rootRecid, r, ROOT_SERIALIZER);
            modCount++;
            return true;
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.writeLock().unlock();
        }
    }

    private Entry<E> fetch(long recid) {
        lock.readLock().lock();
        try {
            return db.fetch(recid, entrySerializer);
        } catch (IOException e) {
            throw new IOError(e);
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * called from Serialization object
     */
    static LinkedList2 deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException {
        long rootrecid = LongPacker.unpackLong(is);
        long sizeLimit = LongPacker.unpackLong(is);
        if (sizeLimit != 0) throw new InternalError("LinkedList.sizeLimit not supported in this JDBM version");
        Serializer serializer = (Serializer) ser.deserialize(is);
        return new LinkedList2(ser.db, rootrecid, serializer);
    }

    void serialize(DataOutput out) throws IOException {
        LongPacker.packLong(out, rootRecid);
        LongPacker.packLong(out, sizeLimit);
        db.defaultSerializer().serialize(out, valueSerializer);
    }

    private final Serializer<Entry> entrySerializer = new Serializer<Entry>() {

        public void serialize(DataOutput out, Entry e) throws IOException {
            LongPacker.packLong(out, e.prev);
            LongPacker.packLong(out, e.next);
            if (valueSerializer != null)
                valueSerializer.serialize(out, (E) e.value);
            else
                db.defaultSerializer().serialize(out, e.value);
        }

        public Entry<E> deserialize(DataInput in) throws IOException, ClassNotFoundException {
            long prev = LongPacker.unpackLong(in);
            long next = LongPacker.unpackLong(in);
            Object value = null;
            if (loadValues)
                value = valueSerializer == null ? db.defaultSerializer().deserialize(in) : valueSerializer.deserialize(in);
            return new LinkedList2.Entry(prev, next, value);
        }
    };

    static class Entry<E> {
        long prev = 0;
        long next = 0;

        E value;

        public Entry(long prev, long next, E value) {
            this.prev = prev;
            this.next = next;
            this.value = value;
        }
    }

    private final class Iter implements ListIterator<E> {

        private int expectedModCount = modCount;
        private int index = 0;

        private long prev = 0;
        private long next = 0;

        private byte lastOper = 0;

        public boolean hasNext() {
            return next != 0;
        }

        public E next() {
            if (next == 0) throw new NoSuchElementException();
            checkForComodification();

            Entry<E> e = fetch(next);

            prev = next;
            next = e.next;
            index++;
            lastOper = +1;
            return e.value;
        }

        public boolean hasPrevious() {
            return prev != 0;
        }

        public E previous() {
            checkForComodification();
            Entry<E> e = fetch(prev);
            next = prev;
            prev = e.prev;
            index--;
            lastOper = -1;
            return e.value;
        }

        public int nextIndex() {
            return index;
        }

        public int previousIndex() {
            return index - 1;
        }

        public void remove() {
            checkForComodification();
            lock.writeLock().lock();
            try {
                if (lastOper == 1) {
                    // last operation was next(), so remove the previous element
                    lastOper = 0;

                    Entry<E> p = db.fetch(prev, entrySerializer);
                    // update entry before previous
                    if (p.prev != 0) {
                        Entry<E> pp = db.fetch(p.prev, entrySerializer);
                        pp.next = p.next;
                        db.update(p.prev, pp, entrySerializer);
                    }
                    // update entry after next
                    if (p.next != 0) {
                        Entry<E> pn = db.fetch(p.next, entrySerializer);
                        pn.prev = p.prev;
                        db.update(p.next, pn, entrySerializer);
                    }
                    // remove old record from db
                    db.delete(prev);
                    // update list
                    Root r = getRoot();
                    if (r.first == prev)
                        r.first = next;
                    if (r.last == prev)
                        r.last = next;
                    r.size--;
                    db.update(rootRecid, r, ROOT_SERIALIZER);
                    modCount++;
                    expectedModCount++;
                    // update iterator
                    prev = p.prev;

                } else if (lastOper == -1) {
                    // last operation was previous(), so remove the next element
                    lastOper = 0;

                    Entry<E> n = db.fetch(next, entrySerializer);
                    // update entry before next
                    if (n.prev != 0) {
                        Entry<E> pp = db.fetch(n.prev, entrySerializer);
                        pp.next = n.next;
                        db.update(n.prev, pp, entrySerializer);
                    }
                    // update entry after previous
                    if (n.next != 0) {
                        Entry<E> pn = db.fetch(n.next, entrySerializer);
                        pn.prev = n.prev;
                        db.update(n.next, pn, entrySerializer);
                    }
                    // remove old record from db
                    db.delete(next);
                    // update list
                    Root r = getRoot();
                    if (r.last == next)
                        r.last = prev;
                    if (r.first == next)
                        r.first = prev;
                    r.size--;
                    db.update(rootRecid, r, ROOT_SERIALIZER);
                    modCount++;
                    expectedModCount++;
                    // update iterator
                    next = n.next;

                } else
                    throw new IllegalStateException();
            } catch (IOException e) {
                throw new IOError(e);
            } finally {
                lock.writeLock().unlock();
            }
        }

        public void set(E value) {
            checkForComodification();
            lock.writeLock().lock();
            try {
                if (lastOper == 1) {
                    // last operation was next(), so update the previous item
                    lastOper = 0;
                    Entry<E> n = db.fetch(prev, entrySerializer);
                    n.value = value;
                    db.update(prev, n, entrySerializer);
                } else if (lastOper == -1) {
                    // last operation was previous(), so update the next item
                    lastOper = 0;
                    Entry<E> n = db.fetch(next, entrySerializer);
                    n.value = value;
                    db.update(next, n, entrySerializer);
                } else
                    throw new IllegalStateException();
            } catch (IOException e) {
                throw new IOError(e);
            } finally {
                lock.writeLock().unlock();
            }
        }

        public void add(E value) {
            checkForComodification();

            // use the more efficient method if possible
            if (next == 0) {
                LinkedList2.this.add(value);
                expectedModCount++;
                return;
            }
            lock.writeLock().lock();
            try {
                // insert new entry
                Entry<E> e = new Entry<E>(prev, next, value);
                long recid = db.insert(e, entrySerializer, false);

                // update previous entry
                if (prev != 0) {
                    Entry<E> p = db.fetch(prev, entrySerializer);
                    if (p.next != next) throw new Error();
                    p.next = recid;
                    db.update(prev, p, entrySerializer);
                }

                // update next entry
                Entry<E> n = fetch(next);
                if (n.prev != prev) throw new Error();
                n.prev = recid;
                db.update(next, n, entrySerializer);

                // update list
                Root r = getRoot();
                r.size++;
                db.update(rootRecid, r, ROOT_SERIALIZER);

                // update iterator
                expectedModCount++;
                modCount++;
                prev = recid;

            } catch (IOException e) {
                throw new IOError(e);
            } finally {
                lock.writeLock().unlock();
            }
        }

        final void checkForComodification() {
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
        }
    }

    /**
     * Copies a collection from one db to another, while keeping logical recids unchanged
     */
    static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
        try {
            // move linked list itself
            byte[] data = r1.fetchRaw(recid);
            r2.forceInsert(recid, data);
            DataInputOutput in = new DataInputOutput();
            in.reset(data);
            LinkedList2 l = (LinkedList2) r1.defaultSerializer().deserialize(in);
            l.loadValues = false;
            // move linked list root
            if (l.rootRecid == 0) // empty list, done
                return;

            data = r1.fetchRaw(l.rootRecid);
            r2.forceInsert(l.rootRecid, data);
            in.reset(data);
            Root r = ROOT_SERIALIZER.deserialize(in);
            // move all other nodes in linked list
            long current = r.first;
            while (current != 0) {
                data = r1.fetchRaw(current);
                in.reset(data);
                r2.forceInsert(current, data);

                Entry e = (Entry) l.entrySerializer.deserialize(in);
                current = e.next;
            }
        } catch (ClassNotFoundException e) {
            throw new IOError(e);
        }
    }

}
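add() above is a classic tail append at the record level: insert the new entry pointing back at the old tail, patch the old tail's next pointer, then rewrite the root. A compact in-memory sketch of the same choreography, with a HashMap standing in for the db and a counter standing in for recids (all names here are illustrative only):

import java.util.HashMap;
import java.util.Map;

class TailAppendDemo {
    static class Entry { long prev, next; Object value;
        Entry(long p, long n, Object v) { prev = p; next = n; value = v; } }

    static final Map<Long, Entry> store = new HashMap<Long, Entry>();
    static long first, last, nextRecid = 1;

    static void add(Object value) {
        long recid = nextRecid++;
        store.put(recid, new Entry(last, 0, value)); // new tail points back at old tail
        if (last != 0) store.get(last).next = recid; // old tail now points forward
        last = recid;
        if (first == 0) first = recid;               // empty-list bootstrap
    }

    public static void main(String[] args) {
        add("a"); add("b"); add("c");
        for (long cur = first; cur != 0; cur = store.get(cur).next)
            System.out.println(store.get(cur).value);
    }
}

The real class performs the same three writes through db.insert()/db.update(), under the write lock, so a crash between them is bounded by the transaction layer rather than by this code.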
@@ -0,0 +1,239 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

import java.io.IOException;
import java.util.Arrays;

/**
 * This class manages the linked lists of logical rowid pages.
 */
final class LogicalRowIdManager {
    // our record file and associated page manager
    private final PageFile file;
    private final PageManager pageman;
    static final short ELEMS_PER_PAGE = (short) ((Storage.PAGE_SIZE - Magic.PAGE_HEADER_SIZE) / Magic.PhysicalRowId_SIZE);

    private long[] freeRecordsInTransRowid = new long[4];
    private int freeRecordsInTransSize = 0;

    /** number of free logical rowids on a logical free page; stored as a SHORT */
    static final int OFFSET_FREE_COUNT = Magic.PAGE_HEADER_SIZE;
    static final int FREE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT;
    /** maximal number of free logical rowids per page */
    static final int FREE_RECORDS_PER_PAGE = (Storage.PAGE_SIZE - FREE_HEADER_SIZE) / 6;

    /**
     * Creates a logical rowid manager using the indicated record file and page manager
     */
    LogicalRowIdManager(PageFile file, PageManager pageman) throws IOException {
        this.file = file;
        this.pageman = pageman;
    }

    /**
     * Creates a new logical rowid pointing to the indicated physical id
     *
     * @param physloc physical location to point to
     * @return logical recid
     */
    long insert(final long physloc) throws IOException {
        // check whether there's a free rowid to reuse
        long retval = getFreeSlot();
        if (retval == 0) {
            // no. This means that we bootstrap things by allocating
            // a new translation page and freeing all the rowids on it.
            long firstPage = pageman.allocate(Magic.TRANSLATION_PAGE);
            short curOffset = Magic.PAGE_HEADER_SIZE;
            for (int i = 0; i < ELEMS_PER_PAGE; i++) {
                putFreeSlot(((-firstPage) << Storage.PAGE_SIZE_SHIFT) + (long) curOffset);

                curOffset += Magic.PhysicalRowId_SIZE;
            }

            retval = getFreeSlot();
            if (retval == 0) {
                throw new Error("couldn't obtain free translation");
            }
        }
        // write the translation.
        update(retval, physloc);
        return retval;
    }

    /**
     * Insert at a forced location; use only for defragmentation!!
     *
     * @param logicalRowId
     * @param physLoc
     * @throws IOException
     */
    void forceInsert(final long logicalRowId, final long physLoc) throws IOException {
        if (fetch(logicalRowId) != 0)
            throw new Error("can not forceInsert, record already exists: " + logicalRowId);

        update(logicalRowId, physLoc);
    }

    /**
     * Releases the indicated logical rowid.
     */
    void delete(final long logicalrowid) throws IOException {
        // zero out the old location; needed for defragmentation
        final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
        final PageIo xlatPage = file.get(pageId);
        xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), 0);
        file.release(pageId, true);
        putFreeSlot(logicalrowid);
    }

    /**
     * Updates the mapping
     *
     * @param logicalrowid The logical rowid
     * @param physloc      The physical rowid
     */
    void update(final long logicalrowid, final long physloc) throws IOException {

        final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
        final PageIo xlatPage = file.get(pageId);
        xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), physloc);
        file.release(pageId, true);
    }

    /**
     * Returns a mapping
     *
     * @param logicalrowid The logical rowid
     * @return The physical rowid, or 0 if it does not exist
     */
    long fetch(long logicalrowid) throws IOException {
        final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
        final long last = pageman.getLast(Magic.TRANSLATION_PAGE);
        if (last - 1 > pageId)
            return 0;

        final short offset = (short) (logicalrowid & Storage.OFFSET_MASK);

        final PageIo xlatPage = file.get(pageId);
        final long ret = xlatPage.pageHeaderGetLocation(offset);

        file.release(pageId, false);
        return ret;
    }

    void commit() throws IOException {
        if (freeRecordsInTransSize == 0) return;

        long freeRecPageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
        if (freeRecPageId == 0) {
            // allocate new
            freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
        }
        PageIo freeRecPage = file.get(freeRecPageId);
        // write all uncommitted free records
        for (int rowPos = 0; rowPos < freeRecordsInTransSize; rowPos++) {
            short count = freeRecPage.readShort(OFFSET_FREE_COUNT);
            if (count == FREE_RECORDS_PER_PAGE) {
                // allocate new free recid page
                file.release(freeRecPage);
                freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
                freeRecPage = file.get(freeRecPageId);
                freeRecPage.writeShort(FREE_RECORDS_PER_PAGE, (short) 0);
                count = 0;
            }
            final int offset = (count) * 6 + FREE_HEADER_SIZE;
            // write free recid and increase counter
            freeRecPage.writeSixByteLong(offset, freeRecordsInTransRowid[rowPos]);
            count++;
            freeRecPage.writeShort(OFFSET_FREE_COUNT, count);

        }
        file.release(freeRecPage);

        clearFreeRecidsInTransaction();
    }

    private void clearFreeRecidsInTransaction() {
        if (freeRecordsInTransRowid.length > 128)
            freeRecordsInTransRowid = new long[4];
        freeRecordsInTransSize = 0;
    }

    void rollback() throws IOException {
        clearFreeRecidsInTransaction();
    }

    /**
     * Returns a free logical rowid, or
     * 0 if nothing was found.
     */
    long getFreeSlot() throws IOException {
        if (freeRecordsInTransSize != 0) {
            return freeRecordsInTransRowid[--freeRecordsInTransSize];
        }

        final long logicFreePageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
        if (logicFreePageId == 0) {
            return 0;
        }
        PageIo logicFreePage = file.get(logicFreePageId);
        short recCount = logicFreePage.readShort(OFFSET_FREE_COUNT);
        if (recCount <= 0) {
            throw new InternalError();
        }

        final int offset = (recCount - 1) * 6 + FREE_HEADER_SIZE;
        final long ret = logicFreePage.readSixByteLong(offset);

        recCount--;

        if (recCount > 0) {
            // decrease counter and zero out old record
            logicFreePage.writeSixByteLong(offset, 0);
            logicFreePage.writeShort(OFFSET_FREE_COUNT, recCount);
            file.release(logicFreePage);
        } else {
            // release this page
            file.release(logicFreePage);
            pageman.free(Magic.FREELOGIDS_PAGE, logicFreePageId);
        }

        return ret;
    }

    /**
     * Puts the indicated rowid on the free list
     */
    void putFreeSlot(long rowid) throws IOException {
        // ensure capacity
        if (freeRecordsInTransSize == freeRecordsInTransRowid.length)
            freeRecordsInTransRowid = Arrays.copyOf(freeRecordsInTransRowid, freeRecordsInTransRowid.length * 4);
        // add record and increase size
        freeRecordsInTransRowid[freeRecordsInTransSize] = rowid;
        freeRecordsInTransSize++;
    }

}
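The manager packs a (page, offset) pair into one long: insert() composes ((-pageId) << Storage.PAGE_SIZE_SHIFT) + offset, and fetch()/update()/delete() decompose it again. A worked sketch with assumed constants (PAGE_SIZE_SHIFT = 12 for 4096-byte pages, and OFFSET_MASK taken to be the low 12 bits -- both are assumptions for illustration, not values read from this patch):

class RowIdDemo {
    static final int PAGE_SIZE_SHIFT = 12;                      // assumed: 4096-byte pages
    static final long OFFSET_MASK = (1L << PAGE_SIZE_SHIFT) - 1; // assumed low-bits mask

    public static void main(String[] args) {
        long pageId = -3;   // translation pages carry negative ids in this scheme
        short offset = 14;  // e.g. Magic.PAGE_HEADER_SIZE

        // compose, as in insert()
        long rowid = ((-pageId) << PAGE_SIZE_SHIFT) + offset;   // 3*4096 + 14 = 12302

        // decompose, as in fetch()/update()/delete()
        long backPage = -(rowid >>> PAGE_SIZE_SHIFT);           // -3
        long backOffset = rowid & OFFSET_MASK;                  // 14
        System.out.println(backPage + " / " + backOffset);
    }
}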
@@ -0,0 +1,432 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.jdbm;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * Hash Map which uses a primitive long as key.
 * Main advantage is that a new instance of Long does not have to be created for each lookup.
 * <p/>
 * This code comes from Android, which in turn comes from Apache Harmony.
 * This class was modified to use primitive longs and stripped down to consume less space.
 * <p/>
 * Author of JDBM modifications: Jan Kotek
 */
class LongHashMap<V> implements Serializable {
    private static final long serialVersionUID = 362499999763181265L;

    private int elementCount;

    private Entry<V>[] elementData;

    private final float loadFactor;

    private int threshold;

    private int defaultSize = 16;

    private transient Entry<V> reuseAfterDelete = null;

    static final class Entry<V> implements Serializable {
        private static final long serialVersionUID = 362445231113181265L;

        Entry<V> next;

        V value;

        long key;

        Entry(long theKey) {
            this.key = theKey;
            this.value = null;
        }
    }

    static class HashMapIterator<V> implements Iterator<V> {
        private int position = 0;

        boolean canRemove = false;

        Entry<V> entry;

        Entry<V> lastEntry;

        final LongHashMap<V> associatedMap;

        HashMapIterator(LongHashMap<V> hm) {
            associatedMap = hm;
        }

        public boolean hasNext() {
            if (entry != null) {
                return true;
            }

            Entry<V>[] elementData = associatedMap.elementData;
            int length = elementData.length;
            int newPosition = position;
            boolean result = false;

            while (newPosition < length) {
                if (elementData[newPosition] == null) {
                    newPosition++;
                } else {
                    result = true;
                    break;
                }
            }

            position = newPosition;
            return result;
        }

        public V next() {

            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            Entry<V> result;
            Entry<V> _entry = entry;
            if (_entry == null) {
                result = lastEntry = associatedMap.elementData[position++];
                entry = lastEntry.next;
            } else {
                if (lastEntry.next != _entry) {
                    lastEntry = lastEntry.next;
                }
                result = _entry;
                entry = _entry.next;
            }
            canRemove = true;
            return result.value;
        }

        public void remove() {
            if (!canRemove) {
                throw new IllegalStateException();
            }

            canRemove = false;

            if (lastEntry.next == entry) {
                while (associatedMap.elementData[--position] == null) {
                    // Do nothing
                }
                associatedMap.elementData[position] = associatedMap.elementData[position].next;
                entry = null;
            } else {
                lastEntry.next = entry;
            }
            if (lastEntry != null) {
                Entry<V> reuse = lastEntry;
                lastEntry = null;
                reuse.key = Long.MIN_VALUE;
                reuse.value = null;
                associatedMap.reuseAfterDelete = reuse;
            }

            associatedMap.elementCount--;
        }
    }

    @SuppressWarnings("unchecked")
    private Entry<V>[] newElementArray(int s) {
        return new Entry[s];
    }

    /**
     * Constructs a new empty {@code LongHashMap} instance.
     *
     * @since Android 1.0
     */
    public LongHashMap() {
        this(16);
    }

    /**
     * Constructs a new {@code LongHashMap} instance with the specified capacity.
     *
     * @param capacity the initial capacity of this hash map.
     * @throws IllegalArgumentException when the capacity is less than zero.
     * @since Android 1.0
     */
    public LongHashMap(int capacity) {
        defaultSize = capacity;
        if (capacity >= 0) {
            elementCount = 0;
            elementData = newElementArray(capacity == 0 ? 1 : capacity);
            loadFactor = 0.75f; // Default load factor of 0.75
            computeMaxSize();
        } else {
            throw new IllegalArgumentException();
        }
    }

    // BEGIN android-changed

    /**
     * Removes all mappings from this hash map, leaving it empty.
     *
     * @see #isEmpty
     * @see #size
     * @since Android 1.0
     */
    public void clear() {
        if (elementCount > 0) {
            elementCount = 0;
        }
        if (elementData.length > 1024 && elementData.length > defaultSize)
            elementData = new Entry[defaultSize];
        else
            Arrays.fill(elementData, null);
        computeMaxSize();
    }
    // END android-changed

    private void computeMaxSize() {
        threshold = (int) (elementData.length * loadFactor);
    }

    /**
     * Returns the value of the mapping with the specified key.
     *
     * @param key the key.
     * @return the value of the mapping with the specified key, or {@code null}
     *         if no mapping for the specified key is found.
     * @since Android 1.0
     */
    public V get(final long key) {

        final int hash = powerHash(key);
        final int index = (hash & 0x7FFFFFFF) % elementData.length;

        // find non null entry
        Entry<V> m = elementData[index];
        while (m != null) {
            if (key == m.key)
                return m.value;
            m = m.next;
        }

        return null;
    }

    /**
     * Returns whether this map is empty.
     *
     * @return {@code true} if this map has no elements, {@code false}
     *         otherwise.
     * @see #size()
     * @since Android 1.0
     */
    public boolean isEmpty() {
        return elementCount == 0;
    }

    /**
     * Maps the specified key to the specified value.
     *
     * @param key   the key.
     * @param value the value.
     * @return the value of any previous mapping with the specified key or
     *         {@code null} if there was no such mapping.
     * @since Android 1.0
     */
    public V put(final long key, final V value) {

        int hash = powerHash(key);
        int index = (hash & 0x7FFFFFFF) % elementData.length;

        // find non null entry
        Entry<V> entry = elementData[index];
        while (entry != null && key != entry.key) {
            entry = entry.next;
        }

        if (entry == null) {
            if (++elementCount > threshold) {
                rehash();
                index = (hash & 0x7FFFFFFF) % elementData.length;
            }
            entry = createHashedEntry(key, index);
        }

        V result = entry.value;
        entry.value = value;
        return result;
    }

    Entry<V> createHashedEntry(final long key, final int index) {
        Entry<V> entry = reuseAfterDelete;
        if (entry == null) {
            entry = new Entry<V>(key);
        } else {
            reuseAfterDelete = null;
            entry.key = key;
            entry.value = null;
        }

        entry.next = elementData[index];
        elementData[index] = entry;
        return entry;
    }

    void rehash(final int capacity) {
        int length = (capacity == 0 ? 1 : capacity << 1);

        Entry<V>[] newData = newElementArray(length);
        for (int i = 0; i < elementData.length; i++) {
            Entry<V> entry = elementData[i];
            while (entry != null) {
                int index = (powerHash(entry.key) & 0x7FFFFFFF) % length;
                Entry<V> next = entry.next;
                entry.next = newData[index];
                newData[index] = entry;
                entry = next;
            }
        }
        elementData = newData;
        computeMaxSize();
    }

    void rehash() {
        rehash(elementData.length);
    }

    /**
     * Removes the mapping with the specified key from this map.
     *
     * @param key the key of the mapping to remove.
     * @return the value of the removed mapping or {@code null} if no mapping
     *         for the specified key was found.
     * @since Android 1.0
     */
    public V remove(final long key) {
        Entry<V> entry = removeEntry(key);
        if (entry == null)
            return null;
        V ret = entry.value;
        entry.value = null;
        entry.key = Long.MIN_VALUE;
        reuseAfterDelete = entry;

        return ret;
    }

    Entry<V> removeEntry(final long key) {
        Entry<V> last = null;

        final int hash = powerHash(key);
        final int index = (hash & 0x7FFFFFFF) % elementData.length;
        Entry<V> entry = elementData[index];

        while (true) {
            if (entry == null) {
                return null;
            }

            if (key == entry.key) {
                if (last == null) {
                    elementData[index] = entry.next;
                } else {
                    last.next = entry.next;
                }
                elementCount--;
                return entry;
            }

            last = entry;
            entry = entry.next;
        }
    }

    /**
     * Returns the number of elements in this map.
     *
     * @return the number of elements in this map.
     * @since Android 1.0
     */
    public int size() {
        return elementCount;
    }

    /**
     * @return iterator over the values in the map
     */
    public Iterator<V> valuesIterator() {
        return new HashMapIterator<V>(this);
    }

    private static int powerHash(final long key) {
        int h = (int) (key ^ (key >>> 32));
        h ^= (h >>> 20) ^ (h >>> 12);
        return h ^ (h >>> 7) ^ (h >>> 4);
    }

}
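Inside the org.apache.jdbm package the map is used like a plain HashMap, minus the Long boxing on every call. A small usage sketch (it only compiles next to this class, since LongHashMap is package-private; the names and values are illustrative):

import java.util.Iterator;

class LongHashMapDemo {
    public static void main(String[] args) {
        LongHashMap<String> recidNames = new LongHashMap<String>();
        recidNames.put(42L, "users");              // primitive long key: no Long allocation
        recidNames.put(43L, "orders");

        System.out.println(recidNames.get(42L));   // users
        recidNames.remove(43L);                    // freed Entry is cached for reuse
        System.out.println(recidNames.size());     // 1

        Iterator<String> it = recidNames.valuesIterator();
        while (it.hasNext()) System.out.println(it.next());
    }
}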
@@ -0,0 +1,106 @@
/*
Copyright (c) 2008, Nathan Sweet
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
 * Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.jdbm;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Packing utility for non-negative <code>long</code> and <code>int</code> values.
 * <p/>
 * Originally developed for Kryo by Nathan Sweet.
 * Modified for JDBM by Jan Kotek
 */
public final class LongPacker {

    /**
     * Pack a non-negative long into the output stream.
     * It will occupy 1-10 bytes depending on the value (lower values occupy less space).
     *
     * @param os
     * @param value
     * @throws IOException
     */
    static public void packLong(DataOutput os, long value) throws IOException {

        if (value < 0) {
            throw new IllegalArgumentException("negative value: v=" + value);
        }

        while ((value & ~0x7FL) != 0) {
            os.write((((int) value & 0x7F) | 0x80));
            value >>>= 7;
        }
        os.write((byte) value);
    }

    /**
     * Unpack a positive long value from the input stream.
     *
     * @param is The input stream.
     * @return The long value.
     * @throws java.io.IOException
     */
    static public long unpackLong(DataInput is) throws IOException {

        long result = 0;
        for (int offset = 0; offset < 64; offset += 7) {
            long b = is.readUnsignedByte();
            result |= (b & 0x7F) << offset;
            if ((b & 0x80) == 0) {
                return result;
            }
        }
        throw new Error("Malformed long.");
    }

    /**
     * Pack a non-negative int into the output stream.
     * It will occupy 1-5 bytes depending on the value (lower values occupy less space).
     *
     * @param os
     * @param value
     * @throws IOException
     */
    static public void packInt(DataOutput os, int value) throws IOException {

        if (value < 0) {
            throw new IllegalArgumentException("negative value: v=" + value);
        }

        while ((value & ~0x7F) != 0) {
            os.write(((value & 0x7F) | 0x80));
            value >>>= 7;
        }

        os.write((byte) value);
    }

    static public int unpackInt(DataInput is) throws IOException {
        for (int offset = 0, result = 0; offset < 32; offset += 7) {
            int b = is.readUnsignedByte();
            result |= (b & 0x7F) << offset;
            if ((b & 0x80) == 0) {
                return result;
            }
        }
        throw new Error("Malformed integer.");
    }

}
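packLong() emits 7 payload bits per byte, using the high bit as a continuation flag, so small values stay small on disk. A round-trip sketch, runnable as-is next to this class:

import java.io.*;

class LongPackerDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);

        LongPacker.packLong(out, 5);        // fits in 7 bits  -> 1 byte
        LongPacker.packLong(out, 300);      // needs two 7-bit groups -> 2 bytes
        LongPacker.packLong(out, 1L << 40); // 41 bits -> six 7-bit groups -> 6 bytes

        byte[] data = bos.toByteArray();
        System.out.println("encoded size: " + data.length + " bytes"); // 1 + 2 + 6 = 9

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        System.out.println(LongPacker.unpackLong(in)); // 5
        System.out.println(LongPacker.unpackLong(in)); // 300
        System.out.println(LongPacker.unpackLong(in)); // 1099511627776
    }
}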
@@ -0,0 +1,105 @@
/*******************************************************************************
 * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

package org.apache.jdbm;

/**
 * This interface contains magic cookies.
 */
interface Magic {
    /**
     * Magic cookie at start of file
     */
    short FILE_HEADER = 0x1350;

    /**
     * Magic for pages. They're offset by the page type magic codes.
     */
    short PAGE_MAGIC = 0x1351;

    /**
     * Magics for pages in certain lists.
     */
    short FREE_PAGE = 0;
    short USED_PAGE = 1;
    short TRANSLATION_PAGE = 2;
    short FREELOGIDS_PAGE = 3;
    short FREEPHYSIDS_PAGE = 4;
    short FREEPHYSIDS_ROOT_PAGE = 5;

    /**
     * Number of lists in a file
     */
    short NLISTS = 6;

    /**
     * Magic for transaction file
     */
    short LOGFILE_HEADER = 0x1360;

    /**
     * Size of an externalized byte
     */
    short SZ_BYTE = 1;
    /**
     * Size of an externalized short
     */
    short SZ_SHORT = 2;

    /**
     * Size of an externalized int
     */
    short SZ_INT = 4;
    /**
     * Size of an externalized long
     */
    short SZ_LONG = 8;

    /**
     * Size of a six byte long
     */
    short SZ_SIX_BYTE_LONG = 6;

    /** offsets in file header (zero page in file) */
    short FILE_HEADER_O_MAGIC = 0; // short magic
    short FILE_HEADER_O_LISTS = Magic.SZ_SHORT; // long[2*NLISTS]
    int FILE_HEADER_O_ROOTS = FILE_HEADER_O_LISTS + (Magic.NLISTS * 2 * Magic.SZ_LONG);
    /**
     * The number of "root" rowids available in the file.
     */
    int FILE_HEADER_NROOTS = 16;

    short PAGE_HEADER_O_MAGIC = 0; // short magic
    short PAGE_HEADER_O_NEXT = Magic.SZ_SHORT;
    short PAGE_HEADER_O_PREV = PAGE_HEADER_O_NEXT + Magic.SZ_SIX_BYTE_LONG;
    short PAGE_HEADER_SIZE = PAGE_HEADER_O_PREV + Magic.SZ_SIX_BYTE_LONG;

    short PhysicalRowId_O_LOCATION = 0; // long page
    // short PhysicalRowId_O_OFFSET = Magic.SZ_SIX_BYTE_LONG; // short offset
    int PhysicalRowId_SIZE = Magic.SZ_SIX_BYTE_LONG;

    short DATA_PAGE_O_FIRST = PAGE_HEADER_SIZE; // short firstrowid
    short DATA_PAGE_O_DATA = (short) (DATA_PAGE_O_FIRST + Magic.SZ_SHORT);
    short DATA_PER_PAGE = (short) (Storage.PAGE_SIZE - DATA_PAGE_O_DATA);

}
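The header offsets above are pure arithmetic over the size constants. Spelled out with concrete numbers (a sanity check, not new behaviour):

    FILE_HEADER_O_LISTS = SZ_SHORT                          = 2
    FILE_HEADER_O_ROOTS = 2 + NLISTS * 2 * SZ_LONG          = 2 + 6 * 2 * 8 = 98
    PAGE_HEADER_O_NEXT  = SZ_SHORT                          = 2
    PAGE_HEADER_O_PREV  = 2 + SZ_SIX_BYTE_LONG              = 8
    PAGE_HEADER_SIZE    = 8 + SZ_SIX_BYTE_LONG              = 14
    DATA_PAGE_O_FIRST   = PAGE_HEADER_SIZE                  = 14
    DATA_PAGE_O_DATA    = 14 + SZ_SHORT                     = 16
    DATA_PER_PAGE       = Storage.PAGE_SIZE - 16

So every data page spends 16 bytes on bookkeeping (magic, next/prev six-byte links, first-rowid short) before payload begins.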
@@ -0,0 +1,26 @@
package org.apache.jdbm;

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInput;
import java.util.ArrayList;

/**
 * An alternative to <code>java.io.ObjectInputStream</code> which uses more efficient serialization
 */
public class ObjectInputStream2 extends DataInputStream implements ObjectInput {

    public ObjectInputStream2(InputStream in) {
        super(in);
    }

    public Object readObject() throws ClassNotFoundException, IOException {
        // first read class data
        ArrayList<SerialClassInfo.ClassInfo> info = SerialClassInfo.serializer.deserialize(this);

        Serialization ser = new Serialization(null, 0, info);
        return ser.deserialize(this);
    }
}
@@ -0,0 +1,25 @@
package org.apache.jdbm;

import java.io.*;
import java.util.ArrayList;

/**
 * An alternative to <code>java.io.ObjectOutputStream</code> which uses more efficient serialization
 */
public class ObjectOutputStream2 extends DataOutputStream implements ObjectOutput {

    public ObjectOutputStream2(OutputStream out) {
        super(out);
    }

    public void writeObject(Object obj) throws IOException {
        ArrayList registered = new ArrayList();
        Serialization ser = new Serialization(null, 0, registered);

        byte[] data = ser.serialize(obj);
        // write class info first
        SerialClassInfo.serializer.serialize(this, registered);
        // and then write the data
        write(data);
    }
}
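The two streams are symmetric: writeObject() emits the class-info registry followed by the payload, and readObject() consumes them in the same order. A round-trip sketch (assuming, as the classes above imply, that the Serialization codec can handle the value being written):

import java.io.*;
import java.util.Arrays;

class Stream2Demo {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream2 out = new ObjectOutputStream2(bos);
        out.writeObject(Arrays.asList("a", "b", "c")); // class info, then data

        ObjectInputStream2 in =
                new ObjectInputStream2(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(in.readObject()); // [a, b, c]
    }
}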