Compare commits

..

2 Commits

Author SHA1 Message Date
20c3994507 Initial plugin version 2018-08-27 23:11:59 +02:00
73c257af03 Added UI support for real value signals 2018-08-27 16:27:11 +02:00
502 changed files with 19112 additions and 413757 deletions

View File

@ -1,73 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>com.minres.scviewer.parent</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.m2e.core.maven2Builder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
</natures>
<filteredResources>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-features</arguments>
</matcher>
</filter>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-plugins</arguments>
</matcher>
</filter>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-com.minres.*</arguments>
</matcher>
</filter>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-product</arguments>
</matcher>
</filter>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-tests</arguments>
</matcher>
</filter>
<filter>
<id>0</id>
<name></name>
<type>10</type>
<matcher>
<id>org.eclipse.ui.ide.multiFilter</id>
<arguments>1.0-name-matches-false-false-p2repositories</arguments>
</matcher>
</filter>
</filteredResources>
</projectDescription>

View File

@ -6,18 +6,30 @@ created by the SystemC VCD trace implementation and the SystemC Verification Lib
For further description of the SCV please refer to
http://www.accellera.org/activities/committees/systemc-verification.
> If you encounter issues when running on Linux, please try launching it as `SWT_GTK3=0 scviewer`, as there are known issues with GTK3.
The viewer has the following features:
- support of VCD files (compressed and uncompressed)
- real numbers
- showing vectors and real numbers as analog (step-wise & continuous)
- various value representations of bit vectors
- support of SCV transaction recordings in various formats
- text log files (compressed and uncompressed)
- sqlite based
- visualization of transaction relations
The viewer is in an early alpha stage and not yet ready for production use!
The plugins are structured as follows:
- com.minres.scviewer.database
the interface defining the API to access the database and the implementation for VCD
- com.minres.scviewer.database.text
an implementation of the API to read the text files generated by the SCV
sc_tr_text database
- com.minres.scviewer.database.sqlite
an implementation of the API to read the files generated by the implementation in the
sc_tr_sqlite project using a SQLite based database
- com.minres.scviewer.database.test
some JUnit tests of the 3 back ends
- com.minres.scviewer.ui
the viewer itself to display the transactions and associated views like the
outline of the DB and the properties of the transaction
- com.minres.scviewer.feature
the feature combining the plugins above into a somewhat usable form
- scv_tr_sqlite
a C++ project containing the SQLite based SCV database implementation and the scv4tlm
socket implementations.
A simple example (scv_tr_recording_example.cpp) for testing the database is
provided.
To build the plugins, the Eclipse SDK or PDE can be used. In both cases the Groovy
Eclipse plugin (http://groovy.codehaus.org/Eclipse+Plugin or the Eclipse Marketplace) has to be
installed.
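For orientation, here is a minimal sketch of how a client drives the database API described above. It is an illustration only, not part of this changeset; it mirrors the DatabaseServicesTest added further down in this diff, the class and method names are hypothetical, and the factory is assumed to be injected through an OSGi declarative-services bind method:

    import java.io.File;

    import com.minres.scviewer.database.IWaveformDb;
    import com.minres.scviewer.database.IWaveformDbFactory;

    // hypothetical client, mirroring DatabaseServicesTest further down in this diff
    public class LoaderSketch {
        private IWaveformDbFactory waveformDbFactory; // assumed bound via OSGi declarative services

        public void dump() throws Exception {
            IWaveformDb db = waveformDbFactory.getDatabase();
            File f = new File("inputs/my_db.vcd").getAbsoluteFile();
            db.load(f); // handled by a matching IWaveformDbLoader back end (VCD here)
            System.out.println(db.getAllWaves().size() + " waveforms, "
                    + db.getChildNodes().size() + " top-level hierarchy nodes");
        }
    }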
@ -27,5 +39,4 @@ TODO
- add more tests
- move to feature based product to allow automatic updates
- improve graphics
- catch-up e3 plugin to functionality of e4 product
- add calculated traces
- catch-up e3 plugin to functionality of e4 product

View File

@ -1,7 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry exported="true" kind="lib" path="json-20180813.jar" sourcepath="json-20180813-sources.jar"/>
<classpathentry exported="true" kind="lib" path="leveldb-0.11-SNAPSHOT-uber.jar" sourcepath="leveldb-0.11-SNAPSHOT-sources.jar"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
<classpathentry kind="src" path="src"/>

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>com.minres.scviewer.database.leveldb</name>
<name>com.minres.scviewer.database.binary</name>
<comment></comment>
<projects>
</projects>

View File

@ -0,0 +1,14 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Binary Database
Bundle-SymbolicName: com.minres.scviewer.database.binary
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GmbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Import-Package: org.eclipse.osgi.framework.console;version="1.0.0",
org.osgi.framework;version="1.3.0",
org.osgi.service.component.annotations;version="1.2.0";resolution:="optional",
org.osgi.util.tracker;version="1.3.1"
Automatic-Module-Name: com.minres.scviewer.database.binary
Service-Component: OSGI-INF/*.xml
Require-Bundle: com.minres.scviewer.database

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="LevelDbLoader">
<implementation class="com.minres.scviewer.database.leveldb.LevelDBLoader"/>
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" name="BinaryDbLoader">
<implementation class="com.minres.scviewer.database.binary.BinaryDbLoader"/>
<service>
<provide interface="com.minres.scviewer.database.IWaveformDbLoader"/>
</service>

View File

@ -0,0 +1,5 @@
source.. = src/
output.. = bin/
bin.includes = META-INF/,\
.,\
OSGI-INF/

View File

@ -0,0 +1,56 @@
/*******************************************************************************
* Copyright (c) 2015 MINRES Technologies GmbH and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* MINRES Technologies GmbH - initial API and implementation
*******************************************************************************/
package com.minres.scviewer.database.binary;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import com.minres.scviewer.database.IWaveform;
import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbLoader;
import com.minres.scviewer.database.IWaveformEvent;
import com.minres.scviewer.database.RelationType;
public class BinaryDbLoader implements IWaveformDbLoader {
private List<RelationType> usedRelationsList = new ArrayList<>();
private IWaveformDb db;
public BinaryDbLoader() {
}
@Override
public Long getMaxTime() {
return 0L;
}
@Override
public List<IWaveform<? extends IWaveformEvent>> getAllWaves() {
List<IWaveform<? extends IWaveformEvent>> streams=new ArrayList<IWaveform<? extends IWaveformEvent>>();
return streams;
}
@Override
public boolean load(IWaveformDb db, File file) throws Exception {
return false;
}
@Override
public Collection<RelationType> getAllRelationTypes(){
return usedRelationsList;
}
}
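This stub is published as an IWaveformDbLoader service through the OSGI-INF/component.xml shown above. Since the manifest also imports org.osgi.service.component.annotations (marked optional), the same wiring could be expressed with annotations instead of XML, assuming the build tooling generates the component description from them; a hypothetical annotation-based equivalent, not part of this commit:

    package com.minres.scviewer.database.binary;

    import java.io.File;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    import org.osgi.service.component.annotations.Component;

    import com.minres.scviewer.database.IWaveform;
    import com.minres.scviewer.database.IWaveformDb;
    import com.minres.scviewer.database.IWaveformDbLoader;
    import com.minres.scviewer.database.IWaveformEvent;
    import com.minres.scviewer.database.RelationType;

    // hypothetical annotation-based variant of the XML component declaration above
    @Component(name = "BinaryDbLoader", service = IWaveformDbLoader.class)
    public class BinaryDbLoader implements IWaveformDbLoader {

        private final List<RelationType> usedRelationsList = new ArrayList<>();

        @Override
        public Long getMaxTime() {
            return 0L; // placeholder, no data yet
        }

        @Override
        public List<IWaveform<? extends IWaveformEvent>> getAllWaves() {
            return new ArrayList<IWaveform<? extends IWaveformEvent>>();
        }

        @Override
        public boolean load(IWaveformDb db, File file) throws Exception {
            return false; // accept nothing until the binary format is implemented
        }

        @Override
        public Collection<RelationType> getAllRelationTypes() {
            return usedRelationsList;
        }
    }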

View File

@ -0,0 +1,2 @@
eclipse.preferences.version=1
encoding/<project>=UTF-8

View File

@ -5,11 +5,13 @@ Bundle-SymbolicName: com.minres.scviewer.database.sqlite
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GmbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0"
Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0",
org.eclipse.equinox.util;bundle-version="1.0.500",
org.eclipse.equinox.ds;bundle-version="1.4.200",
org.eclipse.osgi.services;bundle-version="3.4.0"
Bundle-ClassPath: .,sqlite-jdbc-3.8.7.jar
Service-Component: OSGI-INF/component.xml
Bundle-ActivationPolicy: lazy
Embed-Dependency: sqlite-jdbc
Embedded-Artifacts: sqlite-jdbc-3.8.7.jar;g="org.xerial";
a="sqlite-jdbc";v="3.8.7"
Automatic-Module-Name: com.minres.scviewer.database.sqlite

View File

@ -4,8 +4,8 @@
<parent>
<groupId>com.minres.scviewer</groupId>
<artifactId>com.minres.scviewer.parent</artifactId>
<version>2.0.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../com.minres.scviewer.parent</relativePath>
</parent>
<packaging>eclipse-plugin</packaging>
<dependencies>
@ -15,5 +15,4 @@
<version>3.8.7</version>
</dependency>
</dependencies>
<version>1.0.0-SNAPSHOT</version>
</project>
</project>

View File

@ -13,8 +13,6 @@ package com.minres.scviewer.database.sqlite;
import java.beans.IntrospectionException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.sql.SQLException;
import java.util.ArrayList;
@ -24,6 +22,7 @@ import java.util.List;
import com.minres.scviewer.database.IWaveform;
import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbLoader;
import com.minres.scviewer.database.IWaveformEvent;
import com.minres.scviewer.database.RelationType;
import com.minres.scviewer.database.sqlite.db.IDatabase;
import com.minres.scviewer.database.sqlite.db.SQLiteDatabase;
@ -61,9 +60,9 @@ public class SQLiteDbLoader implements IWaveformDbLoader {
}
@Override
public Collection<IWaveform> getAllWaves() {
public List<IWaveform<? extends IWaveformEvent>> getAllWaves() {
SQLiteDatabaseSelectHandler<ScvStream> handler = new SQLiteDatabaseSelectHandler<ScvStream>(ScvStream.class, database);
List<IWaveform> streams=new ArrayList<IWaveform>();
List<IWaveform<? extends IWaveformEvent>> streams=new ArrayList<IWaveform<? extends IWaveformEvent>>();
try {
for(ScvStream scvStream:handler.selectObjects()){
TxStream stream = new TxStream(database, db, scvStream);
@ -81,21 +80,15 @@ public class SQLiteDbLoader implements IWaveformDbLoader {
@Override
public boolean load(IWaveformDb db, File file) throws Exception {
if(file.isDirectory() || !file.exists()) return false;
this.db=db;
try {
FileInputStream fis = new FileInputStream(file);
byte[] buffer = new byte[x.length];
int read = fis.read(buffer, 0, x.length);
fis.close();
if (read == x.length)
for (int i = 0; i < x.length; i++)
if (buffer[i] != x[i]) return false;
} catch(FileNotFoundException e) {
return false;
} catch(IOException e) { //if an I/O error occurs
return false;
}
FileInputStream fis = new FileInputStream(file);
byte[] buffer = new byte[x.length];
int read = fis.read(buffer, 0, x.length);
fis.close();
if (read == x.length)
for (int i = 0; i < x.length; i++)
if (buffer[i] != x[i]) return false;
database=new SQLiteDatabase(file.getAbsolutePath());
database.setData("TIMERESOLUTION", 1L);
SQLiteDatabaseSelectHandler<ScvSimProps> handler = new SQLiteDatabaseSelectHandler<ScvSimProps>(ScvSimProps.class, database);
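The load() implementation above decides whether a file belongs to this back end by comparing its first bytes against a signature array x that is declared outside this hunk. For reference, a self-contained version of that kind of check, assuming the standard SQLite header "SQLite format 3" followed by a NUL byte as the signature (the actual contents of x are not shown in this diff, and the helper name is made up):

    import java.io.File;
    import java.io.FileInputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    // hypothetical helper; the real loader keeps its signature bytes in a field named x
    class SqliteSniffSketch {
        static boolean looksLikeSqlite(File file) throws Exception {
            byte[] magic = "SQLite format 3\u0000".getBytes(StandardCharsets.US_ASCII);
            byte[] buffer = new byte[magic.length];
            try (FileInputStream fis = new FileInputStream(file)) {
                int read = fis.read(buffer, 0, magic.length);
                return read == magic.length && Arrays.equals(buffer, magic);
            }
        }
    }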

View File

@ -29,6 +29,7 @@ import com.minres.scviewer.database.ITxGenerator;
import com.minres.scviewer.database.ITxStream;
import com.minres.scviewer.database.IWaveform;
import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformEvent;
import com.minres.scviewer.database.RelationType;
import com.minres.scviewer.database.sqlite.db.IDatabase;
import com.minres.scviewer.database.sqlite.db.SQLiteDatabaseSelectHandler;
@ -116,7 +117,6 @@ public class TxStream extends HierNode implements ITxStream<ITxEvent> {
sb.append(scvStream.getId());
resultSet = statement.executeQuery(sb.toString());
while (resultSet.next()) {
if(maxConcurrency==null) maxConcurrency=0;
Object value = resultSet.getObject("concurrencyLevel");
if(value!=null)
maxConcurrency=(Integer) value;
@ -192,8 +192,8 @@ public class TxStream extends HierNode implements ITxStream<ITxEvent> {
}
@Override
public Boolean equals(IWaveform other) {
return(other instanceof TxStream && this.getId().equals(other.getId()));
public Boolean equals(IWaveform<? extends IWaveformEvent> other) {
return(other instanceof TxStream && this.getId()==other.getId());
}
}

View File

@ -0,0 +1,3 @@
eclipse.preferences.version=1
encoding//src/com/minres/scviewer/database/test/DatabaseServicesTest.java=UTF-8
encoding/<project>=UTF-8

View File

@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.pde.ui.JunitLaunchConfig">
<booleanAttribute key="append.args" value="true"/>
<stringAttribute key="application" value="org.eclipse.pde.junit.runtime.coretestapplication"/>
<booleanAttribute key="askclear" value="false"/>
<booleanAttribute key="automaticAdd" value="true"/>
<booleanAttribute key="automaticValidate" value="false"/>
<stringAttribute key="bootstrap" value=""/>
<stringAttribute key="checked" value="[NONE]"/>
<booleanAttribute key="clearConfig" value="true"/>
<booleanAttribute key="clearws" value="true"/>
<booleanAttribute key="clearwslog" value="false"/>
<stringAttribute key="configLocation" value="${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/pde-junit"/>
<booleanAttribute key="default" value="false"/>
<stringAttribute key="deselected_workspace_plugins" value="com.minres.scviewer.e4.application,com.minres.scviewer.ui"/>
<booleanAttribute key="includeOptional" value="true"/>
<stringAttribute key="location" value="${workspace_loc}/../junit-workspace"/>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
<listEntry value="/com.minres.scviewer.database.test"/>
</listAttribute>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="4"/>
</listAttribute>
<stringAttribute key="org.eclipse.jdt.junit.CONTAINER" value="=com.minres.scviewer.database.test"/>
<booleanAttribute key="org.eclipse.jdt.junit.KEEPRUNNING_ATTR" value="false"/>
<stringAttribute key="org.eclipse.jdt.junit.TESTNAME" value=""/>
<stringAttribute key="org.eclipse.jdt.junit.TEST_KIND" value="org.eclipse.jdt.junit.loader.junit4"/>
<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value=""/>
<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="-os ${target.os} -ws ${target.ws} -arch ${target.arch} -nl ${target.nl} -consoleLog"/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="com.minres.scviewer.database.test"/>
<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.pde.ui.workbenchClasspathProvider"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms40m -Xmx512m"/>
<stringAttribute key="pde.version" value="3.3"/>
<stringAttribute key="product" value="com.minres.scviewer.e4.product"/>
<booleanAttribute key="run_in_ui_thread" value="true"/>
<stringAttribute key="selected_target_plugins" value="com.google.guava@default:default,javax.annotation@default:default,javax.inject@default:default,javax.servlet@default:default,javax.xml@default:default,org.apache.ant@default:default,org.apache.commons.jxpath@default:default,org.apache.felix.gogo.command@default:default,org.apache.felix.gogo.runtime@default:default,org.codehaus.groovy@default:default,org.eclipse.ant.core@default:default,org.eclipse.core.commands@default:default,org.eclipse.core.contenttype@default:default,org.eclipse.core.expressions@default:default,org.eclipse.core.filesystem@default:default,org.eclipse.core.jobs@default:default,org.eclipse.core.resources@default:default,org.eclipse.core.runtime@default:true,org.eclipse.core.variables@default:default,org.eclipse.e4.core.contexts@default:default,org.eclipse.e4.core.di.annotations@default:default,org.eclipse.e4.core.di.extensions@default:default,org.eclipse.e4.core.di@default:default,org.eclipse.e4.core.services@default:default,org.eclipse.e4.emf.xpath@default:default,org.eclipse.e4.ui.di@default:default,org.eclipse.e4.ui.model.workbench@default:default,org.eclipse.e4.ui.services@default:default,org.eclipse.emf.common@default:default,org.eclipse.emf.ecore@default:default,org.eclipse.equinox.app@default:default,org.eclipse.equinox.bidi@default:default,org.eclipse.equinox.common@2:true,org.eclipse.equinox.ds@1:true,org.eclipse.equinox.preferences@default:default,org.eclipse.equinox.registry@default:default,org.eclipse.equinox.util@default:default,org.eclipse.jface@default:default,org.eclipse.osgi.compatibility.state@default:false,org.eclipse.osgi.services@default:default,org.eclipse.osgi@-1:true,org.eclipse.swt@default:default,org.hamcrest.core@default:default,org.junit@default:default"/>
<stringAttribute key="selected_workspace_plugins" value="com.minres.scviewer.database.binary@default:default,com.minres.scviewer.database.sqlite@default:true,com.minres.scviewer.database.test@default:default,com.minres.scviewer.database.text@default:true,com.minres.scviewer.database.ui.swt@default:default,com.minres.scviewer.database.ui@default:default,com.minres.scviewer.database.vcd@default:default,com.minres.scviewer.database@default:true,com.opcoach.e4.preferences@default:default"/>
<booleanAttribute key="show_selected_only" value="false"/>
<booleanAttribute key="tracing" value="false"/>
<booleanAttribute key="useCustomFeatures" value="false"/>
<booleanAttribute key="useDefaultConfig" value="true"/>
<booleanAttribute key="useDefaultConfigArea" value="false"/>
<booleanAttribute key="useProduct" value="false"/>
</launchConfiguration>

View File

@ -2,7 +2,7 @@ Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: SCViewer database tests
Bundle-SymbolicName: com.minres.scviewer.database.test
Bundle-Version: 1.0.1.qualifier
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GnbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Require-Bundle: org.junit,
@ -12,4 +12,3 @@ Require-Bundle: org.junit,
com.minres.scviewer.database.vcd;bundle-version="1.0.0"
Bundle-ActivationPolicy: lazy
Service-Component: OSGI-INF/component.xml
Automatic-Module-Name: com.minres.scviewer.database.test

View File

@ -0,0 +1,2 @@
/.scviewer.*
/.my_db.txlog*

View File

@ -2,12 +2,11 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>com.minres.scviewer.database.test</artifactId>
<version>1.0.1-SNAPSHOT</version>
<parent>
<groupId>com.minres.scviewer</groupId>
<artifactId>com.minres.scviewer.parent</artifactId>
<version>2.0.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../com.minres.scviewer.parent</relativePath>
</parent>
<packaging>eclipse-test-plugin</packaging>
<build>
@ -15,8 +14,17 @@
<plugin>
<groupId>org.eclipse.tycho</groupId>
<artifactId>tycho-surefire-plugin</artifactId>
<version>${tycho-version}</version>
<version>0.23.1</version>
<configuration>
<!-- <bundleStartLevel /> -->
<dependencies>
<dependency>
<type>p2-installable-unit</type>
<artifactId>org.eclipse.equinox.ds</artifactId>
</dependency>
</dependencies>
</configuration>
</plugin>
</plugins>
</build>
</project>
</project>

View File

@ -0,0 +1,90 @@
/*******************************************************************************
* Copyright (c) 2015 MINRES Technologies GmbH and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* MINRES Technologies GmbH - initial API and implementation
*******************************************************************************/
package com.minres.scviewer.database.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.minres.scviewer.database.IWaveformDb;
import com.minres.scviewer.database.IWaveformDbFactory;
public class DatabaseServicesTest {
private static IWaveformDbFactory waveformDbFactory;
private IWaveformDb waveformDb;
public synchronized void setFactory(IWaveformDbFactory service) {
waveformDbFactory = service;
}
public synchronized void unsetFactory(IWaveformDbFactory service) {
if (waveformDbFactory == service) {
waveformDbFactory = null;
}
}
@Before
public void setUp() throws Exception {
waveformDb=waveformDbFactory.getDatabase();
// Wait for OSGi dependencies
// for (int i = 0; i < 10; i++) {
// if (waveformDb.size() == 3) // Dependencies fulfilled
// return;
// Thread.sleep(1000);
// }
// assertEquals("OSGi dependencies unfulfilled", 3, WaveformDb.getLoaders().size());
}
@After
public void tearDown() throws Exception {
}
@Test
public void testVCD() throws Exception {
File f = new File("inputs/my_db.vcd").getAbsoluteFile();
assertTrue(f.exists());
waveformDb.load(f);
assertNotNull(waveformDb);
assertEquals(14, waveformDb.getAllWaves().size());
assertEquals(2, waveformDb.getChildNodes().size());
}
@Test
public void testTxSQLite() throws Exception {
File f = new File("inputs/my_db.txdb").getAbsoluteFile();
assertTrue(f.exists());
waveformDb.load(f);
assertNotNull(waveformDb);
assertEquals(3, waveformDb.getAllWaves().size());
assertEquals(1, waveformDb.getChildNodes().size());
}
@Test
public void testTxText() throws Exception {
File f = new File("inputs/my_db.txlog").getAbsoluteFile();
assertTrue(f.exists());
waveformDb.load(f);
assertNotNull(waveformDb);
assertEquals(3, waveformDb.getAllWaves().size());
assertEquals(1, waveformDb.getChildNodes().size());
}
}

View File

@ -2,6 +2,7 @@
<classpath>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
<classpathentry kind="src" path="src/"/>
<classpathentry kind="src" path="src"/>
<classpathentry exported="true" kind="con" path="GROOVY_DSL_SUPPORT"/>
<classpathentry kind="output" path="target/classes"/>
</classpath>

View File

@ -0,0 +1,2 @@
eclipse.preferences.version=1
groovy.compiler.level=23

View File

@ -0,0 +1,17 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Textual transaction database
Bundle-SymbolicName: com.minres.scviewer.database.text
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: MINRES Technologies GmbH
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Import-Package: com.minres.scviewer.database,
org.osgi.framework;version="1.3.0"
Require-Bundle: com.minres.scviewer.database;bundle-version="1.0.0",
org.codehaus.groovy;bundle-version="1.8.6",
org.eclipse.equinox.util;bundle-version="1.0.500",
org.eclipse.equinox.ds;bundle-version="1.4.200",
org.eclipse.osgi.services;bundle-version="3.4.0",
com.google.guava;bundle-version="15.0.0"
Service-Component: OSGI-INF/component.xml
Bundle-ActivationPolicy: lazy

View File

@ -10,16 +10,6 @@
###############################################################################
bin.includes = META-INF/,\
.,\
OSGI-INF/,\
lib/,\
lib/mapdb-3.0.7.jar,\
lib/eclipse-collections-9.2.0.jar,\
lib/eclipse-collections-api-9.2.0.jar,\
lib/eclipse-collections-forkjoin-9.2.0.jar,\
lib/kotlin-stdlib-1.2.42.jar,\
lib/lz4-1.3.0.jar,\
lib/elsa-3.0.0-M5.jar
bin.excludes = **/*.groovy,\
lib/mapdb-3.0.7-sources.jar,\
lib/mapdb-3.0.7-javadoc.jar
OSGI-INF/
bin.excludes = **/*.groovy
source.. = src/

View File

@ -2,30 +2,25 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>com.minres.scviewer.database.text</artifactId>
<version>2.0.2-SNAPSHOT</version>
<parent>
<groupId>com.minres.scviewer</groupId>
<artifactId>com.minres.scviewer.parent</artifactId>
<version>2.0.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../com.minres.scviewer.parent</relativePath>
</parent>
<packaging>eclipse-plugin</packaging>
<build>
<plugins>
<plugin>
<plugins>
<plugin>
<groupId>org.eclipse.tycho</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.0</version>
<artifactId>tycho-compiler-plugin</artifactId>
<version>0.23.1</version>
<configuration>
<compilerId>groovy-eclipse-compiler</compilerId>
<compilerArguments>
<indy/><!-- optional; supported by batch 2.4.12-04+ -->
</compilerArguments>
<!-- set verbose to be true if you want lots of uninteresting messages -->
<!-- <verbose>true</verbose> -->
<source>1.8</source>
<target>1.8</target>
<source>1.7</source>
<target>1.7</target>
</configuration>
<dependencies>
<dependency>
@ -37,9 +32,11 @@
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-eclipse-batch</artifactId>
<version>${groovy-eclipse-batch-version}</version>
<!-- or choose a different compiler version -->
<!-- <version>2.1.8-01</version> -->
</dependency>
</dependencies>
</plugin>
</plugins>
</plugins>
</build>
</project>
</project>

View File

@ -13,11 +13,8 @@ package com.minres.scviewer.database.text;
import java.nio.charset.CharsetDecoder;
import java.util.Collection;
import java.util.zip.GZIPInputStream
import org.codehaus.groovy.ast.stmt.CatchStatement
import org.mapdb.DB
import org.mapdb.DBMaker
import org.apache.jdbm.DB
import org.apache.jdbm.DBMaker
import groovy.io.FileType
import com.minres.scviewer.database.AssociationType
@ -35,12 +32,12 @@ public class TextDbLoader implements IWaveformDbLoader{
IWaveformDb db;
DB backingDb;
def streams = []
def relationTypes=[:]
DB mapDb
public TextDbLoader() {
}
@ -50,8 +47,8 @@ public class TextDbLoader implements IWaveformDbLoader{
}
@Override
public Collection<IWaveform> getAllWaves() {
return streams;
public List<IWaveform> getAllWaves() {
return new LinkedList<IWaveform>(streams);
}
public Map<Long, ITxGenerator> getGeneratorsById() {
@ -64,40 +61,27 @@ public class TextDbLoader implements IWaveformDbLoader{
@Override
boolean load(IWaveformDb db, File file) throws Exception {
if(file.isDirectory() || !file.exists()) return false;
this.db=db
this.streams=[]
try {
def gzipped = isGzipped(file)
if(isTxfile(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))){
def mapDbFile = File.createTempFile("."+file.name, null /*"tmp"*/, null /*file.parentFile*/)
mapDbFile.delete()
mapDbFile.deleteOnExit()
this.mapDb = DBMaker
.fileDB(mapDbFile)
.fileMmapEnableIfSupported()
.fileMmapPreclearDisable()
.cleanerHackEnable()
.allocateStartSize(64*1024*1024)
.allocateIncrement(64*1024*1024)
.make()
// NPE here --->
parseInput(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))
streams.each{ TxStream stream -> stream.getMaxConcurrency() }
return true
def gzipped = isGzipped(file)
if(isTxfile(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))){
if(true) {
def parentDir=file.absoluteFile.parent
def filename=file.name
new File(parentDir).eachFileRecurse (FileType.FILES) { f -> if(f.name=~/^\.${filename}/) f.delete() }
this.backingDb = DBMaker.openFile(parentDir+File.separator+"."+filename+"_bdb")
.deleteFilesAfterClose()
.useRandomAccessFile()
.setMRUCacheSize(4096)
//.disableTransactions()
.disableLocking()
.make();
} else {
this.backingDb = DBMaker.openMemory().disableLocking().make()
}
} catch (IndexOutOfBoundsException e) {
return false
} catch (IllegalArgumentException e) {
return false
} catch (NumberFormatException e) {
return false
} catch(EOFException e) {
return true;
} catch(Exception e) {
System.out.println("---->>> Exception "+e.toString()+" caught while loading database");
//System.out.println("---->>> Exception "+e.toString()+" caught while loading database. StackTrace following... ");
//e.printStackTrace()
parseInput(gzipped?new GZIPInputStream(new FileInputStream(file)):new FileInputStream(file))
calculateConcurrencyIndicees()
return true
}
return false;
}
@ -123,7 +107,7 @@ public class TextDbLoader implements IWaveformDbLoader{
} catch (IOException e) {
return false;
} finally {
if(is!=null) is.close()
is.close()
}
}
@ -136,9 +120,7 @@ public class TextDbLoader implements IWaveformDbLoader{
case "ms":return 1000000000000L
case "s": return 1000000000000000L
}
return 1L
}
private def parseInput(InputStream inputStream){
def streamsById = [:]
def generatorsById = [:]
@ -150,32 +132,26 @@ public class TextDbLoader implements IWaveformDbLoader{
BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"));
long lineCnt=0;
reader.eachLine { line ->
def tokens = line.split(/\s+/) as ArrayList
def tokens = line.split(/\s+/)
switch(tokens[0]){
case "scv_tr_stream":
case "scv_tr_generator":
case "begin_attribute":
case "end_attribute":
if ((matcher = line =~ /^scv_tr_stream\s+\(ID (\d+),\s+name\s+"([^"]+)",\s+kind\s+"([^"]+)"\)$/)) {
def id = Integer.parseInt(matcher[0][1])
def stream = new TxStream(this, id, matcher[0][2], matcher[0][3])
def stream = new TxStream(db, id, matcher[0][2], matcher[0][3], backingDb)
streams<<stream
streamsById[id]=stream
}
break;
case "scv_tr_generator":
if ((matcher = line =~ /^scv_tr_generator\s+\(ID\s+(\d+),\s+name\s+"([^"]+)",\s+scv_tr_stream\s+(\d+),$/)) {
} else if ((matcher = line =~ /^scv_tr_generator\s+\(ID\s+(\d+),\s+name\s+"([^"]+)",\s+scv_tr_stream\s+(\d+),$/)) {
def id = Integer.parseInt(matcher[0][1])
ITxStream stream=streamsById[Integer.parseInt(matcher[0][3])]
generator=new TxGenerator(id, stream, matcher[0][2])
stream.generators<<generator
generatorsById[id]=generator
}
break;
case "begin_attribute":
if ((matcher = line =~ /^begin_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
} else if ((matcher = line =~ /^begin_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
generator.begin_attrs << TxAttributeType.getAttrType(matcher[0][2], DataType.valueOf(matcher[0][3]), AssociationType.BEGIN)
}
break;
case "end_attribute":
if ((matcher = line =~ /^end_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
} else if ((matcher = line =~ /^end_attribute \(ID (\d+), name "([^"]+)", type "([^"]+)"\)$/)) {
generator.end_attrs << TxAttributeType.getAttrType(matcher[0][2], DataType.valueOf(matcher[0][3]), AssociationType.END)
}
break;
@ -203,18 +179,13 @@ public class TextDbLoader implements IWaveformDbLoader{
break
case "tx_record_attribute"://matcher = line =~ /^tx_record_attribute\s+(\d+)\s+"([^"]+)"\s+(\S+)\s*=\s*(.+)$/
def id = Integer.parseInt(tokens[1])
def name = tokens[2][1..-2]
def type = tokens[3] as DataType
def remaining = tokens.size()>5?tokens[5..-1].join(' '):""
transactionsById[id].attributes<<new TxAttribute(name, type, AssociationType.RECORD, remaining)
transactionsById[id].attributes<<new TxAttribute(tokens[2][1..-2], DataType.valueOf(tokens[3]), AssociationType.RECORD, tokens[5..-1].join(' '))
break
case "a"://matcher = line =~ /^a\s+(.+)$/
if(endTransaction){
transaction.attributes << new TxAttribute(transaction.generator.end_attrs[transaction.generator.end_attrs_idx], tokens[1])
transaction.generator.end_attrs_idx++
transaction.attributes << new TxAttribute(transaction.generator.end_attrs[0], tokens[1])
} else {
transaction.attributes << new TxAttribute(transaction.generator.begin_attrs[transaction.generator.begin_attrs_idx], tokens[1])
transaction.generator.begin_attrs_idx++
transaction.attributes << new TxAttribute(transaction.generator.begin_attrs[0], tokens[1])
}
break
case "tx_relation"://matcher = line =~ /^tx_relation\s+\"(\S+)\"\s+(\d+)\s+(\d+)$/
@ -231,9 +202,17 @@ public class TextDbLoader implements IWaveformDbLoader{
}
lineCnt++
if((lineCnt%1000) == 0) {
backingDb.commit()
}
}
}
private def calculateConcurrencyIndicees(){
streams.each{ TxStream stream -> stream.getMaxConcurrency() }
}
public Collection<RelationType> getAllRelationTypes(){
return relationTypes.values();
}
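The parser above dispatches on the first token of each line and then applies the regular expressions shown in the parseInput hunk. For illustration, a standalone check of the scv_tr_stream pattern against a constructed sample line; the pattern is copied from TextDbLoader, but the sample line and its names are made up, not taken from a real trace:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ScvStreamLineSketch {
        public static void main(String[] args) {
            // pattern as in TextDbLoader; the sample line is constructed for illustration
            Pattern p = Pattern.compile(
                    "^scv_tr_stream\\s+\\(ID (\\d+),\\s+name\\s+\"([^\"]+)\",\\s+kind\\s+\"([^\"]+)\"\\)$");
            Matcher m = p.matcher("scv_tr_stream (ID 1, name \"pipelined_stream\", kind \"TRANSACTOR\")");
            if (m.matches())
                System.out.println("id=" + m.group(1) + " name=" + m.group(2) + " kind=" + m.group(3));
        }
    }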

View File

@ -16,9 +16,7 @@ import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import org.mapdb.Serializer
import org.apache.jdbm.DB
import com.minres.scviewer.database.ITxEvent;
import com.minres.scviewer.database.IWaveform;
import com.minres.scviewer.database.IWaveformDb
@ -45,15 +43,15 @@ class TxStream extends HierNode implements ITxStream {
private TreeMap<Long, List<ITxEvent>> events
TxStream(TextDbLoader loader, int id, String name, String kind){
TxStream(IWaveformDb db, int id, String name, String kind, DB backingStore){
super(name)
this.id=id
this.database=loader.db
this.database=db
this.fullName=name
this.kind=kind
this.maxConcurrency=0
//events = new TreeMap<Long, List<ITxEvent>>()
events = loader.mapDb.treeMap(name).keySerializer(Serializer.LONG).createOrOpen();
events=backingStore.createTreeMap("stream-"+name)
}
List<ITxGenerator> getGenerators(){
@ -112,7 +110,7 @@ class TxStream extends HierNode implements ITxStream {
}
@Override
public Boolean equals(IWaveform other) {
public Boolean equals(IWaveform<? extends IWaveformEvent> other) {
return(other instanceof TxStream && this.getId()==other.getId());
}

View File

@ -0,0 +1,706 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.*;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* B+Tree persistent indexing data structure. B+Trees are optimized for
* block-based, random I/O storage because they store multiple keys on
* one tree node (called <code>BTreeNode</code>). In addition, the leaf nodes
* directly contain (inline) small values associated with the keys, allowing a
* single (or sequential) disk read of all the values on the node.
* <p/>
* B+Trees are n-airy, yeilding log(N) search cost. They are self-balancing,
* preventing search performance degradation when the size of the tree grows.
* <p/>
* BTree stores its keys sorted. By default JDBM expects key to implement
* <code>Comparable</code> interface but user may supply its own <code>Comparator</code>
* at BTree creation time. Comparator is serialized and stored as part of BTree.
* <p/>
* The B+Tree allows traversing the keys in forward and reverse order using a
* TupleBrowser obtained from the browse() methods. But it is better to use
* <code>BTreeMap</code> wrapper which implements <code>SortedMap</code> interface
* <p/>
* This implementation does not directly support duplicate keys. It is
* possible to handle duplicates by grouping values using an ArrayList as value.
* This scenario is supported by JDBM serialization so there is no big performance penalty.
* <p/>
* There is no limit on key size or value size, but it is recommended to keep
* keys as small as possible to reduce disk I/O. If serialized value exceeds 32 bytes,
* it is stored in separate record and tree contains only recid reference to it.
* BTree uses delta compression for its keys.
*
*
* @author Alex Boisvert
* @author Jan Kotek
*/
class BTree<K, V> {
private static final boolean DEBUG = false;
/**
* Default node size (number of entries per node)
*/
public static final int DEFAULT_SIZE = 32; //TODO test optimal size, it has serious impact on sequencial write and read
/**
* Record manager used to persist changes in BTreeNodes
*/
protected transient DBAbstract _db;
/**
* This BTree's record ID in the DB.
*/
private transient long _recid;
/**
* Comparator used to index entries (optional)
*/
protected Comparator<K> _comparator;
/**
* Serializer used to serialize index keys (optional)
*/
protected Serializer<K> keySerializer;
/**
* Serializer used to serialize index values (optional)
*/
protected Serializer<V> valueSerializer;
/**
* indicates if values should be loaded during deserialization, set to false during defragmentation
*/
boolean loadValues = true;
/** if false map contains only keys, used for set*/
boolean hasValues = true;
/**
* The number of structural modifications to the tree for fail fast iterators. This value is just for runtime, it is not persisted
*/
transient int modCount = 0;
/**
* cached instance of an insert result, so we do not have to allocate new object on each insert
*/
protected BTreeNode.InsertResult<K, V> insertResultReuse; //TODO investigate performance impact of removing this
public Serializer<K> getKeySerializer() {
return keySerializer;
}
public Serializer<V> getValueSerializer() {
return valueSerializer;
}
/**
* Height of the B+Tree. This is the number of BTreeNodes you have to traverse
* to get to a leaf BTreeNode, starting from the root.
*/
private int _height;
/**
* Recid of the root BTreeNode
*/
private transient long _root;
/**
* Total number of entries in the BTree
*/
protected volatile long _entries;
/**
* Serializer used for BTreeNodes of this tree
*/
private transient BTreeNode<K, V> _nodeSerializer = new BTreeNode();
{
_nodeSerializer._btree = this;
}
/**
* Listeners which are notified about changes in records
*/
protected RecordListener[] recordListeners = new RecordListener[0];
final protected ReadWriteLock lock = new ReentrantReadWriteLock();
/**
* No-argument constructor used by serialization.
*/
public BTree() {
// empty
}
/**
* Create a new persistent BTree
*/
@SuppressWarnings("unchecked")
public static <K extends Comparable, V> BTree<K, V> createInstance(DBAbstract db)
throws IOException {
return createInstance(db, null, null, null,true);
}
/**
* Create a new persistent BTree
*/
public static <K, V> BTree<K, V> createInstance(DBAbstract db,
Comparator<K> comparator,
Serializer<K> keySerializer,
Serializer<V> valueSerializer,
boolean hasValues)
throws IOException {
BTree<K, V> btree;
if (db == null) {
throw new IllegalArgumentException("Argument 'db' is null");
}
btree = new BTree<K, V>();
btree._db = db;
btree._comparator = comparator;
btree.keySerializer = keySerializer;
btree.valueSerializer = valueSerializer;
btree.hasValues = hasValues;
btree._recid = db.insert(btree, btree.getRecordManager().defaultSerializer(),false);
return btree;
}
/**
* Load a persistent BTree.
*
* @param db DB used to store the persistent btree
* @param recid Record id of the BTree
*/
@SuppressWarnings("unchecked")
public static <K, V> BTree<K, V> load(DBAbstract db, long recid)
throws IOException {
BTree<K, V> btree = (BTree<K, V>) db.fetch(recid);
btree._recid = recid;
btree._db = db;
btree._nodeSerializer = new BTreeNode<K, V>();
btree._nodeSerializer._btree = btree;
return btree;
}
/**
* Get the {@link ReadWriteLock} associated with this BTree.
* This should be used with browsing operations to ensure
* consistency.
*
* @return
*/
public ReadWriteLock getLock() {
return lock;
}
/**
* Insert an entry in the BTree.
* <p/>
* The BTree cannot store duplicate entries. An existing entry can be
* replaced using the <code>replace</code> flag. If an entry with the
* same key already exists in the BTree, its value is returned.
*
* @param key Insert key
* @param value Insert value
* @param replace Set to true to replace an existing key-value pair.
* @return Existing value, if any.
*/
public V insert(final K key, final V value,
final boolean replace)
throws IOException {
if (key == null) {
throw new IllegalArgumentException("Argument 'key' is null");
}
if (value == null) {
throw new IllegalArgumentException("Argument 'value' is null");
}
try {
lock.writeLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode == null) {
// BTree is currently empty, create a new root BTreeNode
if (DEBUG) {
System.out.println("BTree.insert() new root BTreeNode");
}
rootNode = new BTreeNode<K, V>(this, key, value);
_root = rootNode._recid;
_height = 1;
_entries = 1;
_db.update(_recid, this);
modCount++;
//notifi listeners
for (RecordListener<K, V> l : recordListeners) {
l.recordInserted(key, value);
}
return null;
} else {
BTreeNode.InsertResult<K, V> insert = rootNode.insert(_height, key, value, replace);
boolean dirty = false;
if (insert._overflow != null) {
// current root node overflowed, we replace with a new root node
if (DEBUG) {
System.out.println("BTreeNode.insert() replace root BTreeNode due to overflow");
}
rootNode = new BTreeNode<K, V>(this, rootNode, insert._overflow);
_root = rootNode._recid;
_height += 1;
dirty = true;
}
if (insert._existing == null) {
_entries++;
modCount++;
dirty = true;
}
if (dirty) {
_db.update(_recid, this);
}
//notify listeners
for (RecordListener<K, V> l : recordListeners) {
if (insert._existing == null)
l.recordInserted(key, value);
else
l.recordUpdated(key, insert._existing, value);
}
// insert might have returned an existing value
V ret = insert._existing;
//zero out tuple and put it for reuse
insert._existing = null;
insert._overflow = null;
this.insertResultReuse = insert;
return ret;
}
} finally {
lock.writeLock().unlock();
}
}
/**
* Remove an entry with the given key from the BTree.
*
* @param key Removal key
* @return Value associated with the key, or null if no entry with given
* key existed in the BTree.
*/
public V remove(K key)
throws IOException {
if (key == null) {
throw new IllegalArgumentException("Argument 'key' is null");
}
try {
lock.writeLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode == null) {
return null;
}
boolean dirty = false;
BTreeNode.RemoveResult<K, V> remove = rootNode.remove(_height, key);
if (remove._underflow && rootNode.isEmpty()) {
_height -= 1;
dirty = true;
_db.delete(_root);
if (_height == 0) {
_root = 0;
} else {
_root = rootNode.loadLastChildNode()._recid;
}
}
if (remove._value != null) {
_entries--;
modCount++;
dirty = true;
}
if (dirty) {
_db.update(_recid, this);
}
if (remove._value != null)
for (RecordListener<K, V> l : recordListeners)
l.recordRemoved(key, remove._value);
return remove._value;
} finally {
lock.writeLock().unlock();
}
}
/**
* Find the value associated with the given key.
*
* @param key Lookup key.
* @return Value associated with the key, or null if not found.
*/
public V get(K key)
throws IOException {
if (key == null) {
throw new IllegalArgumentException("Argument 'key' is null");
}
try {
lock.readLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode == null) {
return null;
}
return rootNode.findValue(_height, key);
} finally {
lock.readLock().unlock();
}
}
/**
* Find the value associated with the given key, or the entry immediately
* following this key in the ordered BTree.
*
* @param key Lookup key.
* @return Value associated with the key, or a greater entry, or null if no
* greater entry was found.
*/
public BTreeTuple<K, V> findGreaterOrEqual(K key)
throws IOException {
BTreeTuple<K, V> tuple;
BTreeTupleBrowser<K, V> browser;
if (key == null) {
// there can't be a key greater than or equal to "null"
// because null is considered an infinite key.
return null;
}
tuple = new BTreeTuple<K, V>(null, null);
browser = browse(key,true);
if (browser.getNext(tuple)) {
return tuple;
} else {
return null;
}
}
/**
* Get a browser initially positioned at the beginning of the BTree.
* <p><b>
* WARNING: If you make structural modifications to the BTree during
* browsing, you will get inconsistent browing results.
* </b>
*
* @return Browser positionned at the beginning of the BTree.
*/
@SuppressWarnings("unchecked")
public BTreeTupleBrowser<K, V> browse()
throws IOException {
try {
lock.readLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode == null) {
return EMPTY_BROWSER;
}
return rootNode.findFirst();
} finally {
lock.readLock().unlock();
}
}
/**
* Get a browser initially positioned just before the given key.
* <p><b>
* WARNING: If you make structural modifications to the BTree during
* browsing, you will get inconsistent browing results.
* </b>
*
* @param key Key used to position the browser. If null, the browser
* will be positionned after the last entry of the BTree.
* (Null is considered to be an "infinite" key)
* @return Browser positionned just before the given key.
*/
@SuppressWarnings("unchecked")
public BTreeTupleBrowser<K, V> browse(final K key, final boolean inclusive)
throws IOException {
try {
lock.readLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode == null) {
return EMPTY_BROWSER;
}
BTreeTupleBrowser<K, V> browser = rootNode.find(_height, key, inclusive);
return browser;
} finally {
lock.readLock().unlock();
}
}
/**
* Return the persistent record identifier of the BTree.
*/
public long getRecid() {
return _recid;
}
/**
* Return the root BTreeNode, or null if it doesn't exist.
*/
BTreeNode<K, V> getRoot()
throws IOException {
if (_root == 0) {
return null;
}
BTreeNode<K, V> root = _db.fetch(_root, _nodeSerializer);
if (root != null) {
root._recid = _root;
root._btree = this;
}
return root;
}
static BTree readExternal(DataInput in, Serialization ser)
throws IOException, ClassNotFoundException {
BTree tree = new BTree();
tree._db = ser.db;
tree._height = in.readInt();
tree._recid = in.readLong();
tree._root = in.readLong();
tree._entries = in.readLong();
tree.hasValues = in.readBoolean();
tree._comparator = (Comparator) ser.deserialize(in);
tree.keySerializer = (Serializer) ser.deserialize(in);
tree.valueSerializer = (Serializer) ser.deserialize(in);
return tree;
}
public void writeExternal(DataOutput out)
throws IOException {
out.writeInt(_height);
out.writeLong(_recid);
out.writeLong(_root);
out.writeLong(_entries);
out.writeBoolean(hasValues);
_db.defaultSerializer().serialize(out, _comparator);
_db.defaultSerializer().serialize(out, keySerializer);
_db.defaultSerializer().serialize(out, valueSerializer);
}
/**
* Copyes tree from one db to other, defragmenting it allong the way
* @param recid
* @param r1
* @param r2
* @throws IOException
*/
public static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
try {
byte[] data = r1.fetchRaw(recid);
r2.forceInsert(recid, data);
DataInput in = new DataInputOutput(data);
BTree t = (BTree) r1.defaultSerializer().deserialize(in);
t.loadValues = false;
t._db = r1;
t._nodeSerializer = new BTreeNode(t, false);
BTreeNode p = t.getRoot();
if (p != null) {
r2.forceInsert(t._root, r1.fetchRaw(t._root));
p.defrag(r1, r2);
}
} catch (ClassNotFoundException e) {
throw new IOError(e);
}
}
/**
* Browser returning no element.
*/
private static final BTreeTupleBrowser EMPTY_BROWSER = new BTreeTupleBrowser() {
public boolean getNext(BTreeTuple tuple) {
return false;
}
public boolean getPrevious(BTreeTuple tuple) {
return false;
}
public void remove(Object key) {
throw new IndexOutOfBoundsException();
}
};
/**
* add RecordListener which is notified about record changes
*
* @param listener
*/
public void addRecordListener(RecordListener<K, V> listener) {
recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1);
recordListeners[recordListeners.length - 1] = listener;
}
/**
* remove RecordListener which is notified about record changes
*
* @param listener
*/
public void removeRecordListener(RecordListener<K, V> listener) {
List l = Arrays.asList(recordListeners);
l.remove(listener);
recordListeners = (RecordListener[]) l.toArray(new RecordListener[1]);
}
public DBAbstract getRecordManager() {
return _db;
}
public Comparator<K> getComparator() {
return _comparator;
}
/**
* Deletes all BTreeNodes in this BTree
*/
public void clear()
throws IOException {
try {
lock.writeLock().lock();
BTreeNode<K, V> rootNode = getRoot();
if (rootNode != null)
rootNode.delete();
_entries = 0;
modCount++;
} finally {
lock.writeLock().unlock();
}
}
/**
* Used for debugging and testing only. Populates the 'out' list with
* the recids of all child nodes in the BTree.
*
* @param out
* @throws IOException
*/
void dumpChildNodeRecIDs(List<Long> out) throws IOException {
BTreeNode<K, V> root = getRoot();
if (root != null) {
out.add(root._recid);
root.dumpChildNodeRecIDs(out, _height);
}
}
public boolean hasValues() {
return hasValues;
}
/**
* Browser to traverse a collection of tuples. The browser allows for
* forward and reverse order traversal.
*
*
*/
static interface BTreeTupleBrowser<K, V> {
/**
* Get the next tuple.
*
* @param tuple Tuple into which values are copied.
* @return True if values have been copied in tuple, or false if there is no next tuple.
*/
boolean getNext(BTree.BTreeTuple<K, V> tuple) throws IOException;
/**
* Get the previous tuple.
*
* @param tuple Tuple into which values are copied.
* @return True if values have been copied in tuple, or false if there is no previous tuple.
*/
boolean getPrevious(BTree.BTreeTuple<K, V> tuple) throws IOException;
/**
* Remove an entry with given key, and increases browsers expectedModCount
* This method is here to support 'ConcurrentModificationException' on Map interface.
*
* @param key
*/
void remove(K key) throws IOException;
}
/**
* Tuple consisting of a key-value pair.
*/
static final class BTreeTuple<K, V> {
K key;
V value;
BTreeTuple() {
// empty
}
BTreeTuple(K key, V value) {
this.key = key;
this.value = value;
}
}
}
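These vendored JDBM classes back the per-stream event maps that TxStream now creates with backingStore.createTreeMap(...). As orientation, a small self-contained sketch of that usage pattern, pieced together from the calls TextDbLoader and TxStream actually make; the class name and the key/value types are illustrative only and not part of this changeset:

    import java.util.concurrent.ConcurrentNavigableMap;

    import org.apache.jdbm.DB;
    import org.apache.jdbm.DBMaker;

    public class JdbmUsageSketch {
        public static void main(String[] args) {
            // in-memory variant; TextDbLoader normally opens a temporary file-backed DB
            DB db = DBMaker.openMemory().disableLocking().make();
            ConcurrentNavigableMap<Long, String> events = db.createTreeMap("stream-demo");
            events.put(10L, "tx begin");
            events.put(20L, "tx end");
            db.commit(); // TextDbLoader commits once every 1000 parsed lines
            System.out.println(events.firstKey() + " .. " + events.lastKey());
            db.close();
        }
    }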

View File

@ -0,0 +1,97 @@
package org.apache.jdbm;
import java.io.*;
/**
* An record lazily loaded from store.
* This is used in BTree/HTree to store big records outside of index tree
*
* @author Jan Kotek
*/
class BTreeLazyRecord<E> {
private E value = null;
private DBAbstract db;
private Serializer<E> serializer;
final long recid;
BTreeLazyRecord(DBAbstract db, long recid, Serializer<E> serializer) {
this.db = db;
this.recid = recid;
this.serializer = serializer;
}
E get() {
if (value != null) return value;
try {
value = db.fetch(recid, serializer);
} catch (IOException e) {
throw new IOError(e);
}
return value;
}
void delete() {
try {
db.delete(recid);
} catch (IOException e) {
throw new IOError(e);
}
value = null;
serializer = null;
db = null;
}
/**
* Serialier used to insert already serialized data into store
*/
static final Serializer FAKE_SERIALIZER = new Serializer() {
public void serialize(DataOutput out, Object obj) throws IOException {
byte[] data = (byte[]) obj;
out.write(data);
}
public Object deserialize(DataInput in) throws IOException, ClassNotFoundException {
throw new UnsupportedOperationException();
}
};
static Object fastDeser(DataInputOutput in, Serializer serializer, int expectedSize) throws IOException, ClassNotFoundException {
//we should propably copy data for deserialization into separate buffer and pass it to Serializer
//but to make it faster, Serializer will operate directly on top of buffer.
//and we check that it readed correct number of bytes.
int origAvail = in.available();
if (origAvail == 0)
throw new InternalError(); //is backed up by byte[] buffer, so there should be always avail bytes
Object ret = serializer.deserialize(in);
//check than valueSerializer did not read more bytes, if yes it readed bytes from next record
int readed = origAvail - in.available();
if (readed > expectedSize)
throw new IOException("Serializer readed more bytes than is record size.");
else if (readed != expectedSize) {
//deserializer did not readed all bytes, unussual but valid.
//Skip some to get into correct position
for (int ii = 0; ii < expectedSize - readed; ii++)
in.readUnsignedByte();
}
return ret;
}
/**
* if value in tree is serialized in more bytes, it is stored as separate record outside of tree
* This value must be always smaller than 250
*/
static final int MAX_INTREE_RECORD_SIZE = 32;
static {
if (MAX_INTREE_RECORD_SIZE > 250) throw new Error();
}
static final int NULL = 255;
static final int LAZY_RECORD = 254;
}

View File

@ -0,0 +1,611 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.IOError;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
/**
* Wrapper for <code>BTree</code> which implements <code>ConcurrentNavigableMap</code> interface
*
* @param <K> key type
* @param <V> value type
*
* @author Jan Kotek
*/
class BTreeMap<K, V> extends AbstractMap<K, V> implements ConcurrentNavigableMap<K, V> {
protected BTree<K, V> tree;
protected final K fromKey;
protected final K toKey;
protected final boolean readonly;
protected NavigableSet<K> keySet2;
private final boolean toInclusive;
private final boolean fromInclusive;
public BTreeMap(BTree<K, V> tree, boolean readonly) {
this(tree, readonly, null, false, null, false);
}
protected BTreeMap(BTree<K, V> tree, boolean readonly, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
this.tree = tree;
this.fromKey = fromKey;
this.fromInclusive = fromInclusive;
this.toKey = toKey;
this.toInclusive = toInclusive;
this.readonly = readonly;
}
@Override
public Set<Entry<K, V>> entrySet() {
return _entrySet;
}
private final Set<java.util.Map.Entry<K, V>> _entrySet = new AbstractSet<Entry<K, V>>() {
protected Entry<K, V> newEntry(K k, V v) {
return new SimpleEntry<K, V>(k, v) {
private static final long serialVersionUID = 978651696969194154L;
public V setValue(V arg0) {
BTreeMap.this.put(getKey(), arg0);
return super.setValue(arg0);
}
};
}
public boolean add(java.util.Map.Entry<K, V> e) {
if (readonly)
throw new UnsupportedOperationException("readonly");
try {
if (e.getKey() == null)
throw new NullPointerException("Can not add null key");
if (!inBounds(e.getKey()))
throw new IllegalArgumentException("key outside of bounds");
return tree.insert(e.getKey(), e.getValue(), true) == null;
} catch (IOException e1) {
throw new IOError(e1);
}
}
@SuppressWarnings("unchecked")
public boolean contains(Object o) {
if (o instanceof Entry) {
Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
try {
if (!inBounds(e.getKey()))
return false;
if (e.getKey() != null && tree.get(e.getKey()) != null)
return true;
} catch (IOException e1) {
throw new IOError(e1);
}
}
return false;
}
public Iterator<java.util.Map.Entry<K, V>> iterator() {
try {
final BTree.BTreeTupleBrowser<K, V> br = fromKey == null ?
tree.browse() : tree.browse(fromKey, fromInclusive);
return new Iterator<Entry<K, V>>() {
private Entry<K, V> next;
private K lastKey;
void ensureNext() {
try {
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
if (br.getNext(t) && inBounds(t.key))
next = newEntry(t.key, t.value);
else
next = null;
} catch (IOException e1) {
throw new IOError(e1);
}
}
{
ensureNext();
}
public boolean hasNext() {
return next != null;
}
public java.util.Map.Entry<K, V> next() {
if (next == null)
throw new NoSuchElementException();
Entry<K, V> ret = next;
lastKey = ret.getKey();
//move to next position
ensureNext();
return ret;
}
public void remove() {
if (readonly)
throw new UnsupportedOperationException("readonly");
if (lastKey == null)
throw new IllegalStateException();
try {
br.remove(lastKey);
lastKey = null;
} catch (IOException e1) {
throw new IOError(e1);
}
}
};
} catch (IOException e) {
throw new IOError(e);
}
}
@SuppressWarnings("unchecked")
public boolean remove(Object o) {
if (readonly)
throw new UnsupportedOperationException("readonly");
if (o instanceof Entry) {
Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
try {
//check for nulls
if (e.getKey() == null || e.getValue() == null)
return false;
if (!inBounds(e.getKey()))
throw new IllegalArgumentException("out of bounds");
//get old value, must be same as item in entry
V v = get(e.getKey());
if (v == null || !e.getValue().equals(v))
return false;
V v2 = tree.remove(e.getKey());
return v2 != null;
} catch (IOException e1) {
throw new IOError(e1);
}
}
return false;
}
public int size() {
return BTreeMap.this.size();
}
public void clear(){
if(fromKey!=null || toKey!=null)
super.clear();
else
try {
tree.clear();
} catch (IOException e) {
throw new IOError(e);
}
}
};
public boolean inBounds(K e) {
if(fromKey == null && toKey == null)
return true;
Comparator comp = comparator();
if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
if(fromKey!=null){
final int compare = comp.compare(e, fromKey);
if(compare<0) return false;
if(!fromInclusive && compare == 0) return false;
}
if(toKey!=null){
final int compare = comp.compare(e, toKey);
if(compare>0)return false;
if(!toInclusive && compare == 0) return false;
}
return true;
}
@SuppressWarnings("unchecked")
@Override
public V get(Object key) {
try {
if (key == null)
return null;
if (!inBounds((K) key))
return null;
return tree.get((K) key);
} catch (ClassCastException e) {
return null;
} catch (IOException e) {
throw new IOError(e);
}
}
@SuppressWarnings("unchecked")
@Override
public V remove(Object key) {
if (readonly)
throw new UnsupportedOperationException("readonly");
try {
if (key == null || tree.get((K) key) == null)
return null;
if (!inBounds((K) key))
throw new IllegalArgumentException("out of bounds");
return tree.remove((K) key);
} catch (ClassCastException e) {
return null;
} catch (IOException e) {
throw new IOError(e);
}
}
public V put(K key, V value) {
if (readonly)
throw new UnsupportedOperationException("readonly");
try {
if (key == null || value == null)
throw new NullPointerException("Null key or value");
if (!inBounds(key))
throw new IllegalArgumentException("out of bounds");
return tree.insert(key, value, true);
} catch (IOException e) {
throw new IOError(e);
}
}
public void clear(){
entrySet().clear();
}
@SuppressWarnings("unchecked")
@Override
public boolean containsKey(Object key) {
if (key == null)
return false;
try {
if (!inBounds((K) key))
return false;
V v = tree.get((K) key);
return v != null;
} catch (IOException e) {
throw new IOError(e);
} catch (ClassCastException e) {
return false;
}
}
public Comparator<? super K> comparator() {
return tree._comparator;
}
public K firstKey() {
if (isEmpty())
return null;
try {
BTree.BTreeTupleBrowser<K, V> b = fromKey == null ? tree.browse() : tree.browse(fromKey,fromInclusive);
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getNext(t);
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public K lastKey() {
if (isEmpty())
return null;
try {
BTree.BTreeTupleBrowser<K, V> b = toKey == null ? tree.browse(null,true) : tree.browse(toKey,false);
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getPrevious(t);
if(!toInclusive && toKey!=null){
//make sure we won't return the last key
Comparator c = comparator();
if(c==null) c=Utils.COMPARABLE_COMPARATOR;
if(c.compare(t.key,toKey)==0)
b.getPrevious(t);
}
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public ConcurrentNavigableMap<K, V> headMap(K toKey2, boolean inclusive) {
K toKey3 = Utils.min(this.toKey,toKey2,comparator());
boolean inclusive2 = toKey3 == toKey? toInclusive : inclusive;
return new BTreeMap<K, V>(tree, readonly, this.fromKey, this.fromInclusive, toKey3, inclusive2);
}
public ConcurrentNavigableMap<K, V> headMap(K toKey) {
return headMap(toKey,false);
}
public Entry<K, V> lowerEntry(K key) {
K k = lowerKey(key);
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public K lowerKey(K key) {
if (isEmpty())
return null;
K key2 = Utils.min(key,toKey,comparator());
try {
BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2,true) ;
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getPrevious(t);
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public Entry<K, V> floorEntry(K key) {
K k = floorKey(key);
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public K floorKey(K key) {
if (isEmpty())
return null;
K key2 = Utils.max(key,fromKey,comparator());
try {
BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2,true) ;
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getNext(t);
Comparator comp = comparator();
if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
if(comp.compare(t.key,key2) == 0)
return t.key;
b.getPrevious(t);
b.getPrevious(t);
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public Entry<K, V> ceilingEntry(K key) {
K k = ceilingKey(key);
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public K ceilingKey(K key) {
if (isEmpty())
return null;
K key2 = Utils.min(key,toKey,comparator());
try {
BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2,true) ;
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getNext(t);
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public Entry<K, V> higherEntry(K key) {
K k = higherKey(key);
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public K higherKey(K key) {
if (isEmpty())
return null;
K key2 = Utils.max(key,fromKey,comparator());
try {
BTree.BTreeTupleBrowser<K, V> b = tree.browse(key2,false) ;
BTree.BTreeTuple<K, V> t = new BTree.BTreeTuple<K, V>();
b.getNext(t);
return t.key;
} catch (IOException e) {
throw new IOError(e);
}
}
public Entry<K, V> firstEntry() {
K k = firstKey();
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public Entry<K, V> lastEntry() {
K k = lastKey();
return k==null? null : new SimpleEntry<K, V>(k,get(k));
}
public Entry<K, V> pollFirstEntry() {
Entry<K,V> first = firstEntry();
if(first!=null)
remove(first.getKey());
return first;
}
public Entry<K, V> pollLastEntry() {
Entry<K,V> last = lastEntry();
if(last!=null)
remove(last.getKey());
return last;
}
public ConcurrentNavigableMap<K, V> descendingMap() {
throw new UnsupportedOperationException("not implemented yet");
//TODO implement descending (reverse order) map
}
public NavigableSet<K> keySet() {
return navigableKeySet();
}
public NavigableSet<K> navigableKeySet() {
if(keySet2 == null)
keySet2 = new BTreeSet<K>((BTreeMap<K,Object>) this);
return keySet2;
}
public NavigableSet<K> descendingKeySet() {
return descendingMap().navigableKeySet();
}
public ConcurrentNavigableMap<K, V> tailMap(K fromKey) {
return tailMap(fromKey,true);
}
public ConcurrentNavigableMap<K, V> tailMap(K fromKey2, boolean inclusive) {
K fromKey3 = Utils.max(this.fromKey,fromKey2,comparator());
boolean inclusive2 = fromKey3 == fromKey ? fromInclusive : inclusive;
return new BTreeMap<K, V>(tree, readonly, fromKey3, inclusive2, toKey, toInclusive);
}
public ConcurrentNavigableMap<K, V> subMap(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
Comparator comp = comparator();
if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
if (comp.compare(fromKey, toKey) > 0)
throw new IllegalArgumentException("fromKey is bigger than toKey");
return new BTreeMap<K, V>(tree, readonly, fromKey, fromInclusive, toKey, toInclusive);
}
public ConcurrentNavigableMap<K, V> subMap(K fromKey, K toKey) {
return subMap(fromKey,true,toKey,false);
}
public BTree<K, V> getTree() {
return tree;
}
public void addRecordListener(RecordListener<K, V> listener) {
tree.addRecordListener(listener);
}
public DBAbstract getRecordManager() {
return tree.getRecordManager();
}
public void removeRecordListener(RecordListener<K, V> listener) {
tree.removeRecordListener(listener);
}
public int size() {
if (fromKey == null && toKey == null)
return (int) tree._entries; //use fast counter on tree if Map has no bounds
else {
//have to count items via the iterator
Iterator iter = keySet().iterator();
int counter = 0;
while (iter.hasNext()) {
iter.next();
counter++;
}
return counter;
}
}
public V putIfAbsent(K key, V value) {
tree.lock.writeLock().lock();
try{
if (!containsKey(key))
return put(key, value);
else
return get(key);
}finally {
tree.lock.writeLock().unlock();
}
}
public boolean remove(Object key, Object value) {
tree.lock.writeLock().lock();
try{
if (containsKey(key) && get(key).equals(value)) {
remove(key);
return true;
} else return false;
}finally {
tree.lock.writeLock().unlock();
}
}
public boolean replace(K key, V oldValue, V newValue) {
tree.lock.writeLock().lock();
try{
if (containsKey(key) && get(key).equals(oldValue)) {
put(key, newValue);
return true;
} else return false;
}finally {
tree.lock.writeLock().unlock();
}
}
public V replace(K key, V value) {
tree.lock.writeLock().lock();
try{
if (containsKey(key)) {
return put(key, value);
} else return null;
}finally {
tree.lock.writeLock().unlock();
}
}
}
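The bounded views returned by headMap/tailMap/subMap above share one underlying BTree and only narrow the key range enforced by inBounds. A minimal usage sketch (not part of this change set) through the public DBMaker/DB API added in this commit; the file location and collection name are arbitrary examples:
import java.util.NavigableMap;
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class SubMapSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-treemap").make();      // placeholder file location
        NavigableMap<Integer, String> m = db.createTreeMap("numbers");
        for (int i = 0; i < 10; i++)
            m.put(i, "v" + i);
        // headMap/tailMap return BTreeMap views over the same tree,
        // restricted by fromKey/toKey and filtered through inBounds()
        System.out.println(m.headMap(5).size());      // 5 (keys 0..4)
        System.out.println(m.tailMap(5).firstKey());  // 5
        db.commit();
        db.close();
    }
}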

File diff suppressed because it is too large

View File

@ -0,0 +1,187 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jdbm;
import java.util.*;
/**
* Wrapper class for <code>SortedMap</code> to implement <code>NavigableSet</code>
* <p/>
* This code originally comes from Apache Harmony and was adapted by Jan Kotek for JDBM
*/
class BTreeSet<E> extends AbstractSet<E> implements NavigableSet<E> {
/**
* use keyset from this map
*/
final BTreeMap<E, Object> map;
BTreeSet(BTreeMap<E, Object> map) {
this.map = map;
}
public boolean add(E object) {
return map.put(object, Utils.EMPTY_STRING) == null;
}
public boolean addAll(Collection<? extends E> collection) {
return super.addAll(collection);
}
public void clear() {
map.clear();
}
public Comparator<? super E> comparator() {
return map.comparator();
}
public boolean contains(Object object) {
return map.containsKey(object);
}
public boolean isEmpty() {
return map.isEmpty();
}
public E lower(E e) {
return map.lowerKey(e);
}
public E floor(E e) {
return map.floorKey(e);
}
public E ceiling(E e) {
return map.ceilingKey(e);
}
public E higher(E e) {
return map.higherKey(e);
}
public E pollFirst() {
Map.Entry<E,Object> e = map.pollFirstEntry();
return e!=null? e.getKey():null;
}
public E pollLast() {
Map.Entry<E,Object> e = map.pollLastEntry();
return e!=null? e.getKey():null;
}
public Iterator<E> iterator() {
final Iterator<Map.Entry<E,Object>> iter = map.entrySet().iterator();
return new Iterator<E>() {
public boolean hasNext() {
return iter.hasNext();
}
public E next() {
Map.Entry<E,Object> e = iter.next();
return e!=null?e.getKey():null;
}
public void remove() {
iter.remove();
}
};
}
public NavigableSet<E> descendingSet() {
return map.descendingKeySet();
}
public Iterator<E> descendingIterator() {
return map.descendingKeySet().iterator();
}
public NavigableSet<E> subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) {
return map.subMap(fromElement,fromInclusive,toElement,toInclusive).navigableKeySet();
}
public NavigableSet<E> headSet(E toElement, boolean inclusive) {
return map.headMap(toElement,inclusive).navigableKeySet();
}
public NavigableSet<E> tailSet(E fromElement, boolean inclusive) {
return map.tailMap(fromElement,inclusive).navigableKeySet();
}
public boolean remove(Object object) {
return map.remove(object) != null;
}
public int size() {
return map.size();
}
public E first() {
return map.firstKey();
}
public E last() {
return map.lastKey();
}
public SortedSet<E> subSet(E start, E end) {
Comparator<? super E> c = map.comparator();
int compare = (c == null) ? ((Comparable<E>) start).compareTo(end) : c
.compare(start, end);
if (compare <= 0) {
return new BTreeSet<E>((BTreeMap<E,Object>) map.subMap(start, true,end,false));
}
throw new IllegalArgumentException();
}
public SortedSet<E> headSet(E end) {
// Check for errors
Comparator<? super E> c = map.comparator();
if (c == null) {
((Comparable<E>) end).compareTo(end);
} else {
c.compare(end, end);
}
return new BTreeSet<E>((BTreeMap<E,Object>) map.headMap(end,false));
}
public SortedSet<E> tailSet(E start) {
// Check for errors
Comparator<? super E> c = map.comparator();
if (c == null) {
((Comparable<E>) start).compareTo(start);
} else {
c.compare(start, start);
}
return new BTreeSet<E>((BTreeMap<E,Object>) map.tailMap(start,true));
}
}
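BTreeSet is only a key view over a BTreeMap, so every navigation call above delegates to the map. A small sketch of that delegation (illustrative only, using the DB/DBMaker API from this commit; names are placeholders):
import java.util.NavigableSet;
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class TreeSetSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-treeset").make();   // placeholder file location
        NavigableSet<Integer> ids = db.createTreeSet("ids");
        ids.add(10);
        ids.add(20);
        ids.add(30);
        System.out.println(ids.floor(25));           // 20, via BTreeMap.floorKey
        System.out.println(ids.ceiling(25));         // 30, via BTreeMap.ceilingKey
        System.out.println(ids.headSet(30, false));  // [10, 20], via BTreeMap.headMap
        db.commit();
        db.close();
    }
}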

View File

@ -0,0 +1,173 @@
package org.apache.jdbm;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentNavigableMap;
/**
* Database is the root interface for creating and loading persistent collections. It also contains
* transaction operations.
* //TODO just write some readme
* <p/>
*
* @author Jan Kotek
* @author Alex Boisvert
* @author Cees de Groot
*/
public interface DB {
/**
* Closes the DB and releases resources.
* The DB can not be used after it has been closed
*/
void close();
/** @return true if db was already closed */
boolean isClosed();
/**
* Clears the cache and removes all entries it contains.
* This may be useful for Garbage Collection when a reference cache is used.
*/
void clearCache();
/**
* Defragments storage so it consumes less space.
* It basically copies all records into a different store and then renames it, replacing the original store.
* <p/>
* Defrag has two steps: in the first, collections are rearranged so that records of a collection are close to each other
* and read speed is improved. In the second step all records are transferred sequentially, reclaiming all unused space.
* The first step is optional and may slow down defragmentation significantly as it requires many random-access reads.
* The second step reads and writes data sequentially and is very fast, comparable to copying files to a new location.
*
* <p/>
* This commits any uncommitted data. Defrag also requires free space, as the store is basically recreated at a new location.
*
* @param sortCollections if collection records should be rearranged during defragment, this takes some extra time
*/
void defrag(boolean sortCollections);
/**
* Commit (make persistent) all changes since beginning of transaction.
* JDBM supports only single transaction.
*/
void commit();
/**
* Rollback (cancel) all changes since beginning of transaction.
* JDBM supports only single transaction.
* This operation affects all maps created or loaded by this DB.
*/
void rollback();
/**
* This calculates some database statistics such as collection sizes and record distributions.
* Can be useful for performance optimisations and troubleshooting.
* This method can run for a very long time.
*
* @return statistics contained in string
*/
String calculateStatistics();
/**
* Copy database content into ZIP file
* @param zipFile
*/
void copyToZip(String zipFile);
/**
* Get a <code>Map</code> which was already created and saved in DB.
* This map uses a disk based H*Tree and should have performance
* similar to <code>HashMap</code>.
*
* @param name of hash map
*
* @return map
*/
<K, V> ConcurrentMap<K, V> getHashMap(String name);
/**
* Creates Map which persists data into DB.
*
* @param name record name
* @return
*/
<K, V> ConcurrentMap<K, V> createHashMap(String name);
/**
* Creates Hash Map which persists data into DB.
* Map will use custom serializers for Keys and Values.
* Leave keySerializer null to use default serializer for keys
*
* @param <K> Key type
* @param <V> Value type
* @param name record name
* @param keySerializer serializer to be used for Keys, leave null to use default serializer
* @param valueSerializer serializer to be used for Values
* @return
*/
<K, V> ConcurrentMap<K, V> createHashMap(String name, Serializer<K> keySerializer, Serializer<V> valueSerializer);
<K> Set<K> createHashSet(String name);
<K> Set<K> getHashSet(String name);
<K> Set<K> createHashSet(String name, Serializer<K> keySerializer);
<K, V> ConcurrentNavigableMap<K, V> getTreeMap(String name);
/**
* Create TreeMap which persists data into DB.
*
* @param <K> Key type
* @param <V> Value type
* @param name record name
* @return
*/
<K extends Comparable, V> NavigableMap<K, V> createTreeMap(String name);
/**
* Creates TreeMap which persists data into DB.
*
* @param <K> Key type
* @param <V> Value type
* @param name record name
* @param keyComparator Comparator used to sort keys
* @param keySerializer Serializer used for keys. This may reduce disk space usage
* @param valueSerializer Serializer used for values. This may reduce disk space usage
* @return
*/
<K, V> ConcurrentNavigableMap<K, V> createTreeMap(String name,
Comparator<K> keyComparator, Serializer<K> keySerializer, Serializer<V> valueSerializer);
<K> NavigableSet<K> getTreeSet(String name);
<K> NavigableSet<K> createTreeSet(String name);
<K> NavigableSet<K> createTreeSet(String name, Comparator<K> keyComparator, Serializer<K> keySerializer);
<K> List<K> createLinkedList(String name);
<K> List<K> createLinkedList(String name, Serializer<K> serializer);
<K> List<K> getLinkedList(String name);
/** returns an unmodifiable map which contains all collection names and the collections themselves */
Map<String,Object> getCollections();
/** completely remove collection from store*/
void deleteCollection(String name);
/** Java Collections return their size as an int. This may not be enough for JDBM collections.
* This method returns the number of elements in a JDBM collection as a long.
*
* @param collection created by JDBM
* @return number of elements in collection as long
*/
long collectionSize(Object collection);
}
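Taken together, the interface describes a single-transaction lifecycle: open a store, create or load named collections, then commit (or rollback) and close. A hedged end-to-end sketch using only methods declared above; the file and collection names are placeholders:
import java.util.concurrent.ConcurrentMap;
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class DbLifecycleSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-db").make();            // placeholder location
        ConcurrentMap<String, Long> counters = db.createHashMap("counters");
        counters.put("hits", 1L);
        db.commit();                                           // persist changes since the last commit
        System.out.println(db.getCollections().keySet());     // contains "counters"
        db.close();                                            // the DB must not be used afterwards
    }
}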

View File

@ -0,0 +1,590 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOError;
import java.io.IOException;
import java.lang.ref.WeakReference;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentNavigableMap;
/**
* An abstract class implementing most of DB.
* It also has some JDBM package protected stuff (getNamedObject)
*/
abstract class DBAbstract implements DB {
/**
* Reserved slot for name directory recid.
*/
static final byte NAME_DIRECTORY_ROOT = 0;
/**
* Reserved slot for version number
*/
static final byte STORE_VERSION_NUMBER_ROOT = 1;
/**
* Reserved slot for recid where Serial class info is stored
*
* NOTE when introducing more roots, do not forget to update defrag
*/
static final byte SERIAL_CLASS_INFO_RECID_ROOT = 2;
/** to prevent double instances of the same collection, we use weak value map
*
* //TODO what to do when there is rollback?
* //TODO clear on close
*/
final private Map<String,WeakReference<Object>> collections = new HashMap<String,WeakReference<Object>>();
/**
* Inserts a new record using a custom serializer.
*
* @param obj the object for the new record.
* @param serializer a custom serializer
* @return the rowid for the new record.
* @throws java.io.IOException when one of the underlying I/O operations fails.
*/
abstract <A> long insert(A obj, Serializer<A> serializer,boolean disableCache) throws IOException;
/**
* Deletes a record.
*
* @param recid the rowid for the record that should be deleted.
* @throws java.io.IOException when one of the underlying I/O operations fails.
*/
abstract void delete(long recid) throws IOException;
/**
* Updates a record using a custom serializer.
* If given recid does not exist, IOException will be thrown before/during commit (cache).
*
* @param recid the recid for the record that is to be updated.
* @param obj the new object for the record.
* @param serializer a custom serializer
* @throws java.io.IOException when one of the underlying I/O operations fails
*/
abstract <A> void update(long recid, A obj, Serializer<A> serializer)
throws IOException;
/**
* Fetches a record using a custom serializer.
*
* @param recid the recid for the record that must be fetched.
* @param serializer a custom serializer
* @return the object contained in the record, null if given recid does not exist
* @throws java.io.IOException when one of the underlying I/O operations fails.
*/
abstract <A> A fetch(long recid, Serializer<A> serializer)
throws IOException;
/**
* Fetches a record using a custom serializer and an optionally disabled cache
*
* @param recid the recid for the record that must be fetched.
* @param serializer a custom serializer
* @param disableCache true to disable any caching mechanism
* @return the object contained in the record, null if given recid does not exist
* @throws java.io.IOException when one of the underlying I/O operations fails.
*/
abstract <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache)
throws IOException;
public long insert(Object obj) throws IOException {
return insert(obj, defaultSerializer(),false);
}
public void update(long recid, Object obj) throws IOException {
update(recid, obj, defaultSerializer());
}
synchronized public <A> A fetch(long recid) throws IOException {
return (A) fetch(recid, defaultSerializer());
}
synchronized public <K, V> ConcurrentMap<K, V> getHashMap(String name) {
Object o = getCollectionInstance(name);
if(o!=null)
return (ConcurrentMap<K, V>) o;
try {
long recid = getNamedObject(name);
if(recid == 0) return null;
HTree tree = fetch(recid);
tree.setPersistenceContext(this);
if(!tree.hasValues()){
throw new ClassCastException("HashSet is not HashMap");
}
collections.put(name,new WeakReference<Object>(tree));
return tree;
} catch (IOException e) {
throw new IOError(e);
}
}
synchronized public <K, V> ConcurrentMap<K, V> createHashMap(String name) {
return createHashMap(name, null, null);
}
public synchronized <K, V> ConcurrentMap<K, V> createHashMap(String name, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
try {
assertNameNotExist(name);
HTree<K, V> tree = new HTree(this, keySerializer, valueSerializer,true);
long recid = insert(tree);
setNamedObject(name, recid);
collections.put(name,new WeakReference<Object>(tree));
return tree;
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized <K> Set<K> getHashSet(String name) {
Object o = getCollectionInstance(name);
if(o!=null)
return (Set<K>) o;
try {
long recid = getNamedObject(name);
if(recid == 0) return null;
HTree tree = fetch(recid);
tree.setPersistenceContext(this);
if(tree.hasValues()){
throw new ClassCastException("HashMap is not HashSet");
}
Set<K> ret = new HTreeSet(tree);
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized <K> Set<K> createHashSet(String name) {
return createHashSet(name, null);
}
public synchronized <K> Set<K> createHashSet(String name, Serializer<K> keySerializer) {
try {
assertNameNotExist(name);
HTree<K, Object> tree = new HTree(this, keySerializer, null,false);
long recid = insert(tree);
setNamedObject(name, recid);
Set<K> ret = new HTreeSet<K>(tree);
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
synchronized public <K, V> ConcurrentNavigableMap<K, V> getTreeMap(String name) {
Object o = getCollectionInstance(name);
if(o!=null)
return (ConcurrentNavigableMap<K, V> ) o;
try {
long recid = getNamedObject(name);
if(recid == 0) return null;
BTree t = BTree.<K, V>load(this, recid);
if(!t.hasValues())
throw new ClassCastException("TreeSet is not TreeMap");
ConcurrentNavigableMap<K,V> ret = new BTreeMap<K, V>(t,false); //TODO put readonly flag here
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
synchronized public <K extends Comparable, V> ConcurrentNavigableMap<K, V> createTreeMap(String name) {
return createTreeMap(name, null, null, null);
}
public synchronized <K, V> ConcurrentNavigableMap<K, V> createTreeMap(String name,
Comparator<K> keyComparator,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
try {
assertNameNotExist(name);
BTree<K, V> tree = BTree.createInstance(this, keyComparator, keySerializer, valueSerializer,true);
setNamedObject(name, tree.getRecid());
ConcurrentNavigableMap<K,V> ret = new BTreeMap<K, V>(tree,false); //TODO put readonly flag here
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized <K> NavigableSet<K> getTreeSet(String name) {
Object o = getCollectionInstance(name);
if(o!=null)
return (NavigableSet<K> ) o;
try {
long recid = getNamedObject(name);
if(recid == 0) return null;
BTree t = BTree.<K, Object>load(this, recid);
if(t.hasValues())
throw new ClassCastException("TreeMap is not TreeSet");
BTreeSet<K> ret = new BTreeSet<K>(new BTreeMap(t,false));
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized <K> NavigableSet<K> createTreeSet(String name) {
return createTreeSet(name, null, null);
}
public synchronized <K> NavigableSet<K> createTreeSet(String name, Comparator<K> keyComparator, Serializer<K> keySerializer) {
try {
assertNameNotExist(name);
BTree<K, Object> tree = BTree.createInstance(this, keyComparator, keySerializer, null,false);
setNamedObject(name, tree.getRecid());
BTreeSet<K> ret = new BTreeSet<K>(new BTreeMap(tree,false));
collections.put(name,new WeakReference<Object>(ret));
return ret;
} catch (IOException e) {
throw new IOError(e);
}
}
synchronized public <K> List<K> createLinkedList(String name) {
return createLinkedList(name, null);
}
synchronized public <K> List<K> createLinkedList(String name, Serializer<K> serializer) {
try {
assertNameNotExist(name);
//allocate record and overwrite it
LinkedList2<K> list = new LinkedList2<K>(this, serializer);
long recid = insert(list);
setNamedObject(name, recid);
collections.put(name,new WeakReference<Object>(list));
return list;
} catch (IOException e) {
throw new IOError(e);
}
}
synchronized public <K> List<K> getLinkedList(String name) {
Object o = getCollectionInstance(name);
if(o!=null)
return (List<K> ) o;
try {
long recid = getNamedObject(name);
if(recid == 0) return null;
LinkedList2<K> list = (LinkedList2<K>) fetch(recid);
list.setPersistenceContext(this);
collections.put(name,new WeakReference<Object>(list));
return list;
} catch (IOException e) {
throw new IOError(e);
}
}
private synchronized Object getCollectionInstance(String name){
WeakReference ref = collections.get(name);
if(ref==null)return null;
Object o = ref.get();
if(o != null) return o;
//already GCed
collections.remove(name);
return null;
}
private void assertNameNotExist(String name) throws IOException {
if (getNamedObject(name) != 0)
throw new IllegalArgumentException("Object with name '" + name + "' already exists");
}
/**
* Obtain the record id of a named object. Returns 0 if named object
* doesn't exist.
* Named objects are used to store Map views and other well known objects.
*/
synchronized protected long getNamedObject(String name) throws IOException{
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
if(nameDirectory_recid == 0){
return 0;
}
HTree<String,Long> m = fetch(nameDirectory_recid);
Long res = m.get(name);
if(res == null)
return 0;
return res;
}
/**
* Set the record id of a named object.
* Named objects are used to store Map views and other well known objects.
*/
synchronized protected void setNamedObject(String name, long recid) throws IOException{
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
HTree<String,Long> m = null;
if(nameDirectory_recid == 0){
//does not exist, create it
m = new HTree<String, Long>(this,null,null,true);
nameDirectory_recid = insert(m);
setRoot(NAME_DIRECTORY_ROOT,nameDirectory_recid);
}else{
//fetch it
m = fetch(nameDirectory_recid);
}
m.put(name,recid);
}
synchronized public Map<String,Object> getCollections(){
try{
Map<String,Object> ret = new LinkedHashMap<String, Object>();
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
if(nameDirectory_recid==0)
return ret;
HTree<String,Long> m = fetch(nameDirectory_recid);
for(Map.Entry<String,Long> e:m.entrySet()){
Object o = fetch(e.getValue());
if(o instanceof BTree){
if(((BTree) o).hasValues)
o = getTreeMap(e.getKey());
else
o = getTreeSet(e.getKey());
}
else if( o instanceof HTree){
if(((HTree) o).hasValues)
o = getHashMap(e.getKey());
else
o = getHashSet(e.getKey());
}
ret.put(e.getKey(), o);
}
return Collections.unmodifiableMap(ret);
}catch(IOException e){
throw new IOError(e);
}
}
synchronized public void deleteCollection(String name){
try{
long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
if(nameDirectory_recid==0)
throw new IOException("Collection not found");
HTree<String,Long> dir = fetch(nameDirectory_recid);
Long recid = dir.get(name);
if(recid == null) throw new IOException("Collection not found");
Object o = fetch(recid);
//we can not use the 'o' instance since it is not correctly initialized
if(o instanceof LinkedList2){
LinkedList2 l = (LinkedList2) o;
l.clear();
delete(l.rootRecid);
}else if(o instanceof BTree){
((BTree) o).clear();
} else if( o instanceof HTree){
HTree t = (HTree) o;
t.clear();
HTreeDirectory n = (HTreeDirectory) fetch(t.rootRecid,t.SERIALIZER);
n.deleteAllChildren();
delete(t.rootRecid);
}else{
throw new InternalError("unknown collection type: "+(o==null?null:o.getClass()));
}
delete(recid);
collections.remove(name);
dir.remove(name);
}catch(IOException e){
throw new IOError(e);
}
}
/** we need to set reference to this DB instance, so serializer needs to be here*/
final Serializer<Serialization> defaultSerializationSerializer = new Serializer<Serialization>(){
public void serialize(DataOutput out, Serialization obj) throws IOException {
LongPacker.packLong(out,obj.serialClassInfoRecid);
SerialClassInfo.serializer.serialize(out,obj.registered);
}
public Serialization deserialize(DataInput in) throws IOException, ClassNotFoundException {
final long recid = LongPacker.unpackLong(in);
final ArrayList<SerialClassInfo.ClassInfo> classes = SerialClassInfo.serializer.deserialize(in);
return new Serialization(DBAbstract.this,recid,classes);
}
};
public synchronized Serializer defaultSerializer() {
try{
long serialClassInfoRecid = getRoot(SERIAL_CLASS_INFO_RECID_ROOT);
if (serialClassInfoRecid == 0) {
//allocate new recid
serialClassInfoRecid = insert(null,Utils.NULL_SERIALIZER,false);
//and insert new serializer
Serialization ser = new Serialization(this,serialClassInfoRecid,new ArrayList<SerialClassInfo.ClassInfo>());
update(serialClassInfoRecid,ser, defaultSerializationSerializer);
setRoot(SERIAL_CLASS_INFO_RECID_ROOT, serialClassInfoRecid);
return ser;
}else{
return fetch(serialClassInfoRecid,defaultSerializationSerializer);
}
} catch (IOException e) {
throw new IOError(e);
}
}
final protected void checkNotClosed(){
if(isClosed()) throw new IllegalStateException("db was closed");
}
protected abstract void setRoot(byte root, long recid);
protected abstract long getRoot(byte root);
synchronized public long collectionSize(Object collection){
if(collection instanceof BTreeMap){
BTreeMap t = (BTreeMap) collection;
if(t.fromKey!=null|| t.toKey!=null) throw new IllegalArgumentException("collectionSize does not work on BTree submap");
return t.tree._entries;
}else if(collection instanceof HTree){
return ((HTree)collection).getRoot().size;
}else if(collection instanceof HTreeSet){
return collectionSize(((HTreeSet) collection).map);
}else if(collection instanceof BTreeSet){
return collectionSize(((BTreeSet) collection).map);
}else if(collection instanceof LinkedList2){
return ((LinkedList2)collection).getRoot().size;
}else{
throw new IllegalArgumentException("Not JDBM collection");
}
}
void addShutdownHook(){
if(shutdownCloseThread==null){
//install the hook once and remember which DB instance to close on JVM exit
shutdownCloseThread = new ShutdownCloseThread();
shutdownCloseThread.dbToClose = this;
Runtime.getRuntime().addShutdownHook(shutdownCloseThread);
}
}
public void close(){
if(shutdownCloseThread!=null){
Runtime.getRuntime().removeShutdownHook(shutdownCloseThread);
shutdownCloseThread.dbToClose = null;
shutdownCloseThread = null;
}
}
ShutdownCloseThread shutdownCloseThread = null;
private static class ShutdownCloseThread extends Thread{
DBAbstract dbToClose = null;
ShutdownCloseThread(){
super("JDBM shutdown");
}
public void run(){
if(dbToClose!=null && !dbToClose.isClosed()){
dbToClose.shutdownCloseThread = null;
dbToClose.close();
}
}
}
synchronized public void rollback() {
try {
for(WeakReference<Object> o:collections.values()){
Object c = o.get();
if(c != null && c instanceof BTreeMap){
//reload tree
BTreeMap m = (BTreeMap) c;
m.tree = fetch(m.tree.getRecid());
}
if(c != null && c instanceof BTreeSet){
//reload tree
BTreeSet m = (BTreeSet) c;
m.map.tree = fetch(m.map.tree.getRecid());
}
}
} catch (IOException e) {
throw new IOError(e);
}
}
}
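The name directory used by getNamedObject/setNamedObject is itself an HTree stored at the reserved NAME_DIRECTORY_ROOT slot, mapping collection names to record ids; that is what lets a collection be found again after the store is reopened. A small sketch of that round trip (illustrative only, the file name is a placeholder):
import java.util.concurrent.ConcurrentNavigableMap;
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class NamedObjectSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-named").make();
        db.createTreeMap("events");     // allocates a BTree and records name -> recid in the directory
        db.commit();
        db.close();
        DB db2 = DBMaker.openFile("demo-named").make();   // reopen the same store
        ConcurrentNavigableMap<Long, String> events = db2.getTreeMap("events");  // resolved via getNamedObject
        System.out.println(events.size());                // 0, but the collection itself was found
        db2.close();
    }
}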

View File

@ -0,0 +1,162 @@
package org.apache.jdbm;
import javax.crypto.Cipher;
import java.io.IOError;
import java.io.IOException;
import java.util.Comparator;
import java.util.Iterator;
/**
* Abstract class with common cache functionality
*/
abstract class DBCache extends DBStore{
static final int NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT = 1024;
static final byte NONE = 1;
static final byte MRU = 2;
static final byte WEAK = 3;
static final byte SOFT = 4;
static final byte HARD = 5;
static final class DirtyCacheEntry {
long _recid; //TODO recid is already part of _hashDirties, so this field could be removed to save memory
Object _obj;
Serializer _serializer;
}
/**
* Dirty status of _hash CacheEntry Values
*/
final protected LongHashMap<DirtyCacheEntry> _hashDirties = new LongHashMap<DirtyCacheEntry>();
private Serializer cachedDefaultSerializer = null;
/**
* Construct a CacheRecordManager wrapping another DB and
* using a given cache policy.
*/
public DBCache(String filename, boolean readonly, boolean transactionDisabled,
Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
boolean deleteFilesAfterClose,boolean lockingDisabled){
super(filename, readonly, transactionDisabled,
cipherIn, cipherOut, useRandomAccessFile,
deleteFilesAfterClose,lockingDisabled);
}
@Override
public synchronized Serializer defaultSerializer(){
if(cachedDefaultSerializer==null)
cachedDefaultSerializer = super.defaultSerializer();
return cachedDefaultSerializer;
}
@Override
boolean needsAutoCommit() {
return super.needsAutoCommit()||
(transactionsDisabled && !commitInProgress && _hashDirties.size() > NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT);
}
public synchronized <A> long insert(final A obj, final Serializer<A> serializer, final boolean disableCache)
throws IOException {
checkNotClosed();
if(super.needsAutoCommit())
commit();
if(disableCache)
return super.insert(obj, serializer, disableCache);
//preallocate recid so we have something to return
final long recid = super.insert(PREALOCATE_OBJ, null, disableCache);
// super.update(recid, obj,serializer);
// return super.insert(obj,serializer,disableCache);
//and create new dirty record for future update
final DirtyCacheEntry e = new DirtyCacheEntry();
e._recid = recid;
e._obj = obj;
e._serializer = serializer;
_hashDirties.put(recid,e);
return recid;
}
public synchronized void commit() {
try{
commitInProgress = true;
updateCacheEntries();
super.commit();
}finally {
commitInProgress = false;
}
}
public synchronized void rollback(){
cachedDefaultSerializer = null;
_hashDirties.clear();
super.rollback();
}
private static final Comparator<DirtyCacheEntry> DIRTY_COMPARATOR = new Comparator<DirtyCacheEntry>() {
final public int compare(DirtyCacheEntry o1, DirtyCacheEntry o2) {
return o1._recid < o2._recid ? -1 : (o1._recid == o2._recid ? 0 : 1); //avoid int overflow from subtracting longs
}
};
/**
* Update all dirty cache objects to the underlying DB.
*/
protected void updateCacheEntries() {
try {
synchronized(_hashDirties){
while(!_hashDirties.isEmpty()){
//make defensive copy of values as _db.update() may trigger changes in db
//and this would modify dirties again
DirtyCacheEntry[] vals = new DirtyCacheEntry[_hashDirties.size()];
Iterator<DirtyCacheEntry> iter = _hashDirties.valuesIterator();
for(int i = 0;i<vals.length;i++){
vals[i] = iter.next();
}
iter = null;
java.util.Arrays.sort(vals,DIRTY_COMPARATOR);
for(int i = 0;i<vals.length;i++){
final DirtyCacheEntry entry = vals[i];
vals[i] = null;
super.update(entry._recid, entry._obj, entry._serializer);
_hashDirties.remove(entry._recid);
}
//update may have triggered more records to be added into dirties, so repeat until all records are written.
}
}
} catch (IOException e) {
throw new IOError(e);
}
}
}
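The write-back behaviour above matters mostly with transactions disabled: dirty records pile up in _hashDirties and are flushed in recid order either on commit() or automatically once NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT is exceeded. A hedged sketch of the bulk-load pattern this enables (file name is a placeholder):
import java.util.concurrent.ConcurrentMap;
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class BulkLoadSketch {
    public static void main(String[] args) {
        // no transaction log: writes are buffered in the cache and flushed in batches
        DB db = DBMaker.openFile("demo-bulk").disableTransactions().make();
        ConcurrentMap<Integer, String> m = db.createHashMap("bulk");
        for (int i = 0; i < 100000; i++)
            m.put(i, "row-" + i);   // auto-commit kicks in after enough dirty records
        db.commit();                // flush whatever is still buffered
        db.close();                 // mandatory, otherwise the store may be corrupted
    }
}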

View File

@ -0,0 +1,350 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import javax.crypto.Cipher;
import java.io.IOException;
/**
* A DB wrapping and caching another DB.
*
* @author Jan Kotek
* @author Alex Boisvert
* @author Cees de Groot
*
* TODO add 'cache miss' statistics
*/
class DBCacheMRU
extends DBCache {
private static final boolean debug = false;
/**
* Cached object hashtable
*/
protected LongHashMap<CacheEntry> _hash;
/**
* Maximum number of objects in the cache.
*/
protected int _max;
/**
* Beginning of linked-list of cache elements. First entry is element
* which has been used least recently.
*/
protected CacheEntry _first;
/**
* End of linked-list of cache elements. Last entry is element
* which has been used most recently.
*/
protected CacheEntry _last;
/**
* Construct a CacheRecordManager wrapping another DB and
* using a given cache policy.
*/
public DBCacheMRU(String filename, boolean readonly, boolean transactionDisabled,
Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
boolean deleteFilesAfterClose, int cacheMaxRecords, boolean lockingDisabled) {
super(filename, readonly, transactionDisabled,
cipherIn, cipherOut, useRandomAccessFile,
deleteFilesAfterClose,lockingDisabled);
_hash = new LongHashMap<CacheEntry>(cacheMaxRecords);
_max = cacheMaxRecords;
}
public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
if (disableCache)
return super.fetch(recid, serializer, disableCache);
else
return fetch(recid, serializer);
}
public synchronized void delete(long recid)
throws IOException {
checkNotClosed();
super.delete(recid);
synchronized (_hash){
CacheEntry entry = _hash.get(recid);
if (entry != null) {
removeEntry(entry);
_hash.remove(entry._recid);
}
_hashDirties.remove(recid);
}
if(super.needsAutoCommit())
commit();
}
public synchronized <A> void update(final long recid, final A obj, final Serializer<A> serializer) throws IOException {
checkNotClosed();
synchronized (_hash){
//remove entry if it already exists
CacheEntry entry = cacheGet(recid);
if (entry != null) {
_hash.remove(recid);
removeEntry(entry);
}
//check if entry is in dirties, in this case just update its object
DirtyCacheEntry e = _hashDirties.get(recid);
if(e!=null){
if(recid!=e._recid) throw new Error();
e._obj = obj;
e._serializer = serializer;
return;
}
//create new dirty entry
e = new DirtyCacheEntry();
e._recid = recid;
e._obj = obj;
e._serializer = serializer;
_hashDirties.put(recid,e);
}
if(super.needsAutoCommit())
commit();
}
public synchronized <A> A fetch(long recid, Serializer<A> serializer)
throws IOException {
checkNotClosed();
final CacheEntry entry = cacheGet(recid);
if (entry != null) {
return (A) entry._obj;
}
//check dirties
final DirtyCacheEntry entry2 = _hashDirties.get(recid);
if(entry2!=null){
return (A) entry2._obj;
}
A value = super.fetch(recid, serializer);
if(super.needsAutoCommit())
commit();
//put record into MRU cache
cachePut(recid, value);
return value;
}
public synchronized void close() {
if(isClosed())
return;
updateCacheEntries();
super.close();
_hash = null;
}
public synchronized void rollback() {
// discard all cache entries since we don't know which entries
// were part of the transaction
synchronized (_hash){
_hash.clear();
_first = null;
_last = null;
}
super.rollback();
}
/**
* Obtain an object in the cache
*/
protected CacheEntry cacheGet(long key) {
synchronized (_hash){
CacheEntry entry = _hash.get(key);
if ( entry != null && _last != entry) {
//touch entry
removeEntry(entry);
addEntry(entry);
}
return entry;
}
}
/**
* Place an object in the cache.
*
* @throws IOException
*/
protected void cachePut(final long recid, final Object value) throws IOException {
synchronized (_hash){
CacheEntry entry = _hash.get(recid);
if (entry != null) {
entry._obj = value;
//touch entry
if (_last != entry) {
removeEntry(entry);
addEntry(entry);
}
} else {
if (_hash.size() >= _max) {
// purge and recycle entry
entry = purgeEntry();
entry._recid = recid;
entry._obj = value;
} else {
entry = new CacheEntry(recid, value);
}
addEntry(entry);
_hash.put(entry._recid, entry);
}
}
}
/**
* Add a CacheEntry. Entry goes at the end of the list.
*/
protected void addEntry(CacheEntry entry) {
synchronized (_hash){
if (_first == null) {
_first = entry;
_last = entry;
} else {
_last._next = entry;
entry._previous = _last;
_last = entry;
}
}
}
/**
* Remove a CacheEntry from linked list
*/
protected void removeEntry(CacheEntry entry) {
synchronized (_hash){
if (entry == _first) {
_first = entry._next;
}
if (_last == entry) {
_last = entry._previous;
}
CacheEntry previous = entry._previous;
CacheEntry next = entry._next;
if (previous != null) {
previous._next = next;
}
if (next != null) {
next._previous = previous;
}
entry._previous = null;
entry._next = null;
}
}
/**
* Purge least recently used object from the cache
*
* @return recyclable CacheEntry
*/
protected CacheEntry purgeEntry() {
synchronized (_hash){
CacheEntry entry = _first;
if (entry == null)
return new CacheEntry(-1, null);
removeEntry(entry);
_hash.remove(entry._recid);
entry._obj = null;
return entry;
}
}
@SuppressWarnings("unchecked")
static final class CacheEntry {
protected long _recid;
protected Object _obj;
protected CacheEntry _previous;
protected CacheEntry _next;
CacheEntry(long recid, Object obj) {
_recid = recid;
_obj = obj;
}
}
public void clearCache() {
if(debug)
System.err.println("DBCache: Clear cache");
// discard all cache entries since we don't know which entries
// were part of the transaction
synchronized (_hash){
_hash.clear();
_first = null;
_last = null;
//clear dirties
updateCacheEntries();
}
}
}
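The MRU cache above is the default policy; its record count is the only tuning knob and is chosen through the builder. A minimal sketch of selecting and sizing it (values and file name are arbitrary examples):
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class MruCacheSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-mru")
                .enableMRUCache()         // default policy, made explicit here
                .setMRUCacheSize(8192)    // keep up to 8192 records, oldest purged first
                .make();
        db.close();
    }
}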

View File

@ -0,0 +1,401 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import javax.crypto.Cipher;
import java.io.IOException;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A DB wrapping and caching another DB.
*
* @author Jan Kotek
* @author Alex Boisvert
* @author Cees de Groot
*
* TODO add 'cache miss' statistics
*/
public class DBCacheRef
extends DBCache {
private static final boolean debug = false;
/**
* If Soft Cache is enabled, this contains softly referenced clean entries.
* If an entry becomes dirty, it is moved to _hashDirties, which has a limited size.
* This map is accessed from SoftCache Disposer thread, so all access must be
* synchronized
*/
protected LongHashMap _softHash;
/**
* Reference queue used to collect Soft Cache entries
*/
protected ReferenceQueue<ReferenceCacheEntry> _refQueue;
/**
* Thread in which Soft Cache references are disposed
*/
protected Thread _softRefThread;
protected static AtomicInteger threadCounter = new AtomicInteger(0);
/** counter which counts the number of inserts since the last 'action' */
protected int insertCounter = 0;
private final boolean _autoClearReferenceCacheOnLowMem;
private final byte _cacheType;
/**
* Construct a CacheRecordManager wrapping another DB and
* using a given cache policy.
*/
public DBCacheRef(String filename, boolean readonly, boolean transactionDisabled,
Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
boolean deleteFilesAfterClose,
byte cacheType, boolean cacheAutoClearOnLowMem, boolean lockingDisabled) {
super(filename, readonly, transactionDisabled,
cipherIn, cipherOut, useRandomAccessFile,
deleteFilesAfterClose, lockingDisabled);
this._cacheType = cacheType;
_autoClearReferenceCacheOnLowMem = cacheAutoClearOnLowMem;
_softHash = new LongHashMap<ReferenceCacheEntry>();
_refQueue = new ReferenceQueue<ReferenceCacheEntry>();
_softRefThread = new Thread(
new SoftRunnable(this, _refQueue),
"JDBM Soft Cache Disposer " + (threadCounter.incrementAndGet()));
_softRefThread.setDaemon(true);
_softRefThread.start();
}
void clearCacheIfLowOnMem() {
insertCounter = 0;
if(!_autoClearReferenceCacheOnLowMem)
return;
Runtime r = Runtime.getRuntime();
long max = r.maxMemory();
if(max == Long.MAX_VALUE)
return;
double free = r.freeMemory();
double total = r.totalMemory();
//We believe that free refers to total not max.
//Increasing heap size up to max would make that headroom available as free memory
free = free + (max-total);
if(debug)
System.err.println("DBCache: freemem = " +free + " = "+(free/max)+"%");
if(free<1e7 || free*4 <max)
clearCache();
}
public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
if (disableCache)
return super.fetch(recid, serializer, disableCache);
else
return fetch(recid, serializer);
}
public synchronized void delete(long recid)
throws IOException {
checkNotClosed();
super.delete(recid);
synchronized (_hashDirties){
_hashDirties.remove(recid);
}
synchronized (_softHash) {
Object e = _softHash.remove(recid);
if (e != null && e instanceof ReferenceCacheEntry) {
((ReferenceCacheEntry)e).clear();
}
}
if(needsAutoCommit())
commit();
}
public synchronized <A> void update(final long recid, A obj, Serializer<A> serializer) throws IOException {
checkNotClosed();
synchronized (_softHash) {
//soft cache can not contain dirty objects
Object e = _softHash.remove(recid);
if (e != null && e instanceof ReferenceCacheEntry) {
((ReferenceCacheEntry)e).clear();
}
}
synchronized (_hashDirties){
//put into dirty cache
final DirtyCacheEntry e = new DirtyCacheEntry();
e._recid = recid;
e._obj = obj;
e._serializer = serializer;
_hashDirties.put(recid,e);
}
if(needsAutoCommit())
commit();
}
public synchronized <A> A fetch(long recid, Serializer<A> serializer)
throws IOException {
checkNotClosed();
synchronized (_softHash) {
Object e = _softHash.get(recid);
if (e != null) {
if(e instanceof ReferenceCacheEntry)
e = ((ReferenceCacheEntry)e).get();
if (e != null) {
return (A) e;
}
}
}
synchronized (_hashDirties){
DirtyCacheEntry e2 = _hashDirties.get(recid);
if(e2!=null){
return (A) e2._obj;
}
}
A value = super.fetch(recid, serializer);
if(needsAutoCommit())
commit();
synchronized (_softHash) {
if (_cacheType == SOFT)
_softHash.put(recid, new SoftCacheEntry(recid, value, _refQueue));
else if (_cacheType == WEAK)
_softHash.put(recid, new WeakCacheEntry(recid, value, _refQueue));
else
_softHash.put(recid,value);
}
return value;
}
public synchronized void close() {
checkNotClosed();
updateCacheEntries();
super.close();
_softHash = null;
_softRefThread.interrupt();
}
public synchronized void rollback() {
checkNotClosed();
// discard all cache entries since we don't know which entries
// were part of the transaction
synchronized (_softHash) {
Iterator<ReferenceCacheEntry> iter = _softHash.valuesIterator();
while (iter.hasNext()) {
ReferenceCacheEntry e = iter.next();
e.clear();
}
_softHash.clear();
}
super.rollback();
}
protected boolean isCacheEntryDirty(DirtyCacheEntry entry) {
return _hashDirties.get(entry._recid) != null;
}
protected void setCacheEntryDirty(DirtyCacheEntry entry, boolean dirty) {
if (dirty) {
_hashDirties.put(entry._recid, entry);
} else {
_hashDirties.remove(entry._recid);
}
}
interface ReferenceCacheEntry {
long getRecid();
void clear();
Object get();
}
@SuppressWarnings("unchecked")
static final class SoftCacheEntry extends SoftReference implements ReferenceCacheEntry {
protected final long _recid;
public long getRecid() {
return _recid;
}
SoftCacheEntry(long recid, Object obj, ReferenceQueue queue) {
super(obj, queue);
_recid = recid;
}
}
@SuppressWarnings("unchecked")
static final class WeakCacheEntry extends WeakReference implements ReferenceCacheEntry {
protected final long _recid;
public long getRecid() {
return _recid;
}
WeakCacheEntry(long recid, Object obj, ReferenceQueue queue) {
super(obj, queue);
_recid = recid;
}
}
/**
* Runs in separate thread and cleans SoftCache.
* Runnable exits automatically when the CacheRecordManager is GCed
*
* @author Jan Kotek
*/
static final class SoftRunnable implements Runnable {
private ReferenceQueue<ReferenceCacheEntry> entryQueue;
private WeakReference<DBCacheRef> db2;
public SoftRunnable(DBCacheRef db,
ReferenceQueue<ReferenceCacheEntry> entryQueue) {
this.db2 = new WeakReference<DBCacheRef>(db);
this.entryQueue = entryQueue;
}
public void run() {
while (true) try {
//collect next item from cache,
//the 10000 ms limit is there to keep periodically checking whether the db was GCed
ReferenceCacheEntry e = (ReferenceCacheEntry) entryQueue.remove(10000);
//check if db was GCed, cancel in that case
DBCacheRef db = db2.get();
if (db == null)
return;
if (e != null) {
synchronized (db._softHash) {
int counter = 0;
while (e != null) {
db._softHash.remove(e.getRecid());
e = (SoftCacheEntry) entryQueue.poll();
if(debug)
counter++;
}
if(debug)
System.err.println("DBCache: "+counter+" objects released from ref cache.");
}
}else{
//check memory consumption every 10 seconds
db.clearCacheIfLowOnMem();
}
} catch (InterruptedException e) {
return;
} catch (Throwable e) {
//this thread must keep spinning,
//otherwise SoftCacheEntries would not be disposed
e.printStackTrace();
}
}
}
public void clearCache() {
if(debug)
System.err.println("DBCache: Clear cache");
synchronized (_softHash) {
if(_cacheType!=HARD){
Iterator<ReferenceCacheEntry> iter = _softHash.valuesIterator();
while (iter.hasNext()) {
ReferenceCacheEntry e = iter.next();
e.clear();
}
}
_softHash.clear();
}
}
}
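The reference-based caches trade determinism for capacity: soft and weak entries are reclaimed by the GC, and clearCacheIfLowOnMem may drop the whole cache when free heap runs low. A hedged sketch of choosing a reference cache and opting out of the auto-clear (file name is a placeholder):
import org.apache.jdbm.DB;
import org.apache.jdbm.DBMaker;
public class RefCacheSketch {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("demo-refcache")
                .enableSoftCache()          // entries stay cached until the GC needs memory
                .disableCacheAutoClear()    // skip the low-memory full clear
                .make();
        db.clearCache();                    // the cache can still be cleared explicitly
        db.close();
    }
}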

View File

@ -0,0 +1,351 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOError;
import java.security.spec.KeySpec;
/**
* Class used to configure and create DB. It uses builder pattern.
*/
public class DBMaker {
private byte cacheType = DBCacheRef.MRU;
private int mruCacheSize = 2048;
private String location = null;
private boolean disableTransactions = false;
private boolean lockingDisabled = false;
private boolean readonly = false;
private String password = null;
private boolean useAES256Bit = true;
private boolean useRandomAccessFile = false;
private boolean autoClearRefCacheOnLowMem = true;
private boolean closeOnJVMExit = false;
private boolean deleteFilesAfterCloseFlag = false;
private DBMaker(){}
/**
* Creates new DBMaker and sets file to load data from.
* @param file to load data from
* @return new DBMaker
*/
public static DBMaker openFile(String file){
DBMaker m = new DBMaker();
m.location = file;
return m;
}
/**
* Creates new DBMaker which uses in memory store. Data will be lost after JVM exits.
* @return new DBMaker
*/
public static DBMaker openMemory(){
return new DBMaker();
}
/**
* Open store in zip file
*
* @param zip file
* @return new DBMaker
*/
public static DBMaker openZip(String zip) {
DBMaker m = new DBMaker();
m.location = "$$ZIP$$://"+zip;
return m;
}
static String isZipFileLocation(String location){
String match = "$$ZIP$$://";
if( location.startsWith(match)){
return location.substring(match.length());
}
return null;
}
/**
* Use WeakReference for cache.
* This cache does not improve performance much,
* but prevents JDBM from creating multiple instances of the same object.
*
* @return this builder
*/
public DBMaker enableWeakCache() {
cacheType = DBCacheRef.WEAK;
return this;
}
/**
* Use SoftReference for cache.
* This cache greatly improves performance if you have enough memory.
* Instances in cache are Garbage Collected when memory gets low
*
* @return this builder
*/
public DBMaker enableSoftCache() {
cacheType = DBCacheRef.SOFT;
return this;
}
/**
* Use hard reference for cache.
* This greatly improves performance if there is enough memory.
* Hard cache has a smaller memory overhead than Soft or Weak, because
* reference objects and queues do not have to be maintained
*
* @return this builder
*/
public DBMaker enableHardCache() {
cacheType = DBCacheRef.HARD;
return this;
}
/**
* Use 'Most Recently Used' cache with limited size.
* Oldest instances are released from cache when new instances are fetched.
* This cache is not cleared by GC. It is good for systems with limited memory.
* <p/>
* Default size for MRU cache is 2048 records.
*
* @return this builder
*/
public DBMaker enableMRUCache() {
cacheType = DBCacheRef.MRU;
return this;
}
/**
*
* Sets 'Most Recently Used' cache size. This cache is activated by default with size 2048
*
* @param cacheSize number of instances which will be kept in cache.
* @return this builder
*/
public DBMaker setMRUCacheSize(int cacheSize) {
if (cacheSize < 0) throw new IllegalArgumentException("Cache size is smaller than zero");
cacheType = DBCacheRef.MRU;
mruCacheSize = cacheSize;
return this;
}
/**
* If reference (soft,weak or hard) cache is enabled,
* GC may not release references fast enough (or not at all in case of hard cache).
* So JDBM periodically checks amount of free heap memory.
* If free memory is less than 25% or 10MB,
* JDBM completely clears its reference cache to prevent possible memory issues.
* <p>
* Calling this method disables auto cache clearing when mem is low.
* And of course it can cause some out of memory exceptions.
*
* @return this builder
*/
public DBMaker disableCacheAutoClear(){
this.autoClearRefCacheOnLowMem = false;
return this;
}
/**
* Enables storage encryption using the AES cipher. JDBM supports both 128 bit and 256 bit encryption if the JRE provides it.
* There are some restrictions on AES 256 bit and not all JREs have it by default.
* <p/>
* Storage can not be read (decrypted), unless the key is provided next time it is opened
*
* @param password used to encrypt store
* @param useAES256Bit if true strong AES 256 bit encryption is used. Otherwise more usual AES 128 bit is used.
* @return this builder
*/
public DBMaker enableEncryption(String password, boolean useAES256Bit) {
this.password = password;
this.useAES256Bit = useAES256Bit;
return this;
}
/**
* Makes the DB read-only.
* Update/delete/insert operations will throw 'UnsupportedOperationException'.
*
* @return this builder
*/
public DBMaker readonly() {
readonly = true;
return this;
}
/**
* Disable cache completely
*
* @return this builder
*/
public DBMaker disableCache() {
cacheType = DBCacheRef.NONE;
return this;
}
/**
* Option to disable transactions (to increase performance at the cost of potential data loss).
* Transactions are enabled by default.
* <p/>
* Switches off transactions for the record manager. This means
* that a) a transaction log is not kept, and b) writes aren't
* synced after every update. Writes are cached in memory and then flushed
* to disk every N writes. You may also flush writes manually by calling commit().
* This is useful when batch inserting into a new database.
* <p/>
* When using this, the database must be properly closed before JVM shutdown.
* Failing to do so will corrupt the store.
*
* @return this builder
*/
public DBMaker disableTransactions() {
this.disableTransactions = true;
return this;
}
/**
* Disable file system based locking (for file systems that do not support it).
*
* Locking is not supported by many remote or distributed file systems, such
* as Lustre and NFS. Attempts to acquire locks will result in an
* IOException with the message "Function not implemented".
*
* Disabling locking will avoid this issue, though of course it comes with
* all the issues of uncontrolled file access.
*
* @return this builder
*/
public DBMaker disableLocking(){
this.lockingDisabled = true;
return this;
}
/**
* By default JDBM uses mapped memory buffers to read from files.
* But this may behave strangely on some platforms.
* A safer alternative is to use the old RandomAccessFile rather than a mapped ByteBuffer.
* It is typically slower (pages need to be copied into memory on every write).
*
* @return this builder
*/
public DBMaker useRandomAccessFile(){
this.useRandomAccessFile = true;
return this;
}
/**
* Registers a shutdown hook that closes the database on JVM exit, if it was not already closed.
*
* @return this builder
*/
public DBMaker closeOnExit(){
this.closeOnJVMExit = true;
return this;
}
/**
* Delete all storage files after DB is closed
*
* @return this builder
*/
public DBMaker deleteFilesAfterClose(){
this.deleteFilesAfterCloseFlag = true;
return this;
}
/**
* Opens database with settings earlier specified in this builder.
*
* @return new DB
* @throws java.io.IOError if db could not be opened
*/
public DB make() {
Cipher cipherIn = null;
Cipher cipherOut = null;
if (password != null) try {
//initialize ciphers
//this code comes from Stack Overflow
//http://stackoverflow.com/questions/992019/java-256bit-aes-encryption/992413#992413
byte[] salt = new byte[]{3, -34, 123, 53, 78, 121, -12, -1, 45, -12, -48, 89, 11, 100, 99, 8};
SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, 1024, useAES256Bit?256:128);
SecretKey tmp = factory.generateSecret(spec);
SecretKey secret = new SecretKeySpec(tmp.getEncoded(), "AES");
String transform = "AES/CBC/NoPadding";
IvParameterSpec params = new IvParameterSpec(salt);
cipherIn = Cipher.getInstance(transform);
cipherIn.init(Cipher.ENCRYPT_MODE, secret, params);
cipherOut = Cipher.getInstance(transform);
cipherOut.init(Cipher.DECRYPT_MODE, secret, params);
//sanity check, try with page size
byte[] data = new byte[Storage.PAGE_SIZE];
byte[] encData = cipherIn.doFinal(data);
if (encData.length != Storage.PAGE_SIZE)
throw new Error("Page size changed after encryption, make sure you use '/NoPadding'");
byte[] data2 = cipherOut.doFinal(encData);
for (int i = 0; i < data.length; i++) {
if (data[i] != data2[i]) throw new Error("Encryption provided by JRE does not work");
}
} catch (Exception e) {
throw new IOError(e);
}
DBAbstract db = null;
if (cacheType == DBCacheRef.MRU){
db = new DBCacheMRU(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag, mruCacheSize,lockingDisabled);
}else if( cacheType == DBCacheRef.SOFT || cacheType == DBCacheRef.HARD || cacheType == DBCacheRef.WEAK) {
db = new DBCacheRef(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag, cacheType,autoClearRefCacheOnLowMem,lockingDisabled);
} else if (cacheType == DBCacheRef.NONE) {
db = new DBStore(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag,lockingDisabled);
} else {
throw new IllegalArgumentException("Unknown cache type: " + cacheType);
}
if(closeOnJVMExit){
db.addShutdownHook();
}
return db;
}
}
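// Usage sketch (not part of the original file): how the builder above is typically chained.
// It uses only methods declared in DBMaker; the file path is illustrative, and close() on the
// returned DB handle is assumed to behave as implemented by DBStore later in this changeset.
class DBMakerUsageExample {
    public static void main(String[] args) {
        DB db = DBMaker.openFile("/tmp/example-db")   // hypothetical location
                .enableSoftCache()                    // reference cache, cleared when memory gets low
                .closeOnExit()                        // register a shutdown hook
                .make();
        // ... work with the database ...
        db.close();                                   // assumed DB method, see DBStore.close()
    }
}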

View File

@ -0,0 +1,928 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import javax.crypto.Cipher;
import java.io.*;
import java.util.*;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
* This class manages records, which are uninterpreted blobs of data. The
* set of operations is simple and straightforward: you communicate with
* the class using long "rowids" and byte[] data blocks. Rowids are returned
* on inserts and you can stash them away someplace safe to be able to get
* back to them. Data blocks can be as long as you wish, and may have
* lengths different from the original when updating.
* <p/>
* Operations are synchronized, so that only one of them will happen
* concurrently even if you hammer away from multiple threads. Operations
* are made atomic by keeping a transaction log which is recovered after
* a crash, so the operations specified by this interface all have ACID
* properties.
* <p/>
* You identify a file by just the name. The package attaches <tt>.db</tt>
* for the database file, and <tt>.lg</tt> for the transaction log. The
* transaction log is synchronized regularly and then restarted, so don't
* worry if you see the size going up and down.
*
* @author Alex Boisvert
* @author Cees de Groot
*/
class DBStore
extends DBAbstract {
/**
* Version of the storage format. It should be safe to open lower versions, but the engine should throw an exception
* when opening newer versions (as they may contain unsupported features or serialization formats).
*/
static final long STORE_FORMAT_VERSION = 1L;
/**
* Underlying file for store records.
*/
private PageFile _file;
/**
* Page manager for physical manager.
*/
private PageManager _pageman;
/**
* Physical row identifier manager.
*/
private PhysicalRowIdManager _physMgr;
/**
* Indicates that the store is opened for read-only operations.
* If true, the store will throw UnsupportedOperationException when an update/insert/delete operation is called.
*/
private final boolean readonly;
final boolean transactionsDisabled;
private final boolean deleteFilesAfterClose;
private static final int AUTOCOMMIT_AFTER_N_PAGES = 1024 * 5;
boolean commitInProgress = false;
/**
* cipher used for decryption, may be null
*/
private Cipher cipherOut;
/**
* cipher used for encryption, may be null
*/
private Cipher cipherIn;
private boolean useRandomAccessFile;
private boolean lockingDisabled;
void checkCanWrite() {
if (readonly)
throw new UnsupportedOperationException("Could not write, store is opened as read-only");
}
/**
* Logical to physical row identifier manager.
*/
private LogicalRowIdManager _logicMgr;
/**
* Static debugging flag
*/
public static final boolean DEBUG = false;
static final long PREALOCATE_PHYS_RECID = Short.MIN_VALUE;
static final Object PREALOCATE_OBJ = new Object();
private final DataInputOutput buffer = new DataInputOutput();
private boolean bufferInUse = false;
private final String _filename;
public DBStore(String filename, boolean readonly, boolean transactionDisabled, boolean lockingDisabled) throws IOException {
this(filename, readonly, transactionDisabled, null, null, false,false,false);
}
/**
* Creates a record manager for the indicated file
*
* @throws IOException when the file cannot be opened or is not
* a valid file content-wise.
*/
public DBStore(String filename, boolean readonly, boolean transactionDisabled,
Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
boolean deleteFilesAfterClose, boolean lockingDisabled){
_filename = filename;
this.readonly = readonly;
this.transactionsDisabled = transactionDisabled;
this.cipherIn = cipherIn;
this.cipherOut = cipherOut;
this.useRandomAccessFile = useRandomAccessFile;
this.deleteFilesAfterClose = deleteFilesAfterClose;
this.lockingDisabled = lockingDisabled;
reopen();
}
private void reopen() {
try{
_file = new PageFile(_filename, readonly, transactionsDisabled, cipherIn, cipherOut,useRandomAccessFile,lockingDisabled);
_pageman = new PageManager(_file);
_physMgr = new PhysicalRowIdManager(_file, _pageman);
_logicMgr = new LogicalRowIdManager(_file, _pageman);
long versionNumber = getRoot(STORE_VERSION_NUMBER_ROOT);
if (versionNumber > STORE_FORMAT_VERSION)
throw new IOException("Unsupported version of store. Please update JDBM. Minimal supported ver:" + STORE_FORMAT_VERSION + ", store ver:" + versionNumber);
if (!readonly)
setRoot(STORE_VERSION_NUMBER_ROOT, STORE_FORMAT_VERSION);
}catch(IOException e){
throw new IOError(e);
}
}
/**
* Closes the record manager.
*
* @throws IOException when one of the underlying I/O operations fails.
*/
public synchronized void close() {
checkNotClosed();
try {
super.close();
_pageman.close();
_file.close();
if(deleteFilesAfterClose)
_file.storage.deleteAllFiles();
_pageman = null;
_file = null;
} catch (IOException e) {
throw new IOError(e);
}
}
public boolean isClosed() {
return _pageman==null;
}
public synchronized <A> long insert(final A obj, final Serializer<A> serializer, final boolean disableCache)
throws IOException {
checkNotClosed();
checkCanWrite();
if (needsAutoCommit()) {
commit();
}
if (bufferInUse) {
//current reusable buffer is in use, have to fall back to creating new instances
DataInputOutput buffer2 = new DataInputOutput();
return insert2(obj, serializer, buffer2);
}
try {
bufferInUse = true;
return insert2(obj, serializer, buffer);
} finally {
bufferInUse = false;
}
}
boolean needsAutoCommit() {
return transactionsDisabled && !commitInProgress &&
(_file.getDirtyPageCount() >= AUTOCOMMIT_AFTER_N_PAGES );
}
private <A> long insert2(A obj, Serializer<A> serializer, DataInputOutput buf)
throws IOException {
buf.reset();
long physRowId;
if(obj==PREALOCATE_OBJ){
//if inserted record is PREALOCATE_OBJ , it gets special handling.
//it is inserted only into _logicMgr with special value to indicate null
//this is used to preallocate recid for lazy inserts in cache
physRowId = PREALOCATE_PHYS_RECID;
}else{
serializer.serialize(buf, obj);
if(buf.getPos()>RecordHeader.MAX_RECORD_SIZE){
throw new IllegalArgumentException("Too big record. JDBM only supports record size up to: "+RecordHeader.MAX_RECORD_SIZE+" bytes. Record size was: "+buf.getPos());
}
physRowId = _physMgr.insert(buf.getBuf(), 0, buf.getPos());
}
final long recid = _logicMgr.insert(physRowId);
if (DEBUG) {
System.out.println("BaseRecordManager.insert() recid " + recid + " length " + buf.getPos());
}
return compressRecid(recid);
}
public synchronized void delete(long logRowId)
throws IOException {
checkNotClosed();
checkCanWrite();
if (logRowId <= 0) {
throw new IllegalArgumentException("Argument 'recid' is invalid: "
+ logRowId);
}
if (needsAutoCommit()) {
commit();
}
if (DEBUG) {
System.out.println("BaseRecordManager.delete() recid " + logRowId);
}
logRowId = decompressRecid(logRowId);
long physRowId = _logicMgr.fetch(logRowId);
_logicMgr.delete(logRowId);
if(physRowId!=PREALOCATE_PHYS_RECID){
_physMgr.free(physRowId);
}
}
public synchronized <A> void update(long recid, A obj, Serializer<A> serializer)
throws IOException {
checkNotClosed();
checkCanWrite();
if (recid <= 0) {
throw new IllegalArgumentException("Argument 'recid' is invalid: "
+ recid);
}
if (needsAutoCommit()) {
commit();
}
if (bufferInUse) {
//current reusable buffer is in use, have to create new instances
DataInputOutput buffer2 = new DataInputOutput();
update2(recid, obj, serializer, buffer2);
return;
}
try {
bufferInUse = true;
update2(recid, obj, serializer, buffer);
} finally {
bufferInUse = false;
}
}
private <A> void update2(long logRecid, final A obj, final Serializer<A> serializer, final DataInputOutput buf)
throws IOException {
logRecid = decompressRecid(logRecid);
long physRecid = _logicMgr.fetch(logRecid);
if (physRecid == 0)
throw new IOException("Can not update, recid does not exist: " + logRecid);
buf.reset();
serializer.serialize(buf, obj);
if (DEBUG) {
System.out.println("BaseRecordManager.update() recid " + logRecid + " length " + buf.getPos());
}
long newRecid =
physRecid!=PREALOCATE_PHYS_RECID?
_physMgr.update(physRecid, buf.getBuf(), 0, buf.getPos()):
//previous record was only virtual and does not actually exist, so make new insert
_physMgr.insert(buf.getBuf(),0,buf.getPos());
_logicMgr.update(logRecid, newRecid);
}
public synchronized <A> A fetch(final long recid, final Serializer<A> serializer)
throws IOException {
checkNotClosed();
if (recid <= 0) {
throw new IllegalArgumentException("Argument 'recid' is invalid: " + recid);
}
if (bufferInUse) {
//current reusable buffer is in use, have to create new instances
DataInputOutput buffer2 = new DataInputOutput();
return fetch2(recid, serializer, buffer2);
}
try {
bufferInUse = true;
return fetch2(recid, serializer, buffer);
} finally {
bufferInUse = false;
}
}
public synchronized <A> A fetch(long recid, Serializer<A> serializer, boolean disableCache) throws IOException {
//we don't have any cache, so we can ignore the disableCache parameter
return fetch(recid, serializer);
}
private <A> A fetch2(long recid, final Serializer<A> serializer, final DataInputOutput buf)
throws IOException {
recid = decompressRecid(recid);
buf.reset();
long physLocation = _logicMgr.fetch(recid);
if (physLocation == 0) {
//throw new IOException("Record not found, recid: "+recid);
return null;
}
if(physLocation == PREALOCATE_PHYS_RECID){
throw new InternalError("cache should prevent this!");
}
_physMgr.fetch(buf, physLocation);
if (DEBUG) {
System.out.println("BaseRecordManager.fetch() recid " + recid + " length " + buf.getPos());
}
buf.resetForReading();
try {
return serializer.deserialize(buf); //TODO there should be write limit to throw EOFException
} catch (ClassNotFoundException e) {
throw new IOError(e);
}
}
byte[] fetchRaw(long recid) throws IOException {
recid = decompressRecid(recid);
long physLocation = _logicMgr.fetch(recid);
if (physLocation == 0) {
//throw new IOException("Record not found, recid: "+recid);
return null;
}
DataInputOutput i = new DataInputOutput();
_physMgr.fetch(i, physLocation);
return i.toByteArray();
}
public synchronized long getRoot(final byte id){
checkNotClosed();
return _pageman.getFileHeader().fileHeaderGetRoot(id);
}
public synchronized void setRoot(final byte id, final long rowid){
checkNotClosed();
checkCanWrite();
_pageman.getFileHeader().fileHeaderSetRoot(id, rowid);
}
public synchronized void commit() {
try {
commitInProgress = true;
checkNotClosed();
checkCanWrite();
/** flush free phys rows into pages*/
_physMgr.commit();
_logicMgr.commit();
/**commit pages */
_pageman.commit();
} catch (IOException e) {
throw new IOError(e);
}finally {
commitInProgress= false;
}
}
public synchronized void rollback() {
if (transactionsDisabled)
throw new IllegalAccessError("Transactions are disabled, can not rollback");
try {
checkNotClosed();
_physMgr.rollback();
_logicMgr.rollback();
_pageman.rollback();
super.rollback();
} catch (IOException e) {
throw new IOError(e);
}
}
public void copyToZip(String zipFile) {
try {
String zip = zipFile;
String zip2 = "db";
ZipOutputStream z = new ZipOutputStream(new FileOutputStream(zip));
//copy zero pages
{
String file = zip2 + 0;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, _pageman.getHeaderBufData()));
z.closeEntry();
}
//iterate over pages and create new file for each
for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo page = _file.get(pageid);
String file = zip2 + pageid;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, page.getData()));
z.closeEntry();
_file.release(page);
}
for (long pageid = _pageman.getFirst(Magic.FREELOGIDS_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo page = _file.get(pageid);
String file = zip2 + pageid;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, page.getData()));
z.closeEntry();
_file.release(page);
}
for (long pageid = _pageman.getFirst(Magic.USED_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo page = _file.get(pageid);
String file = zip2 + pageid;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, page.getData()));
z.closeEntry();
_file.release(page);
}
for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo page = _file.get(pageid);
String file = zip2 + pageid;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, page.getData()));
z.closeEntry();
_file.release(page);
}
for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo page = _file.get(pageid);
String file = zip2 + pageid;
z.putNextEntry(new ZipEntry(file));
z.write(Utils.encrypt(cipherIn, page.getData()));
z.closeEntry();
_file.release(page);
}
z.close();
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized void clearCache() {
//no cache
}
private long statisticsCountPages(short pageType) throws IOException {
long pageCounter = 0;
for (long pageid = _pageman.getFirst(pageType);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
pageCounter++;
}
return pageCounter;
}
public synchronized String calculateStatistics() {
checkNotClosed();
try {
final StringBuilder b = new StringBuilder();
//count pages
{
b.append("PAGES:\n");
long total = 0;
long pages = statisticsCountPages(Magic.USED_PAGE);
total += pages;
b.append(" " + pages + " used pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
pages = statisticsCountPages(Magic.TRANSLATION_PAGE);
total += pages;
b.append(" " + pages + " record translation pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
pages = statisticsCountPages(Magic.FREE_PAGE);
total += pages;
b.append(" " + pages + " free (unused) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
pages = statisticsCountPages(Magic.FREEPHYSIDS_PAGE);
total += pages;
b.append(" " + pages + " free (phys) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
pages = statisticsCountPages(Magic.FREELOGIDS_PAGE);
total += pages;
b.append(" " + pages + " free (logical) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n");
b.append(" Total number of pages is " + total + " with size " + Utils.formatSpaceUsage(total * Storage.PAGE_SIZE) + "\n");
}
{
b.append("RECORDS:\n");
long recordCount = 0;
long freeRecordCount = 0;
long maximalRecordSize = 0;
long maximalAvailSizeDiff = 0;
long totalRecordSize = 0;
long totalAvailDiff = 0;
//count records
for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo io = _file.get(pageid);
for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
final long physLoc = io.pageHeaderGetLocation((short) pos);
if (physLoc == 0) {
freeRecordCount++;
continue;
}
if(physLoc == PREALOCATE_PHYS_RECID){
continue;
}
recordCount++;
//get size
PageIo page = _file.get(physLoc>>> Storage.PAGE_SIZE_SHIFT);
final short physOffset =(short) (physLoc & Storage.OFFSET_MASK);
int availSize = RecordHeader.getAvailableSize(page, physOffset);
int currentSize = RecordHeader.getCurrentSize(page, physOffset);
_file.release(page);
maximalAvailSizeDiff = Math.max(maximalAvailSizeDiff, availSize - currentSize);
maximalRecordSize = Math.max(maximalRecordSize, currentSize);
totalAvailDiff += availSize - currentSize;
totalRecordSize += currentSize;
}
_file.release(io);
}
b.append(" Contains " + recordCount + " records and " + freeRecordCount + " free slots.\n");
b.append(" Total space occupied by data is " + Utils.formatSpaceUsage(totalRecordSize) + "\n");
b.append(" Average data size in record is " + Utils.formatSpaceUsage(Math.round(1D * totalRecordSize / recordCount)) + "\n");
b.append(" Maximal data size in record is " + Utils.formatSpaceUsage(maximalRecordSize) + "\n");
b.append(" Space wasted in record fragmentation is " + Utils.formatSpaceUsage(totalAvailDiff) + "\n");
b.append(" Maximal space wasted in single record fragmentation is " + Utils.formatSpaceUsage(maximalAvailSizeDiff) + "\n");
}
return b.toString();
} catch (IOException e) {
throw new IOError(e);
}
}
public synchronized void defrag(boolean sortCollections) {
try {
checkNotClosed();
checkCanWrite();
commit();
final String filename2 = _filename + "_defrag" + System.currentTimeMillis();
final String filename1 = _filename;
DBStore db2 = new DBStore(filename2, false, true, cipherIn, cipherOut, false,false,false);
//recreate logical file with original page layout
{
//find minimal logical pageid (logical pageids are negative)
LongHashMap<String> logicalPages = new LongHashMap<String>();
long minpageid = 0;
for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
minpageid = Math.min(minpageid, pageid);
logicalPages.put(pageid, Utils.EMPTY_STRING);
}
//fill second db with logical pages
long pageCounter = 0;
for (
long pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE);
pageid >= minpageid;
pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE)
) {
pageCounter++;
if (pageCounter % 1000 == 0)
db2.commit();
}
logicalPages = null;
}
//reinsert collections so physical records are located near each other
//iterate over named object recids, it is sorted with TreeSet
if(sortCollections){
long nameRecid = getRoot(NAME_DIRECTORY_ROOT);
Collection<Long> recids = new TreeSet<Long>();
if(nameRecid!=0){
HTree<String,Long> m = fetch(nameRecid);
recids.addAll(m.values());
}
for (Long namedRecid : recids) {
Object obj = fetch(namedRecid);
if (obj instanceof LinkedList) {
LinkedList2.defrag(namedRecid, this, db2);
} else if (obj instanceof HTree) {
HTree.defrag(namedRecid, this, db2);
} else if (obj instanceof BTree) {
BTree.defrag(namedRecid, this, db2);
}
}
}
for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE);
pageid != 0;
pageid = _pageman.getNext(pageid)
) {
PageIo io = _file.get(pageid);
for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
if (pos > Short.MAX_VALUE)
throw new Error();
//write to new file
final long logicalRowId = ((-pageid) << Storage.PAGE_SIZE_SHIFT) + (long) pos;
//read from logical location in second db,
//check if record was already inserted as part of collections
if (db2._pageman.getLast(Magic.TRANSLATION_PAGE) <= pageid &&
db2._logicMgr.fetch(logicalRowId) != 0) {
//yes, this record already exists in second db
continue;
}
//get physical location in this db
final long physRowId = io.pageHeaderGetLocation((short) pos);
if (physRowId == 0)
continue;
if (physRowId == PREALOCATE_PHYS_RECID){
db2._logicMgr.forceInsert(logicalRowId, physRowId);
continue;
}
//read from physical location at this db
DataInputOutput b = new DataInputOutput();
_physMgr.fetch(b, physRowId);
byte[] bb = b.toByteArray();
//force insert into other file, without decompressing logical id to external form
long physLoc = db2._physMgr.insert(bb, 0, bb.length);
db2._logicMgr.forceInsert(logicalRowId, physLoc);
}
_file.release(io);
db2.commit();
}
for(byte b = 0;b<Magic.FILE_HEADER_NROOTS;b++){
db2.setRoot(b, getRoot(b));
}
db2.close();
_pageman.close();
_file.close();
List<File> filesToDelete = new ArrayList<File>();
//now rename old files
String[] exts = {StorageDiskMapped.IDR, StorageDiskMapped.DBR};
for (String ext : exts) {
String f1 = filename1 + ext;
String f2 = filename2 + "_OLD" + ext;
//first rename transaction log
File f1t = new File(f1 + StorageDisk.transaction_log_file_extension);
File f2t = new File(f2 + StorageDisk.transaction_log_file_extension);
f1t.renameTo(f2t);
filesToDelete.add(f2t);
//rename data files, iterate until file exist
for (int i = 0; ; i++) {
File f1d = new File(f1 + "." + i);
if (!f1d.exists()) break;
File f2d = new File(f2 + "." + i);
f1d.renameTo(f2d);
filesToDelete.add(f2d);
}
}
//rename new files
for (String ext : exts) {
String f1 = filename2 + ext;
String f2 = filename1 + ext;
//first rename transaction log
File f1t = new File(f1 + StorageDisk.transaction_log_file_extension);
File f2t = new File(f2 + StorageDisk.transaction_log_file_extension);
f1t.renameTo(f2t);
//rename data files, iterate until file exist
for (int i = 0; ; i++) {
File f1d = new File(f1 + "." + i);
if (!f1d.exists()) break;
File f2d = new File(f2 + "." + i);
f1d.renameTo(f2d);
}
}
for (File d : filesToDelete) {
d.delete();
}
reopen();
} catch (IOException e) {
throw new IOError(e);
}
}
/**
* Insert data at forced logicalRowId, use only for defragmentation !!
*
* @param logicalRowId
* @param data
* @throws IOException
*/
void forceInsert(long logicalRowId, byte[] data) throws IOException {
logicalRowId = decompressRecid(logicalRowId);
if (needsAutoCommit()) {
commit();
}
long physLoc = _physMgr.insert(data, 0, data.length);
_logicMgr.forceInsert(logicalRowId, physLoc);
}
/**
* Returns the number of records stored in the database.
* Used in unit tests.
*/
long countRecords() throws IOException {
long counter = 0;
long page = _pageman.getFirst(Magic.TRANSLATION_PAGE);
while (page != 0) {
PageIo io = _file.get(page);
for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) {
int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE;
if (pos > Short.MAX_VALUE)
throw new Error();
//get physical location
long physRowId = io.pageHeaderGetLocation((short) pos);
if (physRowId != 0)
counter += 1;
}
_file.release(io);
page = _pageman.getNext(page);
}
return counter;
}
private static int COMPRESS_RECID_PAGE_SHIFT = Integer.MIN_VALUE;
static{
int shift = 1;
while((1<<shift) <LogicalRowIdManager.ELEMS_PER_PAGE )
shift++;
COMPRESS_RECID_PAGE_SHIFT = shift;
}
private final static long COMPRESS_RECID_OFFSET_MASK = 0xFFFFFFFFFFFFFFFFL >>> (64- COMPRESS_RECID_PAGE_SHIFT);
/**
* Compress a recid from its physical form (block - offset) to (block - slot).
* This way the resulting number is smaller and can be packed more efficiently with LongPacker.
*/
static long compressRecid(final long recid) {
final long page = recid>>> Storage.PAGE_SIZE_SHIFT;
short offset = (short) (recid & Storage.OFFSET_MASK);
offset = (short) (offset - Magic.PAGE_HEADER_SIZE);
if (offset % Magic.PhysicalRowId_SIZE != 0)
throw new InternalError("recid not dividable "+Magic.PhysicalRowId_SIZE);
long slot = offset / Magic.PhysicalRowId_SIZE;
return (page << COMPRESS_RECID_PAGE_SHIFT) + slot;
}
static long decompressRecid(final long recid) {
final long page = recid >>> COMPRESS_RECID_PAGE_SHIFT;
final short offset = (short) ((recid & COMPRESS_RECID_OFFSET_MASK) * Magic.PhysicalRowId_SIZE + Magic.PAGE_HEADER_SIZE);
return (page << Storage.PAGE_SIZE_SHIFT) + (long) offset;
}
}
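// Usage sketch (not part of the original file): exercising the record API defined above.
// Only methods that appear in this class are used; the raw Serializer type is kept because the
// declared return type of defaultSerializer() is not shown in this changeset, and the file name
// and payload are illustrative.
class DBStoreUsageExample {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws java.io.IOException {
        DBStore store = new DBStore("/tmp/example", false, true, false); // writable, transactions disabled
        Serializer ser = store.defaultSerializer();
        long recid = store.insert("hello", ser, false);    // recid is returned in compressed form
        String value = (String) store.fetch(recid, ser);   // round-trip the record
        store.update(recid, value + " again", ser);
        store.commit();                                     // with transactions disabled this flushes dirty pages
        store.delete(recid);
        store.close();
    }
}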

View File

@ -0,0 +1,297 @@
package org.apache.jdbm;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* Utility class which implements DataInput and DataOutput on top of byte[] buffer
* with minimal overhead
*
* @author Jan Kotek
*/
class DataInputOutput implements DataInput, DataOutput, ObjectInput, ObjectOutput {
private int pos = 0;
private int count = 0;
private byte[] buf;
public DataInputOutput() {
buf = new byte[8];
}
public DataInputOutput(byte[] data) {
buf = data;
count = data.length;
}
public byte[] getBuf() {
return buf;
}
public int getPos() {
return pos;
}
public void reset() {
pos = 0;
count = 0;
}
public void resetForReading() {
count = pos;
pos = 0;
}
public void reset(byte[] b) {
pos = 0;
buf = b;
count = b.length;
}
public byte[] toByteArray() {
byte[] d = new byte[pos];
System.arraycopy(buf, 0, d, 0, pos);
return d;
}
public int available() {
return count - pos;
}
public void readFully(byte[] b) throws IOException {
readFully(b, 0, b.length);
}
public void readFully(byte[] b, int off, int len) throws IOException {
System.arraycopy(buf, pos, b, off, len);
pos += len;
}
public int skipBytes(int n) throws IOException {
pos += n;
return n;
}
public boolean readBoolean() throws IOException {
return buf[pos++] == 1;
}
public byte readByte() throws IOException {
return buf[pos++];
}
public int readUnsignedByte() throws IOException {
return buf[pos++] & 0xff;
}
public short readShort() throws IOException {
return (short)
(((short) (buf[pos++] & 0xff) << 8) |
((short) (buf[pos++] & 0xff) << 0));
}
public int readUnsignedShort() throws IOException {
return (((int) (buf[pos++] & 0xff) << 8) |
((int) (buf[pos++] & 0xff) << 0));
}
public char readChar() throws IOException {
return (char) readInt();
}
public int readInt() throws IOException {
return
(((buf[pos++] & 0xff) << 24) |
((buf[pos++] & 0xff) << 16) |
((buf[pos++] & 0xff) << 8) |
((buf[pos++] & 0xff) << 0));
}
public long readLong() throws IOException {
return
(((long) (buf[pos++] & 0xff) << 56) |
((long) (buf[pos++] & 0xff) << 48) |
((long) (buf[pos++] & 0xff) << 40) |
((long) (buf[pos++] & 0xff) << 32) |
((long) (buf[pos++] & 0xff) << 24) |
((long) (buf[pos++] & 0xff) << 16) |
((long) (buf[pos++] & 0xff) << 8) |
((long) (buf[pos++] & 0xff) << 0));
}
public float readFloat() throws IOException {
return Float.intBitsToFloat(readInt());
}
public double readDouble() throws IOException {
return Double.longBitsToDouble(readLong());
}
public String readLine() throws IOException {
return readUTF();
}
public String readUTF() throws IOException {
return Serialization.deserializeString(this);
}
/**
* make sure there will be enough space in the buffer to write N bytes
*/
private void ensureAvail(int n) {
if (pos + n >= buf.length) {
int newSize = Math.max(pos + n, buf.length * 2);
buf = Arrays.copyOf(buf, newSize);
}
}
public void write(int b) throws IOException {
ensureAvail(1);
buf[pos++] = (byte) b;
}
public void write(byte[] b) throws IOException {
write(b, 0, b.length);
}
public void write(byte[] b, int off, int len) throws IOException {
ensureAvail(len);
System.arraycopy(b, off, buf, pos, len);
pos += len;
}
public void writeBoolean(boolean v) throws IOException {
ensureAvail(1);
buf[pos++] = (byte) (v ? 1 : 0);
}
public void writeByte(int v) throws IOException {
ensureAvail(1);
buf[pos++] = (byte) (v);
}
public void writeShort(int v) throws IOException {
ensureAvail(2);
buf[pos++] = (byte) (0xff & (v >> 8));
buf[pos++] = (byte) (0xff & (v >> 0));
}
public void writeChar(int v) throws IOException {
writeInt(v);
}
public void writeInt(int v) throws IOException {
ensureAvail(4);
buf[pos++] = (byte) (0xff & (v >> 24));
buf[pos++] = (byte) (0xff & (v >> 16));
buf[pos++] = (byte) (0xff & (v >> 8));
buf[pos++] = (byte) (0xff & (v >> 0));
}
public void writeLong(long v) throws IOException {
ensureAvail(8);
buf[pos++] = (byte) (0xff & (v >> 56));
buf[pos++] = (byte) (0xff & (v >> 48));
buf[pos++] = (byte) (0xff & (v >> 40));
buf[pos++] = (byte) (0xff & (v >> 32));
buf[pos++] = (byte) (0xff & (v >> 24));
buf[pos++] = (byte) (0xff & (v >> 16));
buf[pos++] = (byte) (0xff & (v >> 8));
buf[pos++] = (byte) (0xff & (v >> 0));
}
public void writeFloat(float v) throws IOException {
ensureAvail(4);
writeInt(Float.floatToIntBits(v));
}
public void writeDouble(double v) throws IOException {
ensureAvail(8);
writeLong(Double.doubleToLongBits(v));
}
public void writeBytes(String s) throws IOException {
writeUTF(s);
}
public void writeChars(String s) throws IOException {
writeUTF(s);
}
public void writeUTF(String s) throws IOException {
Serialization.serializeString(this, s);
}
/** helper method to write data directly from PageIo*/
public void writeFromByteBuffer(ByteBuffer b, int offset, int length) {
ensureAvail(length);
b.position(offset);
b.get(buf,pos,length);
pos+=length;
}
//temp var used for Externalizable
SerialClassInfo serializer;
//temp var used for Externalizable
Serialization.FastArrayList objectStack;
public Object readObject() throws ClassNotFoundException, IOException {
//is here just to implement ObjectInput
//Fake method which reads data from serializer.
//We could probably implement a separate wrapper for this, but I want to save class space
return serializer.deserialize(this, objectStack);
}
public int read() throws IOException {
//is here just to implement ObjectInput
return readUnsignedByte();
}
public int read(byte[] b) throws IOException {
//is here just to implement ObjectInput
readFully(b);
return b.length;
}
public int read(byte[] b, int off, int len) throws IOException {
//is here just to implement ObjectInput
readFully(b,off,len);
return len;
}
public long skip(long n) throws IOException {
//is here just to implement ObjectInput
pos += n;
return n;
}
public void close() throws IOException {
//is here just to implement ObjectInput
//do nothing
}
public void writeObject(Object obj) throws IOException {
//is here just to implement ObjectOutput
serializer.serialize(this,obj,objectStack);
}
public void flush() throws IOException {
//is here just to implement ObjectOutput
//do nothing
}
}
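// Usage sketch (not part of the original file): round-tripping a few primitives through the
// buffer class above. Only methods defined in this class are used.
class DataInputOutputExample {
    public static void main(String[] args) throws java.io.IOException {
        DataInputOutput buf = new DataInputOutput();
        buf.writeInt(42);
        buf.writeLong(1234567890L);
        buf.writeBoolean(true);
        buf.resetForReading();                 // switch the buffer from write mode to read mode
        int i = buf.readInt();                 // 42
        long l = buf.readLong();               // 1234567890
        boolean b = buf.readBoolean();         // true
        System.out.println(i + " " + l + " " + b + ", bytes left: " + buf.available());
    }
}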

View File

@ -0,0 +1,215 @@
///*
//package org.apache.jdbm;
//
//import java.io.DataInput;
//import java.io.DataOutput;
//import java.io.IOException;
//import java.nio.Buffer;
//import java.nio.ByteBuffer;
//import java.util.Arrays;
//
//*/
///**
// * Utility class which implements DataInput and DataOutput on top of ByteBuffer
// * with minimal overhead
// * This class is not used, is left here in case we would ever need it.
// *
// * @author Jan Kotek
// *//*
//
//class DataInputOutput2 implements DataInput, DataOutput {
//
// private ByteBuffer buf;
//
//
// public DataInputOutput2() {
// buf = ByteBuffer.allocate(8);
// }
//
// public DataInputOutput2(ByteBuffer data) {
// buf = data;
// }
//
// public DataInputOutput2(byte[] data) {
// buf = ByteBuffer.wrap(data);
// }
//
//
// public int getPos() {
// return buf.position();
// }
//
//
// public void reset() {
// buf.rewind();
// }
//
//
// public void reset(byte[] b) {
// buf = ByteBuffer.wrap(b);
// }
//
// public void resetForReading() {
// buf.flip();
// }
//
//
// public byte[] toByteArray() {
// byte[] d = new byte[buf.position()];
// buf.position(0);
// buf.get(d); //reading N bytes restores to current position
//
// return d;
// }
//
// public int available() {
// return buf.remaining();
// }
//
//
// public void readFully(byte[] b) throws IOException {
// readFully(b, 0, b.length);
// }
//
// public void readFully(byte[] b, int off, int len) throws IOException {
// buf.get(b,off,len);
// }
//
// public int skipBytes(int n) throws IOException {
// buf.position(buf.position()+n);
// return n;
// }
//
// public boolean readBoolean() throws IOException {
// return buf.get()==1;
// }
//
// public byte readByte() throws IOException {
// return buf.get();
// }
//
// public int readUnsignedByte() throws IOException {
// return buf.get() & 0xff;
// }
//
// public short readShort() throws IOException {
// return buf.getShort();
// }
//
// public int readUnsignedShort() throws IOException {
// return (((int) (buf.get() & 0xff) << 8) |
// ((int) (buf.get() & 0xff) << 0));
// }
//
// public char readChar() throws IOException {
// return (char) readInt();
// }
//
// public int readInt() throws IOException {
// return buf.getInt();
// }
//
// public long readLong() throws IOException {
// return buf.getLong();
// }
//
// public float readFloat() throws IOException {
// return buf.getFloat();
// }
//
// public double readDouble() throws IOException {
// return buf.getDouble();
// }
//
// public String readLine() throws IOException {
// return readUTF();
// }
//
// public String readUTF() throws IOException {
// return Serialization.deserializeString(this);
// }
//
// */
///**
// * make sure there will be enough space in buffer to write N bytes
// *//*
//
// private void ensureAvail(int n) {
// int pos = buf.position();
// if (pos + n >= buf.limit()) {
// int newSize = Math.max(pos + n, buf.limit() * 2);
// byte[] b = new byte[newSize];
// buf.get(b);
// buf = ByteBuffer.wrap(b);
// buf.position(pos);
// }
// }
//
//
// public void write(final int b) throws IOException {
// ensureAvail(1);
// buf.put((byte) b);
// }
//
// public void write(final byte[] b) throws IOException {
// write(b, 0, b.length);
// }
//
// public void write(final byte[] b, final int off, final int len) throws IOException {
// ensureAvail(len);
// buf.put(b,off,len);
// }
//
// public void writeBoolean(final boolean v) throws IOException {
// ensureAvail(1);
// buf.put((byte) (v?1:0));
// }
//
// public void writeByte(final int v) throws IOException {
// ensureAvail(1);
// buf.put((byte) v);
// }
//
// public void writeShort(final short v) throws IOException {
// ensureAvail(2);
// buf.putShort(v);
// }
//
// public void writeChar(final int v) throws IOException {
// writeInt(v);
// }
//
// public void writeInt(final int v) throws IOException {
// ensureAvail(4);
// buf.putInt(v);
// }
//
// public void writeLong(final long v) throws IOException {
// ensureAvail(8);
// buf.putLong(v);
// }
//
// public void writeFloat(final float v) throws IOException {
// ensureAvail(4);
// buf.putFloat(v);
// }
//
// public void writeDouble(final double v) throws IOException {
// ensureAvail(8);
// buf.putDouble(v);
// }
//
// public void writeBytes(String s) throws IOException {
// writeUTF(s);
// }
//
// public void writeChars(String s) throws IOException {
// writeUTF(s);
// }
//
// public void writeUTF(String s) throws IOException {
// Serialization.serializeString(this, s);
// }
//
//}
//*/

View File

@ -0,0 +1,542 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.*;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Persistent HashMap implementation for DB.
* Implemented as an H*Tree structure.
*
* @author Alex Boisvert
* @author Jan Kotek
*/
class HTree<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
final Serializer SERIALIZER = new Serializer<Object>() {
public Object deserialize(DataInput ds2) throws IOException {
DataInputOutput ds = (DataInputOutput) ds2;
try {
int i = ds.readUnsignedByte();
if (i == SerializationHeader.HTREE_BUCKET) { //is HashBucket?
HTreeBucket ret = new HTreeBucket(HTree.this);
if (loadValues)
ret.readExternal(ds);
if (loadValues && ds.available() != 0)
throw new InternalError("bytes left: " + ds.available());
return ret;
} else if (i == SerializationHeader.HTREE_DIRECTORY) {
HTreeDirectory ret = new HTreeDirectory(HTree.this);
ret.readExternal(ds);
if (loadValues && ds.available() != 0)
throw new InternalError("bytes left: " + ds.available());
return ret;
} else {
throw new InternalError("Wrong HTree header: " + i);
}
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
}
public void serialize(DataOutput out, Object obj) throws IOException {
if (obj instanceof HTreeBucket) {
out.write(SerializationHeader.HTREE_BUCKET);
HTreeBucket b = (HTreeBucket) obj;
b.writeExternal(out);
} else {
out.write(SerializationHeader.HTREE_DIRECTORY);
HTreeDirectory n = (HTreeDirectory) obj;
n.writeExternal(out);
}
}
};
final protected ReadWriteLock lock = new ReentrantReadWriteLock();
/**
* Listeners which are notified about changes in records
*/
protected RecordListener[] recordListeners = new RecordListener[0];
/**
* Serializer used to serialize index keys (optional)
*/
protected Serializer<K> keySerializer;
/**
* Serializer used to serialize index values (optional)
*/
protected Serializer<V> valueSerializer;
protected boolean readonly = false;
final long rootRecid;
DBAbstract db;
/** if false map contains only keys, used for set*/
boolean hasValues = true;
/**
* Counts structural changes in the tree at runtime. It is here to support fail-fast behaviour.
*/
int modCount;
/**
* indicates if values should be loaded during deserialization; set to false during defragmentation
*/
private boolean loadValues = true;
public Serializer<K> getKeySerializer() {
return keySerializer;
}
public Serializer<V> getValueSerializer() {
return valueSerializer;
}
/**
* cache writing buffer, so it does not have to be allocated on each write
*/
AtomicReference<DataInputOutput> writeBufferCache = new AtomicReference<DataInputOutput>();
/**
* Create a persistent hashtable.
*/
public HTree(DBAbstract db, Serializer<K> keySerializer, Serializer<V> valueSerializer, boolean hasValues)
throws IOException {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.db = db;
this.hasValues = hasValues;
HTreeDirectory<K, V> root = new HTreeDirectory<K, V>(this, (byte) 0);
root.setPersistenceContext(0);
this.rootRecid = db.insert(root, this.SERIALIZER,false);
}
/**
* Load a persistent hashtable
*/
public HTree(DBAbstract db,long rootRecid, Serializer<K> keySerializer, Serializer<V> valueSerializer, boolean hasValues)
throws IOException {
this.db = db;
this.rootRecid = rootRecid;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.hasValues = hasValues;
}
void setPersistenceContext(DBAbstract db) {
this.db = db;
}
public V put(K key, V value) {
if (readonly)
throw new UnsupportedOperationException("readonly");
lock.writeLock().lock();
try {
if (key == null || value == null)
throw new NullPointerException("Null key or value");
V oldVal = (V) getRoot().put(key, value);
if (oldVal == null) {
modCount++;
//increase size
HTreeDirectory root = getRoot();
root.size++;
db.update(rootRecid,root,SERIALIZER);
for (RecordListener<K, V> r : recordListeners)
r.recordInserted(key, value);
} else {
//notify listeners
for (RecordListener<K, V> r : recordListeners)
r.recordUpdated(key, oldVal, value);
}
return oldVal;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
public V get(Object key) {
if (key == null)
return null;
lock.readLock().lock();
try {
return getRoot().get((K) key);
} catch (ClassCastException e) {
return null;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.readLock().unlock();
}
}
public V remove(Object key) {
if (readonly)
throw new UnsupportedOperationException("readonly");
lock.writeLock().lock();
try {
if (key == null)
return null;
V val = (V) getRoot().remove(key);
modCount++;
if (val != null){
//decrease size
HTreeDirectory root = getRoot();
root.size--;
db.update(rootRecid,root,SERIALIZER);
for (RecordListener r : recordListeners)
r.recordRemoved(key, val);
}
return val;
} catch (ClassCastException e) {
return null;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
public boolean containsKey(Object key) {
if (key == null)
return false;
//no need for locking, get is already locked
V v = get((K) key);
return v != null;
}
public void clear() {
lock.writeLock().lock();
try {
Iterator<K> keyIter = keys();
while (keyIter.hasNext()) {
keyIter.next();
keyIter.remove();
}
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
/**
* Returns an iterator over the keys contained in this map.
*/
public Iterator<K> keys()
throws IOException {
lock.readLock().lock();
try{
return getRoot().keys();
}finally {
lock.readLock().unlock();
}
}
public DBAbstract getRecordManager() {
return db;
}
/**
* add RecordListener which is notified about record changes
*
* @param listener
*/
public void addRecordListener(RecordListener<K, V> listener) {
recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1);
recordListeners[recordListeners.length - 1] = listener;
}
/**
* remove RecordListener which is notified about record changes
*
* @param listener
*/
public void removeRecordListener(RecordListener<K, V> listener) {
//Arrays.asList returns a fixed-size list, so copy it into a resizable one before removing
List<RecordListener> l = new ArrayList<RecordListener>(Arrays.asList(recordListeners));
l.remove(listener);
recordListeners = l.toArray(new RecordListener[l.size()]);
}
public Set<Entry<K, V>> entrySet() {
return _entrySet;
}
private Set<Entry<K, V>> _entrySet = new AbstractSet<Entry<K, V>>() {
protected Entry<K, V> newEntry(K k, V v) {
return new SimpleEntry<K, V>(k, v) {
private static final long serialVersionUID = 978651696969194154L;
public V setValue(V arg0) {
//put is already locked
HTree.this.put(getKey(), arg0);
return super.setValue(arg0);
}
};
}
public boolean add(java.util.Map.Entry<K, V> e) {
if (readonly)
throw new UnsupportedOperationException("readonly");
if (e.getKey() == null)
throw new NullPointerException("Can not add null key");
lock.writeLock().lock();
try{
if (e.getValue().equals(get(e.getKey())))
return false;
HTree.this.put(e.getKey(), e.getValue());
return true;
}finally {
lock.writeLock().unlock();
}
}
@SuppressWarnings("unchecked")
public boolean contains(Object o) {
if (o instanceof Entry) {
Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
//get is already locked
if (e.getKey() != null && HTree.this.get(e.getKey()) != null)
return true;
}
return false;
}
public Iterator<java.util.Map.Entry<K, V>> iterator() {
try {
final Iterator<K> br = keys();
return new Iterator<Entry<K, V>>() {
public boolean hasNext() {
return br.hasNext();
}
public java.util.Map.Entry<K, V> next() {
K k = br.next();
return newEntry(k, get(k));
}
public void remove() {
if (readonly)
throw new UnsupportedOperationException("readonly");
br.remove();
}
};
} catch (IOException e) {
throw new IOError(e);
}
}
@SuppressWarnings("unchecked")
public boolean remove(Object o) {
if (readonly)
throw new UnsupportedOperationException("readonly");
if (o instanceof Entry) {
Entry<K, V> e = (java.util.Map.Entry<K, V>) o;
//check for nulls
if (e.getKey() == null || e.getValue() == null)
return false;
lock.writeLock().lock();
try{
//get old value, must be same as item in entry
V v = get(e.getKey());
if (v == null || !e.getValue().equals(v))
return false;
HTree.this.remove(e.getKey());
return true;
}finally{
lock.writeLock().unlock();
}
}
return false;
}
@Override
public int size() {
lock.readLock().lock();
try {
int counter = 0;
Iterator<K> it = keys();
while (it.hasNext()) {
it.next();
counter++;
}
return counter;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.readLock().unlock();
}
}
};
HTreeDirectory<K, V> getRoot() {
//assumes that caller already holds read or write lock
try {
HTreeDirectory<K, V> root = (HTreeDirectory<K, V>) db.fetch(rootRecid, this.SERIALIZER);
root.setPersistenceContext(rootRecid);
return root;
} catch (IOException e) {
throw new IOError(e);
}
}
public static HTree deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException {
long rootRecid = LongPacker.unpackLong(is);
boolean hasValues = is.readBoolean();
Serializer keySerializer = (Serializer) ser.deserialize(is);
Serializer valueSerializer = (Serializer) ser.deserialize(is);
return new HTree(ser.db,rootRecid, keySerializer, valueSerializer, hasValues);
}
void serialize(DataOutput out) throws IOException {
LongPacker.packLong(out, rootRecid);
out.writeBoolean(hasValues);
db.defaultSerializer().serialize(out, keySerializer);
db.defaultSerializer().serialize(out, valueSerializer);
}
static void defrag(Long recid, DBStore r1, DBStore r2) throws IOException {
//TODO should modCount be increased after defrag, revert or commit?
try {
byte[] data = r1.fetchRaw(recid);
r2.forceInsert(recid, data);
DataInput in = new DataInputStream(new ByteArrayInputStream(data));
HTree t = (HTree) r1.defaultSerializer().deserialize(in);
t.db = r1;
t.loadValues = false;
HTreeDirectory d = t.getRoot();
if (d != null) {
r2.forceInsert(t.rootRecid, r1.fetchRaw(t.rootRecid));
d.defrag(r1, r2);
}
} catch (ClassNotFoundException e) {
throw new IOError(e);
}
}
public int size(){
return (int) getRoot().size;
}
public boolean hasValues() {
return hasValues;
}
public V putIfAbsent(K key, V value) {
lock.writeLock().lock();
try{
if (!containsKey(key))
return put(key, value);
else
return get(key);
}finally {
lock.writeLock().unlock();
}
}
public boolean remove(Object key, Object value) {
lock.writeLock().lock();
try{
if (containsKey(key) && get(key).equals(value)) {
remove(key);
return true;
} else return false;
}finally {
lock.writeLock().unlock();
}
}
public boolean replace(K key, V oldValue, V newValue) {
lock.writeLock().lock();
try{
if (containsKey(key) && get(key).equals(oldValue)) {
put(key, newValue);
return true;
} else return false;
}finally {
lock.writeLock().unlock();
}
}
public V replace(K key, V value) {
lock.writeLock().lock();
try{
if (containsKey(key)) {
return put(key, value);
} else return null;
}finally {
lock.writeLock().unlock();
}
}
}
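// Usage sketch (not part of the original file): HTree as a persistent ConcurrentMap. It assumes a
// DBAbstract/DBStore instance created elsewhere; key and value serializers are left null so the
// store's default serializer is used, which the fields above explicitly allow.
class HTreeUsageExample {
    static void demo(DBAbstract db) throws java.io.IOException {
        HTree<String, Long> map = new HTree<String, Long>(db, null, null, true);
        map.put("answer", 42L);
        Long v = map.get("answer");            // 42
        map.putIfAbsent("answer", 0L);         // no effect, the key is already present
        map.remove("answer");
        System.out.println(v + ", remaining entries: " + map.size());
    }
}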

View File

@ -0,0 +1,352 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.*;
import java.util.ArrayList;
/**
* A bucket is a placeholder for multiple (key, value) pairs. Buckets
* are used to store collisions (same hash value) at all levels of an
* H*tree.
* <p/>
* There are two types of buckets: leaf and non-leaf.
* <p/>
* Non-leaf buckets are buckets which hold collisions which happen
* when the H*tree is not fully expanded. Keys in a non-leaf buckets
* can have different hash codes. Non-leaf buckets are limited to an
* arbitrary size. When this limit is reached, the H*tree should create
* a new HTreeDirectory node and distribute keys of the non-leaf buckets into
* the newly created HTreeDirectory.
* <p/>
* A leaf bucket is a bucket which contains keys which all have
* the same <code>hashCode()</code>. Leaf buckets stand at the
* bottom of an H*tree because the hashing algorithm cannot further
* discriminate between different keys based on their hash code.
*
* @author Alex Boisvert
*/
final class HTreeBucket<K, V> {
/**
* The maximum number of elements (key, value) a non-leaf bucket
* can contain.
*/
public static final int OVERFLOW_SIZE = 16;
/**
* Depth of this bucket.
*/
private byte _depth;
/**
* Keys and values in this bucket. The key at index i is followed by its value at index i + OVERFLOW_SIZE.
*/
private Object[] _keysAndValues;
private byte size = 0;
private final HTree<K, V> tree;
/**
* Public constructor for serialization.
*/
public HTreeBucket(HTree<K, V> tree) {
this.tree = tree;
}
/**
* Construct a bucket with a given depth level. The depth level is the
* number of <code>HTreeDirectory</code> levels above this bucket.
*/
public HTreeBucket(HTree<K, V> tree, byte level) {
this.tree = tree;
if (level > HTreeDirectory.MAX_DEPTH + 1) {
throw new IllegalArgumentException(
"Cannot create bucket with depth > MAX_DEPTH+1. "
+ "Depth=" + level);
}
_depth = level;
_keysAndValues = new Object[OVERFLOW_SIZE * 2];
}
/**
* Returns the number of elements contained in this bucket.
*/
public int getElementCount() {
return size;
}
/**
* Returns whether or not this bucket is a "leaf bucket".
*/
public boolean isLeaf() {
return (_depth > HTreeDirectory.MAX_DEPTH);
}
/**
* Returns true if bucket can accept at least one more element.
*/
public boolean hasRoom() {
if (isLeaf()) {
return true; // leaf buckets are never full
} else {
// non-leaf bucket
return (size < OVERFLOW_SIZE);
}
}
/**
* Add an element (key, value) to this bucket. If an existing element
* has the same key, it is replaced silently.
*
* @return Object which was previously associated with the given key
* or <code>null</code> if no association existed.
*/
public V addElement(K key, V value) {
//find entry
byte existing = -1;
for (byte i = 0; i < size; i++) {
if (key.equals(_keysAndValues[i])) {
existing = i;
break;
}
}
if (existing != -1) {
// replace existing element
Object before = _keysAndValues[existing + OVERFLOW_SIZE];
if (before instanceof BTreeLazyRecord) {
BTreeLazyRecord<V> rec = (BTreeLazyRecord<V>) before;
before = rec.get();
rec.delete();
}
_keysAndValues[existing + OVERFLOW_SIZE] = value;
return (V) before;
} else {
// add new (key, value) pair
_keysAndValues[size] = key;
_keysAndValues[size + OVERFLOW_SIZE] = value;
size++;
return null;
}
}
/**
* Remove an element, given a specific key.
*
* @param key Key of the element to remove
* @return Removed element value, or <code>null</code> if not found
*/
public V removeElement(K key) {
//find entry
byte existing = -1;
for (byte i = 0; i < size; i++) {
if (key.equals(_keysAndValues[i])) {
existing = i;
break;
}
}
if (existing != -1) {
Object o = _keysAndValues[existing + OVERFLOW_SIZE];
if (o instanceof BTreeLazyRecord) {
BTreeLazyRecord<V> rec = (BTreeLazyRecord<V>) o;
o = rec.get();
rec.delete();
}
//move last element to existing
size--;
_keysAndValues[existing] = _keysAndValues[size];
_keysAndValues[existing + OVERFLOW_SIZE] = _keysAndValues[size + OVERFLOW_SIZE];
//and unset last element
_keysAndValues[size] = null;
_keysAndValues[size + OVERFLOW_SIZE] = null;
return (V) o;
} else {
// not found
return null;
}
}
/**
* Returns the value associated with a given key. If the given key
* is not found in this bucket, returns <code>null</code>.
*/
public V getValue(K key) {
//find entry
byte existing = -1;
for (byte i = 0; i < size; i++) {
if (key.equals(_keysAndValues[i])) {
existing = i;
break;
}
}
if (existing != -1) {
Object o = _keysAndValues[existing + OVERFLOW_SIZE];
if (o instanceof BTreeLazyRecord)
return ((BTreeLazyRecord<V>) o).get();
else
return (V) o;
} else {
// key not found
return null;
}
}
/**
* Obtain the keys contained in this bucket. Keys are ordered to match
* their values, which can be obtained by calling <code>getValues()</code>.
* <p/>
* A new list is created on each call, so callers may modify it freely.
*/
ArrayList<K> getKeys() {
ArrayList<K> ret = new ArrayList<K>();
for (byte i = 0; i < size; i++) {
ret.add((K) _keysAndValues[i]);
}
return ret;
}
/**
* Obtain the values contained in this bucket. Values are ordered to match
* their keys, which can be obtained by calling <code>getKeys()</code>.
* <p/>
* A new list is created on each call, so callers may modify it freely.
*/
ArrayList<V> getValues() {
ArrayList<V> ret = new ArrayList<V>();
for (byte i = 0; i < size; i++) {
ret.add((V) _keysAndValues[i + OVERFLOW_SIZE]);
}
return ret;
}
public void writeExternal(DataOutput out)
throws IOException {
out.write(_depth);
out.write(size);
DataInputOutput out3 = tree.writeBufferCache.getAndSet(null);
if (out3 == null)
out3 = new DataInputOutput();
else
out3.reset();
Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer();
for (byte i = 0; i < size; i++) {
out3.reset();
keySerializer.serialize(out3, _keysAndValues[i]);
LongPacker.packInt(out, out3.getPos());
out.write(out3.getBuf(), 0, out3.getPos());
}
//write values
if(tree.hasValues()){
Serializer valSerializer = tree.valueSerializer != null ? tree.valueSerializer : tree.getRecordManager().defaultSerializer();
for (byte i = 0; i < size; i++) {
Object value = _keysAndValues[i + OVERFLOW_SIZE];
if (value == null) {
out.write(BTreeLazyRecord.NULL);
} else if (value instanceof BTreeLazyRecord) {
out.write(BTreeLazyRecord.LAZY_RECORD);
LongPacker.packLong(out, ((BTreeLazyRecord) value).recid);
} else {
//transform to byte array
out3.reset();
valSerializer.serialize(out3, value);
if (out3.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) {
//store as separate record
long recid = tree.getRecordManager().insert(out3.toByteArray(), BTreeLazyRecord.FAKE_SERIALIZER,true);
out.write(BTreeLazyRecord.LAZY_RECORD);
LongPacker.packLong(out, recid);
} else {
out.write(out3.getPos());
out.write(out3.getBuf(), 0, out3.getPos());
}
}
}
}
tree.writeBufferCache.set(out3);
}
public void readExternal(DataInputOutput in) throws IOException, ClassNotFoundException {
_depth = in.readByte();
size = in.readByte();
//read keys
Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer();
_keysAndValues = (K[]) new Object[OVERFLOW_SIZE * 2];
for (byte i = 0; i < size; i++) {
int expectedSize = LongPacker.unpackInt(in);
K key = (K) BTreeLazyRecord.fastDeser(in, keySerializer, expectedSize);
_keysAndValues[i] = key;
}
//read values
if(tree.hasValues()){
Serializer<V> valSerializer = tree.valueSerializer != null ? tree.valueSerializer : (Serializer<V>) tree.getRecordManager().defaultSerializer();
for (byte i = 0; i < size; i++) {
int header = in.readUnsignedByte();
if (header == BTreeLazyRecord.NULL) {
_keysAndValues[i + OVERFLOW_SIZE] = null;
} else if (header == BTreeLazyRecord.LAZY_RECORD) {
long recid = LongPacker.unpackLong(in);
_keysAndValues[i + OVERFLOW_SIZE] = (new BTreeLazyRecord(tree.getRecordManager(), recid, valSerializer));
} else {
_keysAndValues[i + OVERFLOW_SIZE] = BTreeLazyRecord.fastDeser(in, valSerializer, header);
}
}
}else{
for (byte i = 0; i < size; i++) {
if(_keysAndValues[i]!=null)
_keysAndValues[i+OVERFLOW_SIZE] = Utils.EMPTY_STRING;
}
}
}
}

View File

@ -0,0 +1,618 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.*;
import java.util.*;
/**
* Hashtable directory page.
*
* @author Alex Boisvert
*/
final class HTreeDirectory<K, V> {
/**
* Maximum number of children in a directory.
* <p/>
* (Must be a power of 2 -- if you update this value, you must also
* update BIT_SIZE and MAX_DEPTH.)
* <p/>
* !!! Do not change this: it affects the storage format, and there are magic numbers that rely on 255 !!!
*/
static final int MAX_CHILDREN = 256;
/**
* Number of significant bits per directory level.
*/
static final int BIT_SIZE = 8; // log2(256) = 8
/**
* Maximum number of levels (zero-based)
* <p/>
* (4 * 8 bits = 32 bits, which is the size of an "int", and as
* you know, hashcodes in Java are "ints")
*/
static final int MAX_DEPTH = 3; // 4 levels
/**
* Record ids of children nodes.
* It is stored as a matrix to save memory; some subarrays may be null.
*/
private long[][] _children;
/**
* Depth of this directory page, zero-based
*/
private byte _depth;
/**
* This directory's record ID in the DB. (transient)
*/
private long _recid;
/** if this is root (depth=0), it contains size, otherwise -1*/
long size;
protected final HTree<K, V> tree;
/**
* Public constructor used by serialization
*/
public HTreeDirectory(HTree<K, V> tree) {
this.tree = tree;
}
/**
* Construct a HashDirectory
*
* @param depth Depth of this directory node.
*/
HTreeDirectory(HTree<K, V> tree, byte depth) {
this.tree = tree;
_depth = depth;
_children = new long[32][];
}
/**
* Sets persistence context. This method must be called before any
* persistence-related operation.
*
* @param recid Record id of this directory.
*/
void setPersistenceContext(long recid) {
this._recid = recid;
}
/**
* Get the record identifier used to load this hashtable.
*/
long getRecid() {
return _recid;
}
/**
* Returns whether or not this directory is empty. A directory
* is empty when it no longer contains buckets or sub-directories.
*/
boolean isEmpty() {
for (int i = 0; i < _children.length; i++) {
long[] sub = _children[i];
if (sub!=null){
for (int j = 0; j < 8; j++) {
if(sub[j] != 0) {
return false;
}
}
}
}
return true;
}
/**
* Returns the value which is associated with the given key. Returns
* <code>null</code> if there is no association for this key.
*
* @param key key whose associated value is to be returned
*/
V get(K key)
throws IOException {
int hash = hashCode(key);
long child_recid = getRecid(hash);
if (child_recid == 0) {
// not bucket/node --> not found
return null;
} else {
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
// System.out.println("HashDirectory.get() child is : "+node);
if (node instanceof HTreeDirectory) {
// recurse into next directory level
HTreeDirectory<K, V> dir = (HTreeDirectory<K, V>) node;
dir.setPersistenceContext(child_recid);
return dir.get(key);
} else {
// node is a bucket
HTreeBucket<K, V> bucket = (HTreeBucket) node;
return bucket.getValue(key);
}
}
}
private long getRecid(int hash) {
long[] sub = _children[hash>>>3];
return sub==null? 0 : sub[hash%8];
}
private void putRecid(int hash, long recid) {
long[] sub = _children[hash>>>3];
if(sub == null){
sub = new long[8];
_children[hash>>>3] = sub;
}
sub[hash%8] = recid;
}
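// Layout note (illustrative): the 256 child slots are stored as 32 sub-arrays of
// 8 longs each, so slot "hash" lives at _children[hash >>> 3][hash % 8]. Sub-arrays
// are allocated lazily; a missing sub-array or a recid of 0 both mean "no child".
// Example: hash 154 (0x9A) maps to _children[19][2].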
/**
* Associates the specified value with the specified key.
*
* @param key key with which the specified value is to be associated.
* @param value value to be associated with the specified key.
* @return object which was previously associated with the given key,
* or <code>null</code> if no association existed.
*/
Object put(final Object key, final Object value)
throws IOException {
if (value == null) {
return remove(key);
}
int hash = hashCode(key);
long child_recid = getRecid(hash);
if (child_recid == 0) {
// no bucket/node here yet, let's create a bucket
HTreeBucket bucket = new HTreeBucket(tree, (byte) (_depth + 1));
// insert (key,value) pair in bucket
Object existing = bucket.addElement(key, value);
long b_recid = tree.db.insert(bucket, tree.SERIALIZER,false);
putRecid(hash, b_recid);
tree.db.update(_recid, this, tree.SERIALIZER);
// System.out.println("Added: "+bucket);
return existing;
} else {
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
if (node instanceof HTreeDirectory) {
// recursive insert in next directory level
HTreeDirectory dir = (HTreeDirectory) node;
dir.setPersistenceContext(child_recid);
return dir.put(key, value);
} else {
// node is a bucket
HTreeBucket bucket = (HTreeBucket) node;
if (bucket.hasRoom()) {
Object existing = bucket.addElement(key, value);
tree.db.update(child_recid, bucket, tree.SERIALIZER);
// System.out.println("Added: "+bucket);
return existing;
} else {
// overflow, so create a new directory
if (_depth == MAX_DEPTH) {
throw new RuntimeException("Cannot create deeper directory. "
+ "Depth=" + _depth);
}
HTreeDirectory dir = new HTreeDirectory(tree, (byte) (_depth + 1));
long dir_recid = tree.db.insert(dir, tree.SERIALIZER,false);
dir.setPersistenceContext(dir_recid);
putRecid(hash, dir_recid);
tree.db.update(_recid, this, tree.SERIALIZER);
// discard overflown bucket
tree.db.delete(child_recid);
// migrate existing bucket elements
ArrayList keys = bucket.getKeys();
ArrayList values = bucket.getValues();
int entries = keys.size();
for (int i = 0; i < entries; i++) {
dir.put(keys.get(i), values.get(i));
}
// (finally!) insert new element
return dir.put(key, value);
}
}
}
}
/**
* Remove the value which is associated with the given key. If the
* key does not exist, this method simply ignores the operation.
*
* @param key key whose associated value is to be removed
* @return object which was associated with the given key, or
* <code>null</code> if no association existed with given key.
*/
Object remove(Object key) throws IOException {
int hash = hashCode(key);
long child_recid = getRecid(hash);
if (child_recid == 0) {
// not bucket/node --> not found
return null;
} else {
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
// System.out.println("HashDirectory.remove() child is : "+node);
if (node instanceof HTreeDirectory) {
// recurse into next directory level
HTreeDirectory dir = (HTreeDirectory) node;
dir.setPersistenceContext(child_recid);
Object existing = dir.remove(key);
if (existing != null) {
if (dir.isEmpty()) {
// delete empty directory
tree.db.delete(child_recid);
putRecid(hash, 0);
tree.db.update(_recid, this, tree.SERIALIZER);
}
}
return existing;
} else {
// node is a bucket
HTreeBucket bucket = (HTreeBucket) node;
Object existing = bucket.removeElement(key);
if (existing != null) {
if (bucket.getElementCount() >= 1) {
tree.db.update(child_recid, bucket, tree.SERIALIZER);
} else {
// delete bucket, it's empty
tree.db.delete(child_recid);
putRecid(hash, 0);
tree.db.update(_recid, this, tree.SERIALIZER);
}
}
return existing;
}
}
}
/**
* Calculates the hashcode of a key, based on the current directory
* depth.
*/
private int hashCode(Object key) {
int hashMask = hashMask();
int hash = key.hashCode();
hash = hash & hashMask;
hash = hash >>> ((MAX_DEPTH - _depth) * BIT_SIZE);
hash = hash % MAX_CHILDREN;
/*
System.out.println("HashDirectory.hashCode() is: 0x"
+Integer.toHexString(hash)
+" for object hashCode() 0x"
+Integer.toHexString(key.hashCode()));
*/
return hash;
}
/**
* Calculates the hashmask of this directory. The hashmask is the
* bit mask applied to a hashcode to retain only bits that are
* relevant to this directory level.
*/
int hashMask() {
int bits = MAX_CHILDREN - 1;
int hashMask = bits << ((MAX_DEPTH - _depth) * BIT_SIZE);
/*
System.out.println("HashDirectory.hashMask() is: 0x"
+Integer.toHexString(hashMask));
*/
return hashMask;
}
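// Worked example (illustrative): with MAX_DEPTH = 3 and BIT_SIZE = 8, each
// directory level consumes one byte of the key's 32-bit hashCode, starting from
// the most significant byte. For key.hashCode() == 0x12345678:
//
//   depth 0: mask 0xFF000000 -> child index 0x12
//   depth 1: mask 0x00FF0000 -> child index 0x34
//   depth 2: mask 0x0000FF00 -> child index 0x56
//   depth 3: mask 0x000000FF -> child index 0x78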
/**
* Returns an iterator over the keys contained in this directory.
*/
Iterator<K> keys()
throws IOException {
return new HDIterator(true);
}
/**
* Returns an iterator over the values contained in this directory.
*/
Iterator<V> values()
throws IOException {
return new HDIterator(false);
}
public void writeExternal(DataOutput out)
throws IOException {
out.writeByte(_depth);
if(_depth==0){
LongPacker.packLong(out,size);
}
int zeroStart = 0;
for (int i = 0; i < MAX_CHILDREN; i++) {
if (getRecid(i) != 0) {
zeroStart = i;
break;
}
}
out.write(zeroStart);
if (zeroStart == MAX_CHILDREN)
return;
int zeroEnd = 0;
for (int i = MAX_CHILDREN - 1; i >= 0; i--) {
if (getRecid(i) != 0) {
zeroEnd = i;
break;
}
}
out.write(zeroEnd);
for (int i = zeroStart; i <= zeroEnd; i++) {
LongPacker.packLong(out, getRecid(i));
}
}
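// Serialized layout produced above and consumed by readExternal (illustrative):
//
//   byte        _depth
//   packed long size                          (only when _depth == 0, i.e. the root)
//   byte        zeroStart                     index of the first non-empty child slot
//   byte        zeroEnd                       index of the last non-empty child slot
//   packed long recid[zeroStart..zeroEnd]     one per slot, intermediate zeros included
//
// Leading and trailing empty slots are thus never written out.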
public void readExternal(DataInputOutput in)
throws IOException, ClassNotFoundException {
_depth = in.readByte();
if(_depth==0)
size = LongPacker.unpackLong(in);
else
size = -1;
_children = new long[32][];
int zeroStart = in.readUnsignedByte();
int zeroEnd = in.readUnsignedByte();
for (int i = zeroStart; i <= zeroEnd; i++) {
long recid = LongPacker.unpackLong(in);
if(recid!=0)
putRecid(i,recid);
}
}
public void defrag(DBStore r1, DBStore r2) throws IOException, ClassNotFoundException {
for (long[] sub: _children) {
if(sub==null) continue;
for (long child : sub) {
if (child == 0) continue;
byte[] data = r1.fetchRaw(child);
r2.forceInsert(child, data);
Object t = tree.SERIALIZER.deserialize(new DataInputOutput(data));
if (t instanceof HTreeDirectory) {
((HTreeDirectory) t).defrag(r1, r2);
}
}
}
}
void deleteAllChildren() throws IOException {
for(long[] ll : _children){
if(ll!=null){
for(long l:ll ){
if(l!=0){
tree.db.delete(l);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////
// INNER CLASS
////////////////////////////////////////////////////////////////////////
/**
* Utility class to enumerate keys/values in an HTree
*/
class HDIterator<A> implements Iterator<A> {
/**
* True if we're iterating on keys, False if enumerating on values.
*/
private boolean _iterateKeys;
/**
* Stacks of directories & last enumerated child position
*/
private ArrayList _dirStack;
private ArrayList _childStack;
/**
* Current HashDirectory in the hierarchy
*/
private HTreeDirectory _dir;
/**
* Current child position
*/
private int _child;
/**
* Current bucket iterator
*/
private Iterator<A> _iter;
private A next;
/**
* last item returned in next(), is used to remove() last item
*/
private A last;
private int expectedModCount;
/**
* Construct an iterator on this directory.
*
* @param iterateKeys True if the iteration supplies keys, False
* if it supplies values.
*/
HDIterator(boolean iterateKeys)
throws IOException {
_dirStack = new ArrayList();
_childStack = new ArrayList();
_dir = HTreeDirectory.this;
_child = -1;
_iterateKeys = iterateKeys;
expectedModCount = tree.modCount;
prepareNext();
next = next2();
}
/**
* Returns the next object.
*/
public A next2() {
A next = null;
if (_iter != null && _iter.hasNext()) {
next = _iter.next();
} else {
try {
prepareNext();
} catch (IOException except) {
throw new IOError(except);
}
if (_iter != null && _iter.hasNext()) {
return next2();
}
}
return next;
}
/**
* Prepare internal state so we can answer <code>hasNext</code>.
* <p/>
* This code prepares an iterator over the next bucket to enumerate.
* If no following bucket is found, the bucket iterator is set to
* <code>null</code>.
*/
private void prepareNext() throws IOException {
long child_recid = 0;
// get next bucket/directory to enumerate
do {
_child++;
if (_child >= MAX_CHILDREN) {
if (_dirStack.isEmpty()) {
// no more directory in the stack, we're finished
return;
}
// try next node
_dir = (HTreeDirectory) _dirStack.remove(_dirStack.size() - 1);
_child = ((Integer) _childStack.remove(_childStack.size() - 1)).intValue();
continue;
}
child_recid = _dir.getRecid(_child);
} while (child_recid == 0);
if (child_recid == 0) {
throw new Error("child_recid cannot be 0");
}
Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
// System.out.println("HDEnumeration.get() child is : "+node);
if (node instanceof HTreeDirectory) {
// save current position
_dirStack.add(_dir);
_childStack.add(new Integer(_child));
_dir = (HTreeDirectory) node;
_child = -1;
// recurse into
_dir.setPersistenceContext(child_recid);
prepareNext();
} else {
// node is a bucket
HTreeBucket bucket = (HTreeBucket) node;
if (_iterateKeys) {
ArrayList keys2 = bucket.getKeys();
_iter = keys2.iterator();
} else {
_iter = bucket.getValues().iterator();
}
}
}
public boolean hasNext() {
return next != null;
}
public A next() {
if (next == null) throw new NoSuchElementException();
if (expectedModCount != tree.modCount)
throw new ConcurrentModificationException();
last = next;
next = next2();
return last;
}
public void remove() {
if (last == null) throw new IllegalStateException();
if (expectedModCount != tree.modCount)
throw new ConcurrentModificationException();
//TODO current delete behaviour may change node layout. INVESTIGATE if this can happen!
tree.remove(last);
last = null;
expectedModCount++;
}
}
}

View File

@ -0,0 +1,47 @@
package org.apache.jdbm;
import java.util.AbstractSet;
import java.util.Iterator;
/**
* Wrapper for HTree to implement the java.util.Set interface
*/
class HTreeSet<E> extends AbstractSet<E> {
final HTree<E, Object> map;
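// Note (illustrative): set membership is represented by mapping every element to
// the shared Utils.EMPTY_STRING sentinel in the underlying HTree; add() reports a
// new element when put() returns null, and remove() relies on reference equality
// with that sentinel to detect that a mapping was actually removed.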
HTreeSet(HTree map) {
this.map = map;
}
public Iterator<E> iterator() {
return map.keySet().iterator();
}
public int size() {
return map.size();
}
public boolean isEmpty() {
return map.isEmpty();
}
public boolean contains(Object o) {
return map.containsKey(o);
}
public boolean add(E e) {
return map.put(e, Utils.EMPTY_STRING) == null;
}
public boolean remove(Object o) {
return map.remove(o) == Utils.EMPTY_STRING;
}
public void clear() {
map.clear();
}
}

View File

@ -0,0 +1,480 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jdbm;
import java.io.*;
import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* LinkedList2 which stores its nodes on disk.
*
* @author Jan Kotek
*/
class LinkedList2<E> extends AbstractSequentialList<E> {
private DBAbstract db;
final long rootRecid;
/** Size limit; not currently used, but kept for forward compatibility.
* Zero means no limit.
*/
long sizeLimit = 0;
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
static final class Root{
long first;
long last;
long size;
}
private static final Serializer<Root> ROOT_SERIALIZER= new Serializer<Root>(){
public void serialize(DataOutput out, Root obj) throws IOException {
LongPacker.packLong(out,obj.first);
LongPacker.packLong(out,obj.last);
LongPacker.packLong(out,obj.size);
}
public Root deserialize(DataInput in) throws IOException, ClassNotFoundException {
Root r = new Root();
r.first = LongPacker.unpackLong(in);
r.last = LongPacker.unpackLong(in);
r.size = LongPacker.unpackLong(in);
return r;
}
};
private Serializer<E> valueSerializer;
/**
* Indicates that entry values should not be loaded during deserialization; used during defragmentation
*/
protected boolean loadValues = true;
/** constructor used for deserialization */
LinkedList2(DBAbstract db,long rootRecid, Serializer<E> valueSerializer) {
this.db = db;
this.rootRecid = rootRecid;
this.valueSerializer = valueSerializer;
}
/** constructor used to create new empty list*/
LinkedList2(DBAbstract db, Serializer<E> valueSerializer) throws IOException {
this.db = db;
if (valueSerializer != null && !(valueSerializer instanceof Serializable))
throw new IllegalArgumentException("Serializer does not implement Serializable");
this.valueSerializer = valueSerializer;
//create root
this.rootRecid = db.insert(new Root(), ROOT_SERIALIZER,false);
}
void setPersistenceContext(DBAbstract db) {
this.db = db;
}
public ListIterator<E> listIterator(int index) {
lock.readLock().lock();
try{
Root r = getRoot();
if (index < 0 || index > r.size)
throw new IndexOutOfBoundsException();
Iter iter = new Iter();
iter.next = r.first;
//scroll to requested position
//TODO scroll from end, if beyond half
for (int i = 0; i < index; i++) {
iter.next();
}
return iter;
}finally {
lock.readLock().unlock();
}
}
Root getRoot(){
//expect that caller already holds lock
try {
return db.fetch(rootRecid,ROOT_SERIALIZER);
} catch (IOException e) {
throw new IOError(e);
}
}
public int size() {
lock.readLock().lock();
try{
return (int) getRoot().size;
}finally {
lock.readLock().unlock();
}
}
public Iterator<E> descendingIterator() {
return null; // descending iteration is not supported by this implementation
}
public boolean add(Object value) {
lock.writeLock().lock();
try {
Root r = getRoot();
Entry e = new Entry(r.last, 0, value);
long recid = db.insert(e, entrySerializer,false);
//update old last Entry to point to new record
if (r.last != 0) {
Entry oldLast = db.fetch(r.last, entrySerializer);
if (oldLast.next != 0) throw new Error();
oldLast.next = recid;
db.update(r.last, oldLast, entrySerializer);
}
//update linked list
r.last = recid;
if (r.first == 0) r.first = recid;
r.size++;
db.update(rootRecid, r, ROOT_SERIALIZER);
modCount++;
return true;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
private Entry<E> fetch(long recid) {
lock.readLock().lock();
try {
return db.fetch(recid, entrySerializer);
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.readLock().unlock();
}
}
/**
* called from Serialization object
*/
static LinkedList2 deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException {
long rootrecid = LongPacker.unpackLong(is);
long sizeLimit = LongPacker.unpackLong(is);
if(sizeLimit!=0) throw new InternalError("LinkedList.sizeLimit not supported in this JDBM version");
Serializer serializer = (Serializer) ser.deserialize(is);
return new LinkedList2(ser.db,rootrecid, serializer);
}
void serialize(DataOutput out) throws IOException {
LongPacker.packLong(out, rootRecid);
LongPacker.packLong(out, sizeLimit);
db.defaultSerializer().serialize(out, valueSerializer);
}
private final Serializer<Entry> entrySerializer = new Serializer<Entry>() {
public void serialize(DataOutput out, Entry e) throws IOException {
LongPacker.packLong(out, e.prev);
LongPacker.packLong(out, e.next);
if (valueSerializer != null)
valueSerializer.serialize(out, (E) e.value);
else
db.defaultSerializer().serialize(out, e.value);
}
public Entry<E> deserialize(DataInput in) throws IOException, ClassNotFoundException {
long prev = LongPacker.unpackLong(in);
long next = LongPacker.unpackLong(in);
Object value = null;
if (loadValues)
value = valueSerializer == null ? db.defaultSerializer().deserialize(in) : valueSerializer.deserialize(in);
return new LinkedList2.Entry(prev, next, value);
}
};
static class Entry<E> {
long prev = 0;
long next = 0;
E value;
public Entry(long prev, long next, E value) {
this.prev = prev;
this.next = next;
this.value = value;
}
}
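// On-disk structure (illustrative): the list is persisted as one Root record
// (recids of the first and last Entry plus the element count) and one Entry
// record per element. Each Entry stores the recids of its neighbours, so the
// records form a doubly linked list addressed by recid:
//
//   Root.first -> Entry{prev=0, next=r2, value} -> Entry{prev=r1, next=0, value} <- Root.last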
private final class Iter implements ListIterator<E> {
private int expectedModCount = modCount;
private int index = 0;
private long prev = 0;
private long next = 0;
private byte lastOper = 0;
public boolean hasNext() {
return next != 0;
}
public E next() {
if (next == 0) throw new NoSuchElementException();
checkForComodification();
Entry<E> e = fetch(next);
prev = next;
next = e.next;
index++;
lastOper = +1;
return e.value;
}
public boolean hasPrevious() {
return prev != 0;
}
public E previous() {
checkForComodification();
Entry<E> e = fetch(prev);
next = prev;
prev = e.prev;
index--;
lastOper = -1;
return e.value;
}
public int nextIndex() {
return index;
}
public int previousIndex() {
return index - 1;
}
public void remove() {
checkForComodification();
lock.writeLock().lock();
try {
if (lastOper == 1) {
//last operation was next() so remove previous element
lastOper = 0;
Entry<E> p = db.fetch(prev, entrySerializer);
//update entry before previous
if (p.prev != 0) {
Entry<E> pp = db.fetch(p.prev, entrySerializer);
pp.next = p.next;
db.update(p.prev, pp, entrySerializer);
}
//update entry after next
if (p.next != 0) {
Entry<E> pn = db.fetch(p.next, entrySerializer);
pn.prev = p.prev;
db.update(p.next, pn, entrySerializer);
}
//remove old record from db
db.delete(prev);
//update list
Root r = getRoot();
if (r.first == prev)
r.first = next;
if (r.last == prev)
r.last = next;
r.size--;
db.update(rootRecid, r,ROOT_SERIALIZER);
modCount++;
expectedModCount++;
//update iterator
prev = p.prev;
} else if (lastOper == -1) {
//last operation was prev() so remove next element
lastOper = 0;
Entry<E> n = db.fetch(next, entrySerializer);
//update entry before next
if (n.prev != 0) {
Entry<E> pp = db.fetch(n.prev, entrySerializer);
pp.next = n.next;
db.update(n.prev, pp, entrySerializer);
}
//update entry after previous
if (n.next != 0) {
Entry<E> pn = db.fetch(n.next, entrySerializer);
pn.prev = n.prev;
db.update(n.next, pn, entrySerializer);
}
//remove old record from db
db.delete(next);
//update list
Root r = getRoot();
if (r.last == next)
r.last = prev;
if (r.first == next)
r.first = prev;
r.size--;
db.update(rootRecid, r,ROOT_SERIALIZER);
modCount++;
expectedModCount++;
//update iterator
next = n.next;
} else
throw new IllegalStateException();
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
public void set(E value) {
checkForComodification();
lock.writeLock().lock();
try {
if (lastOper == 1) {
//last operation was next(), so update previous item
lastOper = 0;
Entry<E> n = db.fetch(prev, entrySerializer);
n.value = value;
db.update(prev, n, entrySerializer);
} else if (lastOper == -1) {
//last operation was prev() so update next item
lastOper = 0;
Entry<E> n = db.fetch(next, entrySerializer);
n.value = value;
db.update(next, n, entrySerializer);
} else
throw new IllegalStateException();
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
public void add(E value) {
checkForComodification();
//use more efficient method if possible
if (next == 0) {
LinkedList2.this.add(value);
expectedModCount++;
return;
}
lock.writeLock().lock();
try {
//insert new entry
Entry<E> e = new Entry<E>(prev, next, value);
long recid = db.insert(e, entrySerializer,false);
//update previous entry
if (prev != 0) {
Entry<E> p = db.fetch(prev, entrySerializer);
if (p.next != next) throw new Error();
p.next = recid;
db.update(prev, p, entrySerializer);
}
//update next entry
Entry<E> n = fetch(next);
if (n.prev != prev) throw new Error();
n.prev = recid;
db.update(next, n, entrySerializer);
//update List
Root r = getRoot();
r.size++;
db.update(rootRecid, r, ROOT_SERIALIZER);
//update iterator
expectedModCount++;
modCount++;
prev = recid;
} catch (IOException e) {
throw new IOError(e);
}finally {
lock.writeLock().unlock();
}
}
final void checkForComodification() {
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
}
}
/**
* Copies a collection from one db to another, while keeping logical recids unchanged
*/
static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
try {
//move linked list itself
byte[] data = r1.fetchRaw(recid);
r2.forceInsert(recid, data);
DataInputOutput in = new DataInputOutput();
in.reset(data);
LinkedList2 l = (LinkedList2) r1.defaultSerializer().deserialize(in);
l.loadValues = false;
//move linkedlist root
if(l.rootRecid == 0) //empty list, done
return;
data = r1.fetchRaw(l.rootRecid);
r2.forceInsert(l.rootRecid, data);
in.reset(data);
Root r = ROOT_SERIALIZER.deserialize(in);
//move all other nodes in linked list
long current = r.first;
while (current != 0) {
data = r1.fetchRaw(current);
in.reset(data);
r2.forceInsert(current, data);
Entry e = (Entry) l.entrySerializer.deserialize(in);
current = e.next;
}
} catch (ClassNotFoundException e) {
throw new IOError(e);
}
}
}

View File

@ -0,0 +1,239 @@
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.IOException;
import java.util.Arrays;
/**
* This class manages the linked lists of logical rowid pages.
*/
final class LogicalRowIdManager {
// our record file and associated page manager
private final PageFile file;
private final PageManager pageman;
static final short ELEMS_PER_PAGE = (short) ((Storage.PAGE_SIZE - Magic.PAGE_HEADER_SIZE) / Magic.PhysicalRowId_SIZE);
private long[] freeRecordsInTransRowid = new long[4];
private int freeRecordsInTransSize = 0;
/** number of free logical rowids on a logical free page, stored as a SHORT */
static final int OFFSET_FREE_COUNT = Magic.PAGE_HEADER_SIZE;
static final int FREE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT;
/** maximal number of free logical rowids per page */
static final int FREE_RECORDS_PER_PAGE = (Storage.PAGE_SIZE -FREE_HEADER_SIZE)/6;
/**
* Creates a logical rowid manager using the indicated record file and page manager
*/
LogicalRowIdManager(PageFile file, PageManager pageman) throws IOException {
this.file = file;
this.pageman = pageman;
}
/**
* Creates a new logical rowid pointing to the indicated physical id
*
* @param physloc physical location to point to
* @return logical recid
*/
long insert(final long physloc) throws IOException {
// check whether there's a free rowid to reuse
long retval = getFreeSlot();
if (retval == 0) {
// no. This means that we bootstrap things by allocating
// a new translation page and freeing all the rowids on it.
long firstPage = pageman.allocate(Magic.TRANSLATION_PAGE);
short curOffset = Magic.PAGE_HEADER_SIZE;
for (int i = 0; i < ELEMS_PER_PAGE; i++) {
putFreeSlot(((-firstPage) << Storage.PAGE_SIZE_SHIFT) + (long) curOffset);
curOffset += Magic.PhysicalRowId_SIZE;
}
retval = getFreeSlot();
if (retval == 0) {
throw new Error("couldn't obtain free translation");
}
}
// write the translation.
update(retval, physloc);
return retval;
}
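// Encoding note (illustrative): a logical rowid packs the (negated) translation
// page id into the high bits and the slot offset within that page into the low
// Storage.PAGE_SIZE_SHIFT bits, i.e. recid = ((-page) << PAGE_SIZE_SHIFT) + offset.
// fetch()/update()/delete() below recover the two parts with
// page = -(recid >>> PAGE_SIZE_SHIFT) and offset = recid & Storage.OFFSET_MASK.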
/**
* Insert at forced location, use only for defragmentation !!
*
* @param logicalRowId
* @param physLoc
* @throws IOException
*/
void forceInsert(final long logicalRowId, final long physLoc) throws IOException {
if (fetch(logicalRowId) != 0)
throw new Error("can not forceInsert, record already exists: " + logicalRowId);
update(logicalRowId, physLoc);
}
/**
* Releases the indicated logical rowid.
*/
void delete(final long logicalrowid) throws IOException {
//zero out the old location; this is needed for defragmentation
final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT);
final PageIo xlatPage = file.get(pageId);
xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), 0);
file.release(pageId, true);
putFreeSlot(logicalrowid);
}
/**
* Updates the mapping
*
* @param logicalrowid The logical rowid
* @param physloc The physical rowid
*/
void update(final long logicalrowid, final long physloc) throws IOException {
final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT);
final PageIo xlatPage = file.get(pageId);
xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), physloc);
file.release(pageId, true);
}
/**
* Returns a mapping
*
* @param logicalrowid The logical rowid
* @return The physical rowid, 0 if does not exist
*/
long fetch(long logicalrowid) throws IOException {
final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT);
final long last = pageman.getLast(Magic.TRANSLATION_PAGE);
if (last - 1 > pageId)
return 0;
final short offset = (short) (logicalrowid & Storage.OFFSET_MASK);
final PageIo xlatPage = file.get(pageId);
final long ret = xlatPage.pageHeaderGetLocation(offset);
file.release(pageId, false);
return ret;
}
void commit() throws IOException {
if(freeRecordsInTransSize==0) return;
long freeRecPageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
if(freeRecPageId == 0){
//allocate new
freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
}
PageIo freeRecPage = file.get(freeRecPageId);
//write all uncommitted free records
for(int rowPos = 0;rowPos<freeRecordsInTransSize;rowPos++){
short count = freeRecPage.readShort(OFFSET_FREE_COUNT);
if(count == FREE_RECORDS_PER_PAGE){
//allocate new free recid page
file.release(freeRecPage);
freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
freeRecPage = file.get(freeRecPageId);
freeRecPage.writeShort(FREE_RECORDS_PER_PAGE, (short)0);
count = 0;
}
final int offset = (count ) *6 + FREE_HEADER_SIZE;
//write free recid and increase counter
freeRecPage.writeSixByteLong(offset,freeRecordsInTransRowid[rowPos]);
count++;
freeRecPage.writeShort(OFFSET_FREE_COUNT, count);
}
file.release(freeRecPage);
clearFreeRecidsInTransaction();
}
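// Page layout used above (illustrative): a FREELOGIDS page starts with the usual
// page header, followed at OFFSET_FREE_COUNT by a short counter of free recids on
// the page, followed by up to FREE_RECORDS_PER_PAGE six-byte recids. getFreeSlot()
// pops entries from the end of this array; when a page runs empty it is released
// back to the page manager.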
private void clearFreeRecidsInTransaction() {
if(freeRecordsInTransRowid.length>128)
freeRecordsInTransRowid = new long[4];
freeRecordsInTransSize = 0;
}
void rollback() throws IOException {
clearFreeRecidsInTransaction();
}
/**
* Returns a free Logical rowid, or
* 0 if nothing was found.
*/
long getFreeSlot() throws IOException {
if (freeRecordsInTransSize != 0) {
return freeRecordsInTransRowid[--freeRecordsInTransSize];
}
final long logicFreePageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
if(logicFreePageId == 0) {
return 0;
}
PageIo logicFreePage = file.get(logicFreePageId);
short recCount = logicFreePage.readShort(OFFSET_FREE_COUNT);
if(recCount <= 0){
throw new InternalError();
}
final int offset = (recCount -1) *6 + FREE_HEADER_SIZE;
final long ret = logicFreePage.readSixByteLong(offset);
recCount--;
if(recCount>0){
//decrease counter and zero out old record
logicFreePage.writeSixByteLong(offset,0);
logicFreePage.writeShort(OFFSET_FREE_COUNT, recCount);
file.release(logicFreePage);
}else{
//release this page
file.release(logicFreePage);
pageman.free(Magic.FREELOGIDS_PAGE,logicFreePageId);
}
return ret;
}
/**
* Puts the indicated rowid on the free list
*/
void putFreeSlot(long rowid) throws IOException {
//ensure capacity
if(freeRecordsInTransSize == freeRecordsInTransRowid.length)
freeRecordsInTransRowid = Arrays.copyOf(freeRecordsInTransRowid, freeRecordsInTransRowid.length * 4);
//add record and increase size
freeRecordsInTransRowid[freeRecordsInTransSize]=rowid;
freeRecordsInTransSize++;
}
}

View File

@ -0,0 +1,432 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jdbm;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* Hash map which uses a primitive long as its key.
* The main advantage is that a new Long instance does not have to be created for each lookup.
* <p/>
* This code comes from Android, which in turn comes from Apache Harmony.
* This class was modified to use primitive longs and stripped down to consume less space.
* <p/>
* Author of JDBM modifications: Jan Kotek
*/
class LongHashMap<V> implements Serializable {
private static final long serialVersionUID = 362499999763181265L;
private int elementCount;
private Entry<V>[] elementData;
private final float loadFactor;
private int threshold;
private int defaultSize = 16;
private transient Entry<V> reuseAfterDelete = null;
static final class Entry<V> implements Serializable{
private static final long serialVersionUID = 362445231113181265L;
Entry<V> next;
V value;
long key;
Entry(long theKey) {
this.key = theKey;
this.value = null;
}
}
static class HashMapIterator<V> implements Iterator<V> {
private int position = 0;
boolean canRemove = false;
Entry<V> entry;
Entry<V> lastEntry;
final LongHashMap<V> associatedMap;
HashMapIterator(LongHashMap<V> hm) {
associatedMap = hm;
}
public boolean hasNext() {
if (entry != null) {
return true;
}
Entry<V>[] elementData = associatedMap.elementData;
int length = elementData.length;
int newPosition = position;
boolean result = false;
while (newPosition < length) {
if (elementData[newPosition] == null) {
newPosition++;
} else {
result = true;
break;
}
}
position = newPosition;
return result;
}
public V next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
Entry<V> result;
Entry<V> _entry = entry;
if (_entry == null) {
result = lastEntry = associatedMap.elementData[position++];
entry = lastEntry.next;
} else {
if (lastEntry.next != _entry) {
lastEntry = lastEntry.next;
}
result = _entry;
entry = _entry.next;
}
canRemove = true;
return result.value;
}
public void remove() {
if (!canRemove) {
throw new IllegalStateException();
}
canRemove = false;
if (lastEntry.next == entry) {
while (associatedMap.elementData[--position] == null) {
// Do nothing
}
associatedMap.elementData[position] = associatedMap.elementData[position].next;
entry = null;
} else {
lastEntry.next = entry;
}
if (lastEntry != null) {
Entry<V> reuse = lastEntry;
lastEntry = null;
reuse.key = Long.MIN_VALUE;
reuse.value = null;
associatedMap.reuseAfterDelete = reuse;
}
associatedMap.elementCount--;
}
}
@SuppressWarnings("unchecked")
private Entry<V>[] newElementArray(int s) {
return new Entry[s];
}
/**
* Constructs a new empty {@code HashMap} instance.
*
* @since Android 1.0
*/
public LongHashMap() {
this(16);
}
/**
* Constructs a new {@code HashMap} instance with the specified capacity.
*
* @param capacity the initial capacity of this hash map.
* @throws IllegalArgumentException when the capacity is less than zero.
* @since Android 1.0
*/
public LongHashMap(int capacity) {
defaultSize = capacity;
if (capacity >= 0) {
elementCount = 0;
elementData = newElementArray(capacity == 0 ? 1 : capacity);
loadFactor = 0.75f; // Default load factor of 0.75
computeMaxSize();
} else {
throw new IllegalArgumentException();
}
}
// BEGIN android-changed
/**
* Removes all mappings from this hash map, leaving it empty.
*
* @see #isEmpty
* @see #size
* @since Android 1.0
*/
public void clear() {
if (elementCount > 0) {
elementCount = 0;
}
if(elementData.length>1024 && elementData.length>defaultSize)
elementData = new Entry[defaultSize];
else
Arrays.fill(elementData, null);
computeMaxSize();
}
// END android-changed
/**
* Recomputes the resize threshold from the current capacity and the load factor.
*
* @since Android 1.0
*/
private void computeMaxSize() {
threshold = (int) (elementData.length * loadFactor);
}
/**
* Returns the value of the mapping with the specified key.
*
* @param key the key.
* @return the value of the mapping with the specified key, or {@code null}
* if no mapping for the specified key is found.
* @since Android 1.0
*/
public V get(final long key) {
final int hash = powerHash(key);
final int index = (hash & 0x7FFFFFFF) % elementData.length;
//find non null entry
Entry<V> m = elementData[index];
while (m != null) {
if (key == m.key)
return m.value;
m = m.next;
}
return null;
}
/**
* Returns whether this map is empty.
*
* @return {@code true} if this map has no elements, {@code false}
* otherwise.
* @see #size()
* @since Android 1.0
*/
public boolean isEmpty() {
return elementCount == 0;
}
/**
* @return iterator over keys
*/
// public Iterator<K> keyIterator(){
// return new HashMapIterator<K, K, V>(
// new MapEntry.Type<K, K, V>() {
// public K get(Entry<K, V> entry) {
// return entry.key;
// }
// }, HashMap.this);
//
// }
/**
* Maps the specified key to the specified value.
*
* @param key the key.
* @param value the value.
* @return the value of any previous mapping with the specified key or
* {@code null} if there was no such mapping.
* @since Android 1.0
*/
public V put(final long key, final V value) {
int hash = powerHash(key);
int index = (hash & 0x7FFFFFFF) % elementData.length;
//find non null entry
Entry<V> entry = elementData[index];
while (entry != null && key != entry.key) {
entry = entry.next;
}
if (entry == null) {
if (++elementCount > threshold) {
rehash();
index = (hash & 0x7FFFFFFF) % elementData.length;
}
entry = createHashedEntry(key, index);
}
V result = entry.value;
entry.value = value;
return result;
}
Entry<V> createHashedEntry(final long key, final int index) {
Entry<V> entry = reuseAfterDelete;
if (entry == null) {
entry = new Entry<V>(key);
} else {
reuseAfterDelete = null;
entry.key = key;
entry.value = null;
}
entry.next = elementData[index];
elementData[index] = entry;
return entry;
}
void rehash(final int capacity) {
int length = (capacity == 0 ? 1 : capacity << 1);
Entry<V>[] newData = newElementArray(length);
for (int i = 0; i < elementData.length; i++) {
Entry<V> entry = elementData[i];
while (entry != null) {
int index = ((int) powerHash(entry.key) & 0x7FFFFFFF) % length;
Entry<V> next = entry.next;
entry.next = newData[index];
newData[index] = entry;
entry = next;
}
}
elementData = newData;
computeMaxSize();
}
void rehash() {
rehash(elementData.length);
}
/**
* Removes the mapping with the specified key from this map.
*
* @param key the key of the mapping to remove.
* @return the value of the removed mapping or {@code null} if no mapping
* for the specified key was found.
* @since Android 1.0
*/
public V remove(final long key) {
Entry<V> entry = removeEntry(key);
if (entry == null)
return null;
V ret = entry.value;
entry.value = null;
entry.key = Long.MIN_VALUE;
reuseAfterDelete = entry;
return ret;
}
Entry<V> removeEntry(final long key) {
Entry<V> last = null;
final int hash = powerHash(key);
final int index = (hash & 0x7FFFFFFF) % elementData.length;
Entry<V> entry = elementData[index];
while (true) {
if (entry == null) {
return null;
}
if (key == entry.key) {
if (last == null) {
elementData[index] = entry.next;
} else {
last.next = entry.next;
}
elementCount--;
return entry;
}
last = entry;
entry = entry.next;
}
}
/**
* Returns the number of elements in this map.
*
* @return the number of elements in this map.
* @since Android 1.0
*/
public int size() {
return elementCount;
}
/**
* @return an iterator over the values in this map
*/
public Iterator<V> valuesIterator() {
return new HashMapIterator<V>(this);
}
static final private int powerHash(final long key){
int h = (int)(key ^ (key >>> 32));
h ^= (h >>> 20) ^ (h >>> 12);
return h ^ (h >>> 7) ^ (h >>> 4);
}
}
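// Example usage (illustrative sketch):
//
//   LongHashMap<String> map = new LongHashMap<String>();
//   map.put(42L, "answer");                    // no Long boxing on the key
//   String v = map.get(42L);                   // "answer"
//   Iterator<String> it = map.valuesIterator();
//   while (it.hasNext()) { System.out.println(it.next()); }
//   map.remove(42L);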

Some files were not shown because too many files have changed in this diff.