/*
Copyright (C) SYSTAP, LLC DBA Blazegraph 2006-2016.  All rights reserved.

Contact:
     SYSTAP, LLC DBA Blazegraph
     2501 Calvert ST NW #106
     Washington, DC 20008
     licenses@blazegraph.com

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*
 * Created on Jan 19, 2008
 */

package com.bigdata.bfs;

import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

import com.bigdata.bfs.BigdataFileSystem;

import junit.framework.TestCase;

/**
 * Unit tests for random block IO include read, write, overwrite (aka update),
 * and delete. There are also some unit tests for atomic append after random
 * write since random writes can leave "holes" in files.
 * 
 * @author <a href="mailto:thompsonbry@users.sourceforge.net">Bryan Thompson</a>
 * @version $Id$
 */
public class TestRandomBlockOps extends AbstractRepositoryTestCase {

    /**
     * 
     */
    public TestRandomBlockOps() {
    }

    /**
     * @param arg0
     */
    public TestRandomBlockOps(String arg0) {
        super(arg0);
    }

    /**
     * Tests random write of a block, read back of the data, overwrite of the
     * same block, atomic append after the random write, and random delete.
     * 
     * @throws IOException
     */
    public void test_write01() throws IOException {

        final String id = "test";

        final int version = 0;

        final long block_12 = 12;

        final byte[] expected_12 = new byte[] { 1, 2, 3 };

        // Verify the initial (empty) state of the file version.
        assertEquals("blockCount", 0L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] {}, repo.blocks(id, version));

        assertNull("inputStream", repo.inputStream(id, version));

        assertNull("readBlock", repo.readBlock(id, version, block_12));

        assertNull("readHead", repo.readHead(id, version));

        // write random block.
        assertFalse("overwrite", repo.writeBlock(id, version, block_12,
                expected_12, 0, 3));

        assertEquals("blockCount", 1L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_12 }, repo.blocks(
                id, version));

        assertEquals("inputStream", expected_12, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", expected_12, repo.readBlock(id, version,
                block_12));

        assertEquals("readHead", expected_12, repo.readHead(id, version));

        /*
         * change the data and overwrite that block.
         */
        System.arraycopy(new byte[] { 4, 5, 6 }, 0, expected_12, 0, 3);

        assertTrue("overwrite", repo.writeBlock(id, version, block_12,
                expected_12, 0, 3));

        assertEquals("blockCount", 1L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_12 }, repo.blocks(
                id, version));

        assertEquals("inputStream", expected_12, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", expected_12, repo.readBlock(id, version,
                block_12));

        assertEquals("readHead", expected_12, repo.readHead(id, version));

        /*
         * Test atomic append after write leaving holes.
         */
        final byte[] expected_13 = new byte[] { 7, 8, 9 };

        final long block_13 = repo.appendBlock(id, version, expected_13, 0, 3);

        assertEquals("blockCount", 2L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_12, block_13 }, repo
                .blocks(id, version));

        assertEquals("readBlock", expected_13, repo.readBlock(id, version,
                block_13));

        assertEquals("readHead", expected_12, repo.readHead(id, version));

        assertEquals("inputStream", new byte[] { 4, 5, 6, 7, 8, 9 }, read(repo
                .inputStream(id, version)));

        /*
         * Atomic delete of block [12].
         */
        assertTrue("delete", repo.deleteBlock(id, version, block_12));

        /*
         * Note: the block count is still (2) since the deleted block has not
         * been purged from the index.
         */
        assertEquals("blockCount", 2L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_13 }, repo.blocks(
                id, version));

        assertEquals("inputStream", expected_13, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", null, repo.readBlock(id, version, block_12));

        assertEquals("readBlock", expected_13, repo.readBlock(id, version,
                block_13));

        assertEquals("readHead", expected_13, repo.readHead(id, version));

        // re-delete returns false.
        assertFalse("delete", repo.deleteBlock(id, version, block_12));

    }

    /**
     * Unit test where a block is written using a random write, replaced with an
     * empty block, and then replaced with a non-empty block. The test verifies
     * that the empty block in fact replaces the block (rather than being
     * interpreted as a delete of the block).
     * 
     * @throws IOException
     */
    public void test_write02() throws IOException {

        final String id = "test";

        final int version = 0;

        final long block_12 = 12;

        final byte[] expected_12 = new byte[] { 1, 2, 3 };

        // Verify the initial (empty) state of the file version.
        assertEquals("blockCount", 0L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] {}, repo.blocks(id, version));

        assertNull("inputStream", repo.inputStream(id, version));

        assertNull("readBlock", repo.readBlock(id, version, block_12));

        assertNull("readHead", repo.readHead(id, version));

        // write random block.
        assertFalse("overwrite", repo.writeBlock(id, version, block_12,
                expected_12, 0, 3));

        assertEquals("blockCount", 1L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_12 }, repo.blocks(
                id, version));

        assertEquals("inputStream", expected_12, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", expected_12, repo.readBlock(id, version,
                block_12));

        assertEquals("readHead", expected_12, repo.readHead(id, version));

        // re-write as an empty block (len==0 replaces, it does NOT delete).
        assertTrue("overwrite", repo.writeBlock(id, version, block_12,
                expected_12, 0, 0/* len */));

        assertEquals("blockCount", 1L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block_12 }, repo.blocks(
                id, version));

        assertEquals("inputStream", new byte[] {}, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", new byte[] {}, repo.readBlock(id, version,
                block_12));

        assertEquals("readHead", new byte[] {}, repo.readHead(id, version));

    }

    /**
     * Test for delete of the head block.
     */
    public void test_deleteH() throws IOException {

        final String id = "test";

        final int version = 0;

        final long block0 = 0;

        final byte[] expected0 = new byte[] { 1, 2, 3 };

        // Verify the initial (empty) state of the file version.
        assertEquals("blockCount", 0L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] {}, repo.blocks(id, version));

        assertNull("inputStream", repo.inputStream(id, version));

        assertNull("readBlock", repo.readBlock(id, version, block0));

        assertNull("readHead", repo.readHead(id, version));

        // atomic append
        assertEquals("block#", block0, repo.appendBlock(id, version,
                expected0, 0, 3));

        assertEquals("blockCount", 1L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block0 }, repo.blocks(id,
                version));

        assertEquals("inputStream", expected0, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", expected0, repo.readBlock(id, version,
                block0));

        assertEquals("readHead", expected0, repo.readHead(id, version));

        /*
         * write the 2nd block.
         */
        final long block1 = 1;

        final byte[] expected1 = new byte[] { 4, 5, 6 };

        // atomic append
        assertEquals("block#", block1, repo.appendBlock(id, version,
                expected1, 0, 3));

        assertEquals("blockCount", 2L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block0, block1 }, repo
                .blocks(id, version));

        assertEquals("inputStream", new byte[] { 1, 2, 3, 4, 5, 6 }, read(repo
                .inputStream(id, version)));

        assertEquals("readBlock", expected0, repo
                .readBlock(id, version, block0));

        assertEquals("readBlock", expected1, repo
                .readBlock(id, version, block1));

        assertEquals("readHead", expected0, repo.readHead(id, version));

        /*
         * delete the head.
         */
        assertEquals("block#", block0, repo.deleteHead(id, version));

        // note: does not decrease.
        assertEquals("blockCount", 2L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] { block1 }, repo.blocks(id,
                version));

        assertEquals("inputStream", expected1, read(repo.inputStream(id,
                version)));

        assertEquals("readBlock", expected1, repo
                .readBlock(id, version, block1));

        assertEquals("readHead", expected1, repo.readHead(id, version));

        /*
         * delete the head again.
         */
        assertEquals("block#", block1, repo.deleteHead(id, version));

        // note: does not decrease.
        assertEquals("blockCount", 2L, repo.getBlockCount(id, version));

        assertSameIterator("blockIds", new Long[] {}, repo.blocks(id, version));

        assertEquals("inputStream", new byte[] {}, read(repo.inputStream(id,
                version)));

        assertEquals("readHead", null, repo.readHead(id, version));

    }

    /**
     * A small stress test for a single file version.
     */
    public void test_stress() {

        Op gen = new Op(//
                .20f, // append
                .01f, // delete
                .05f, // deleteH
                .20f, // write
                .10f, // read
                .05f // readH
        );

        new StressTest(repo, 100/* limit */, gen);

    }

    /**
     * Helper class generates a random sequence of operation codes obeying the
     * probability distribution described in the constuctor call.
     * 
     * @todo blocks (iterator over block identifiers).
     * 
     * @todo deleteBlocks, readTail, deleteTail.
     * 
     * @author <a href="mailto:thompsonbry@users.sourceforge.net">Bryan Thompson</a>
     * @version $Id$
     */
    static class Op {

        /**
         * Atomic append
         */
        static final public int append = 0;

        /**
         * Atomic delete
         */
        static final public int delete = 1;

        /**
         * Atomic delete of the head of the file version.
         */
        static final public int deleteH = 2;

        /**
         * Atomic random write (can be an overwrite or a write of an empty
         * block).
         */
        static final public int write = 3;

        /**
         * Atomic random read.
         */
        static final public int read = 4;

        /**
         * Atomic read of the head of the file version.
         */
        static final public int readH = 5;

        /**
         * The last defined operator.
         */
        static final int lastOp = readH;

        final private Random r = new Random();

        // Normalized probability distribution, indexed by operator code.
        final private float[] _dist;

        public Op(float appendRate, float deleteRate, float deleteHRate,
                float writeRate, float readRate, float readHRate) {

            if (appendRate < 0 || deleteRate < 0 || deleteHRate < 0
                    || writeRate < 0 || readRate < 0 || readHRate < 0) {

                throw new IllegalArgumentException("negative rate");

            }

            float total = appendRate + deleteRate + deleteHRate + writeRate
                    + readRate + readHRate;

            if (total == 0.0) {

                throw new IllegalArgumentException("all rates are zero.");

            }

            /*
             * Convert to normalized distribution in [0:1].
             */
            appendRate /= total;
            deleteRate /= total;
            deleteHRate /= total;
            writeRate /= total;
            readRate /= total;
            readHRate /= total;

            /*
             * Save distribution.
             */
            int i = 0;
            _dist = new float[lastOp + 1];
            _dist[i++] = appendRate;
            _dist[i++] = deleteRate;
            _dist[i++] = deleteHRate;
            _dist[i++] = writeRate;
            _dist[i++] = readRate;
            _dist[i++] = readHRate;

            /*
             * Checksum. The normalized rates should sum to 1.0 modulo float
             * rounding error.
             */
            float sum = 0f;
            for (i = 0; i < _dist.length; i++) {

                sum += _dist[i];

            }

            if (Math.abs(sum - 1f) > 0.01) {

                throw new AssertionError("sum of distribution is: " + sum
                        + ", but expecting 1.0");

            }

        }

        /**
         * Return the name of the operator.
         * 
         * @param op
         * @return
         */
        public static String getName(int op) {

            if (op < 0 || op > lastOp) {

                throw new IllegalArgumentException();

            }

            switch (op) {
            case append:
                return "append";
            case delete:
                return "delete";
            case deleteH:
                return "deleteHead";
            case write:
                return "write";
            case read:
                return "read";
            case readH:
                return "readHead";
            default:
                throw new AssertionError("unknown: op=" + op);
            }

        }

        /**
         * An array of normalized probabilities assigned to each operator. The
         * array may be indexed by the operator, e.g., dist[{@link #fetch}]
         * would be the probability of a fetch operation.
         * 
         * @return The probability distribution over the defined operators.
         */
        public float[] getDistribution() {

            return _dist;

        }

        /**
         * Generate a random operator according to the distribution described to
         * to the constructor.
         * 
         * @return A declared operator selected according to a probability
         *         distribution.
         */
        public int nextOp() {

            final float rand = r.nextFloat(); // [0:1)

            float cumprob = 0f;

            for (int i = 0; i < _dist.length; i++) {

                cumprob += _dist[i];

                if (rand <= cumprob) {

                    return i;

                }

            }

            throw new AssertionError();

        }

    }

    /**
     * Tests of the {@link Op} test helper class.
     * 
     * @author <a href="mailto:thompsonbry@users.sourceforge.net">Bryan Thompson</a>
     * @version $Id$
     */
    public static class TestOp extends TestCase {

        public void test_Op() {

            Op gen = new Op(.2f, .05f, .2f, .1f, .0001f, .00001f);

            doOpTest(gen);

        }

        public void test_Op2() {

            Op gen = new Op(0f, 0f, 0f, 1f, 0f, 0f);

            doOpTest(gen);

        }

        /**
         * Correct rejection test when all rates are zero.
         */
        public void test_correctRejectionAllZero() {

            try {

                new Op(0f, 0f, 0f, 0f, 0f, 0f);

                fail("Expecting: " + IllegalArgumentException.class);

            } catch (IllegalArgumentException ex) {

                log.info("Ignoring expected exception: " + ex);

            }

        }

        /**
         * Correct rejection test when one or more rates are negative.
         */
        public void test_correctRejectionNegativeRate() {

            /*
             * create, delete, link, unlink, set, clear, rebuild, verify,
             * commit, reopen.
             */
            try {

                new Op(0f, 0f, -1f, 0f, 1f, 0f);

                fail("Expecting: " + IllegalArgumentException.class);

            } catch (IllegalArgumentException ex) {

                log.info("Ignoring expected exception: " + ex);

            }

        }

        /**
         * Verifies the {@link Op} class given an instance with some probability
         * distribution.
         */
        void doOpTest(final Op gen) {

            final int limit = 10000;

            int[] ops = new int[limit];

            int[] sums = new int[Op.lastOp + 1];

            for (int i = 0; i < limit; i++) {

                int op = gen.nextOp();

                assertTrue(op >= 0);

                assertTrue(op <= Op.lastOp);

                ops[i] = op;

                sums[op]++;

            }

            float[] expectedProbDistribution = gen.getDistribution();

            float[] actualProbDistribution = new float[Op.lastOp + 1];

            float sum = 0f;

            for (int i = 0; i <= Op.lastOp; i++) {

                sum += expectedProbDistribution[i];

                actualProbDistribution[i] = (float) ((double) sums[i] / (double) limit);

                float diff = Math.abs(actualProbDistribution[i]
                        - expectedProbDistribution[i]);

                System.err.println("expected[i=" + i + "]="
                        + expectedProbDistribution[i] + ", actual[i=" + i
                        + "]=" + actualProbDistribution[i] + ", diff="
                        + ((int) (diff * 1000)) / 10f + "%");

                assertTrue(diff < 0.02); // difference is less than 2%
                                         // percent.

            }

            assertTrue(Math.abs(sum - 1f) < 0.01); // essential 1.0

        }

    }

    /**
     * Class provides total ordering over {file,version,blockId} tuples.
     * 
     * @author <a href="mailto:thompsonbry@users.sourceforge.net">Bryan Thompson</a>
     * @version $Id$
     */
    private static class FileVersionBlock implements
            Comparable<FileVersionBlock> {

        final String id;

        final int version;

        final long block;

        public FileVersionBlock(String id, int version, long block) {

            assert id != null;
            assert id.length() > 0;
            assert block >= 0 && block <= BigdataFileSystem.MAX_BLOCK;

            this.id = id;
            this.version = version;
            this.block = block;

        }

        public String getId() {

            return id;

        }

        public int getVersion() {

            return version;

        }

        public long getBlockId() {

            return block;

        }

        public int compareTo(FileVersionBlock o) {

            int ret = id.compareTo(o.id);

            if (ret == 0) {

                // Note: avoids overflow.
                ret = version < o.version ? -1 : version > o.version ? 1 : 0;

                if (ret == 0) {

                    // Note: avoids overflow.
                    ret = block < o.block ? -1 : block > o.block ? 1 : 0;

                }

            }

            return ret;

        }

    }

    /**
     * Stress test helper class.
     * 
     * @todo add create and delete of file versions.
     * 
     * @todo modify to test operations on more than one file version (add a hash
     *       map from {file,version} to the ground truth stats for that file
     *       version)
     * 
     * @todo modify test to use a thread pool for concurrent operations and
     *       concurrency safe ground truth data. the deleteHead operation will
     *       have to be truely atomic for this to work. must synchronize, use a
     *       lock, etc. to ensure that the ground truth state update is atomic
     *       and consistent with the requested update on the repo.
     * 
     * @author <a href="mailto:thompsonbry@users.sourceforge.net">Bryan Thompson</a>
     * @version $Id$
     */
    protected static class StressTest {

        final BigdataFileSystem repo;

        final int limit;

        final Op gen;

        final String id = "test";

        final int version = 0;

        final Random r = new Random();

        /*
         * Ground truth: the non-deleted blocks for the file version, in block
         * identifier order (TreeMap over FileVersionBlock keys), mapped to the
         * expected block contents.
         */
        final Map<FileVersionBlock, byte[]> blocks = new TreeMap<FileVersionBlock, byte[]>();

        /*
         * This is the value that should be reported by getBlockCount().
         * 
         * Note: This value never decreases unless there is a compacting merge
         * that eradicates blocks for a file version.
         * 
         * Note: The #of non-deleted blocks that should be in the file version
         * is blocks.size().
         */
        final AtomicLong blockCount = new AtomicLong(0L);

        public StressTest(BigdataFileSystem repo, int limit, Op gen) {

            this.repo = repo;

            this.limit = limit;

            this.gen = gen;

            run();

        }

        // run a set of random operations.
        void run() {

            for (int i = 0; i < limit; i++) {

                final int op = gen.nextOp();

                switch (op) {

                case Op.append: {

                    /*
                     * @todo make empty, near empty, full, and near full blocks
                     * relatively likely while also exercising the offset and
                     * length parameters. The same changes should be applied to
                     * the writeBlock() case below.
                     */
                    int capacity = r.nextInt(repo.getBlockSize()); // [0:blockSize-1]

                    /*
                     * Note: Random#nextInt(bound) requires a positive bound, so
                     * guard the zero-capacity (empty block) case rather than
                     * throwing IllegalArgumentException.
                     */
                    int off = capacity == 0 ? 0 : r.nextInt(capacity); // [0:capacity-1]

                    int len = r.nextInt(capacity - off + 1);

                    byte[] b = new byte[capacity];

                    r.nextBytes(b);

                    // @todo if lastBlock == MAX_BLOCKS then deny append.

                    long block = repo.appendBlock(id, version, b, off, len);

                    blocks.put(new FileVersionBlock(id, version, block), copy(
                            b, off, len));

                    blockCount.incrementAndGet();

                    break;

                }

                case Op.delete: {

                    final long block = getRandomBlock(id, version);

                    if (block == -1L) {

                        /*
                         * Since there are no blocks try deleting one at random
                         * and verify that it was not deleted.
                         */
                        assertFalse("Not expecting block to be deleted", repo
                                .deleteBlock(id, version, r
                                        .nextInt(Integer.MAX_VALUE)));

                    } else {

                        // delete a block known to exist.
                        assertTrue("Expecting block to be deleted", repo
                                .deleteBlock(id, version, block));

                        blocks.remove(new FileVersionBlock(id, version, block));

                    }

                    break;

                }

                case Op.deleteH: {

                    final long block = getFirstBlock(id, version);

                    final long deleted = repo.deleteHead(id, version);

                    if (block == -1L) {

                        assertEquals("Expecting nothing deleted", -1L, deleted);

                    } else {

                        assertEquals("Expecting block=" + block
                                + " to be deleted", block, deleted);

                        blocks.remove(new FileVersionBlock(id, version, block));

                    }

                    break;

                }

                case Op.write: {

                    int capacity = r.nextInt(repo.getBlockSize()); // [0:blockSize-1]

                    // Note: guard zero-capacity case (see Op.append above).
                    int off = capacity == 0 ? 0 : r.nextInt(capacity); // [0:capacity-1]

                    int len = r.nextInt(capacity - off + 1);

                    byte[] b = new byte[capacity];

                    r.nextBytes(b);

                    // Note: choose random write with small range to make
                    // overwrite likely.
                    long block = r.nextInt(20);

                    boolean overwrite = repo.writeBlock(id, version, block, b,
                            off, len);

                    byte[] old = blocks.put(new FileVersionBlock(id, version,
                            block), copy(b, off, len));

                    if (old == null) {

                        assertFalse("Not expecting overwrite", overwrite);

                    } else {

                        assertTrue("Expecting overwrite", overwrite);

                    }

                    blockCount.incrementAndGet();

                    break;

                }

                case Op.read: {

                    final long block = getRandomBlock(id, version);

                    if (block == -1L) {

                        // empty file version so true reading a random block.
                        assertNull("Expecting nothing to read", repo.readBlock(
                                id, version, r.nextInt(Integer.MAX_VALUE)));

                    } else {

                        byte[] actual = repo.readBlock(id, version, block);

                        byte[] expected = blocks.get(new FileVersionBlock(id,
                                version, block));

                        assert expected != null;

                        assertEquals("data", expected, actual);

                    }

                    break;

                }

                case Op.readH: {

                    final long block = getFirstBlock(id, version);

                    if (block == -1L) {

                        assertNull("Expecting nothing to read", repo.readHead(
                                id, version));

                    } else {

                        byte[] actual = repo.readHead(id, version);

                        byte[] expected = blocks.get(new FileVersionBlock(id,
                                version, block));

                        assert expected != null;

                        assertEquals("data", expected, actual);

                    }

                    break;

                }

                default:
                    /*
                     * Note: do NOT call Op.getName(op) here - it throws
                     * IllegalArgumentException for out-of-range codes, which
                     * would mask this AssertionError.
                     */
                    throw new AssertionError("Unknown operation code: " + op);

                }

            }

        }

        /**
         * Return a copy of those the bytes between <i>off</i> (inclusive) and
         * <i>off+len</i> (exclusive).
         * 
         * @param b
         *            The bytes.
         * @param off
         *            The offset of the first byte to copy.
         * @param len
         *            The #of bytes to copy.
         * 
         * @return The copy of those bytes.
         */
        protected byte[] copy(byte[] b, int off, int len) {

            byte[] x = new byte[len];

            System.arraycopy(b, off, x, 0, len);

            return x;

        }

        /**
         * Return the first non-deleted block identifier for the given file
         * version.
         * <p>
         * Note: the <i>id</i> and <i>version</i> parameters are currently
         * ignored since this stress test operates on a single file version
         * (the ground truth map only contains entries for that version).
         * 
         * @param id
         *            The file identifier.
         * @param version
         *            The version identifier.
         * 
         * @return The 1st non-deleted block identifier -or- <code>-1L</code>
         *         iff there are no blocks for that file version.
         */
        protected long getFirstBlock(String id, int version) {

            Iterator<FileVersionBlock> itr = blocks.keySet().iterator();

            if (!itr.hasNext())
                return -1L;

            return itr.next().getBlockId();

        }

        /**
         * Return a randomly selected block identifier from among the
         * non-deleted blocks for the given file version.
         * <p>
         * Note: the <i>id</i> and <i>version</i> parameters are currently
         * ignored since this stress test operates on a single file version.
         * 
         * @param id
         *            The file identifier.
         * @param version
         *            The version identifier.
         * 
         * @return The block identifier -or- <code>-1L</code> iff there are no
         *         non-deleted blocks.
         */
        protected long getRandomBlock(String id, int version) {

            final int nblocks = blocks.size();

            if (nblocks == 0)
                return -1L;

            // choose at random.
            final int n = r.nextInt(nblocks);

            // return block identifier for the nth block.
            Iterator<FileVersionBlock> itr = blocks.keySet().iterator();

            FileVersionBlock x = null;

            for (int i = 0; i <= n; i++) {

                x = itr.next();

            }

            return x.getBlockId();

        }

    }

}