/*
Copyright (c) 2012 James Ahlborn
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
Original File: com.healthmarketscience.jackcess.MemFileChannel
*/
package automately.core.file.nio;
import automately.core.Automately;
import automately.core.file.VirtualFile;
import com.hazelcast.core.ILock;
import io.jsync.app.core.Cluster;
import io.jsync.buffer.Buffer;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.*;
import static automately.core.file.VirtualFileService.getFileStore;
/**
 * A {@link FileChannel} implementation backed entirely by in-memory chunks,
 * optionally bound to a {@link VirtualFile} so that every successful
 * {@code write} is persisted through to the cluster file store.
 *
 * <p>Data is held in fixed-size chunks so reads/writes never straddle more
 * than one array boundary per iteration. The chunk table grows in powers of
 * two. When {@link #fileRef} is {@code null} the channel is purely in-memory
 * and has no cluster side effects.
 *
 * <p>NOTE(review): only {@link #write(ByteBuffer, long)} persists to the file
 * store; {@link #transferFrom} and {@link #truncate} mutate in-memory state
 * without writing through — confirm that this asymmetry is intentional for
 * callers that use those methods on a file-backed channel.
 *
 * <p>This class is not thread-safe; external synchronization is required for
 * concurrent use.
 */
public class UserFileChannel extends FileChannel {

    private static final byte[][] EMPTY_DATA = new byte[0][];

    /**
     * Fixed size of every allocated chunk. Reads and writes are decomposed so
     * that each step stays within a single chunk.
     */
    private static final int CHUNK_SIZE = 4096;

    /** Initial chunk-table length; the table grows in powers of two from here. */
    private static final int INIT_CHUNKS = 128;

    /** Current read/write position. */
    private long _position;

    /** Current amount of actual data in the file. */
    private long _size;

    /**
     * Chunks containing the file data. The chunk-table length is always a
     * power of 2 and every allocated chunk is exactly CHUNK_SIZE bytes.
     * Entries at or beyond getNumChunks(_size) may be null (freed by
     * truncate or never allocated).
     */
    private byte[][] _data;

    /** Backing virtual file, or {@code null} for a purely in-memory channel. */
    private VirtualFile fileRef;

    private UserFileChannel() {
        this(null, 0L, 0L, EMPTY_DATA);
    }

    private UserFileChannel(VirtualFile fileRef, long position, long size, byte[][] data) {
        this.fileRef = fileRef;
        _position = position;
        _size = size;
        _data = data;
    }

    /**
     * Creates a new read/write, empty, purely in-memory UserFileChannel with
     * no backing file.
     */
    public static UserFileChannel newChannel() {
        return new UserFileChannel();
    }

    /**
     * Creates a new read/write UserFileChannel backed by the given file,
     * pre-loaded with any existing file-store contents.
     */
    public static UserFileChannel newChannel(VirtualFile file) throws IOException {
        return newChannel(file, "rw");
    }

    /**
     * Creates a new UserFileChannel backed by the given file with the given
     * mode (for mode details see
     * {@link RandomAccessFile#RandomAccessFile(File, String)}). Existing
     * file-store contents, if any, are loaded into memory first.
     *
     * @param file the virtual file backing this channel
     * @param mode access mode; any mode not containing "w" yields a read-only
     *             channel
     */
    public static UserFileChannel newChannel(VirtualFile file, String mode)
        throws IOException {
        UserFileChannel channel;
        Buffer buf = getFileStore().readRawData(file);
        if (buf != null) {
            // load existing contents via transferFrom (which does not itself
            // write back to the file store)
            channel = new UserFileChannel();
            channel.fileRef = file;
            channel.transferFrom(Channels.newChannel(new ByteArrayInputStream(buf.getBytes())), 0L, Long.MAX_VALUE);
        } else {
            channel = new UserFileChannel(file, 0L, 0L, EMPTY_DATA);
        }
        // FIX: the original condition also tested !mode.equals("rw"), which is
        // implied by !mode.contains("w") and therefore dead code.
        if (!mode.contains("w")) {
            channel = new ReadOnlyChannel(channel);
        }
        return channel;
    }

    /** Index into the chunk table for the given absolute position. */
    private static int getChunkIndex(long pos) {
        return (int) (pos / CHUNK_SIZE);
    }

    /** Byte offset within a chunk for the given absolute position. */
    private static int getChunkOffset(long pos) {
        return (int) (pos % CHUNK_SIZE);
    }

    /** Number of chunks needed to hold {@code size} bytes (rounds up). */
    private static int getNumChunks(long size) {
        return getChunkIndex(size + CHUNK_SIZE - 1);
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        int bytesRead = read(dst, _position);
        if (bytesRead > 0) {
            _position += bytesRead;
        }
        return bytesRead;
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        if (position >= _size) {
            // at or past EOF
            return -1;
        }
        int numBytes = (int) Math.min(dst.remaining(), _size - position);
        int rem = numBytes;
        while (rem > 0) {
            byte[] chunk = _data[getChunkIndex(position)];
            int chunkOffset = getChunkOffset(position);
            // never read across a chunk boundary in one step
            int bytesRead = Math.min(rem, CHUNK_SIZE - chunkOffset);
            dst.put(chunk, chunkOffset, bytesRead);
            rem -= bytesRead;
            position += bytesRead;
        }
        return numBytes;
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        int bytesWritten = write(src, _position);
        _position += bytesWritten;
        return bytesWritten;
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        // FIX: refuse up front when the backing file is locked, BEFORE any
        // in-memory state is mutated. The original only checked the lock
        // after the chunks and _size had already been modified (and after the
        // full snapshot buffer had been built), so a "cannot be modified"
        // failure left the channel half-modified.
        Cluster cluster = null;
        if (fileRef != null) {
            cluster = Automately.activeInstance().cluster();
            ILock fileLock = cluster.hazelcast().getLock("fs.file.lock." + fileRef.token());
            // We cannot modify this file if it is locked.
            if (fileLock.isLocked()) {
                throw new IOException(this + " cannot be modified!");
            }
        }
        int numBytes = src.remaining();
        long newSize = position + numBytes;
        ensureCapacity(newSize);
        int rem = numBytes;
        while (rem > 0) {
            byte[] chunk = _data[getChunkIndex(position)];
            int chunkOffset = getChunkOffset(position);
            // never write across a chunk boundary in one step
            int bytesWritten = Math.min(rem, CHUNK_SIZE - chunkOffset);
            src.get(chunk, chunkOffset, bytesWritten);
            rem -= bytesWritten;
            position += bytesWritten;
        }
        if (newSize > _size) {
            _size = newSize;
        }
        // If there is a fileRef we go ahead and write the data through to the
        // cluster file store.
        if (fileRef != null) {
            persistToFileStore(cluster);
        }
        return numBytes;
    }

    /**
     * Snapshots the first {@code _size} bytes of the in-memory chunks into a
     * single Buffer and writes it through to the cluster file store backing
     * {@link #fileRef}, updating the file's recorded size and its entry in
     * the persistent "files" map.
     *
     * @param cluster the active cluster (already resolved by the caller)
     */
    private void persistToFileStore(Cluster cluster) {
        Buffer buf = new Buffer();
        // FIX: copy chunk-wise, capped at _size, instead of the original
        // per-byte loop that re-checked buf.length() for every single byte.
        long remaining = _size;
        for (byte[] chunk : _data) {
            if (remaining <= 0L || chunk == null) {
                // chunks are allocated densely up to _size, so a null chunk
                // here also means we are done
                break;
            }
            int len = (int) Math.min(remaining, (long) CHUNK_SIZE);
            byte[] slice = new byte[len];
            System.arraycopy(chunk, 0, slice, 0, len);
            buf.appendBytes(slice);
            remaining -= len;
        }
        fileRef.size = buf.length();
        cluster.data().persistentMap("files").set(fileRef.token(), fileRef);
        getFileStore().writeRawData(fileRef, buf);
    }

    @Override
    public long position() throws IOException {
        return _position;
    }

    @Override
    public FileChannel position(long newPosition) throws IOException {
        if (newPosition < 0L) {
            throw new IllegalArgumentException("negative position");
        }
        _position = newPosition;
        return this;
    }

    @Override
    public long size() throws IOException {
        return _size;
    }

    @Override
    public FileChannel truncate(long newSize) throws IOException {
        if (newSize < 0L) {
            throw new IllegalArgumentException("negative size");
        }
        if (newSize < _size) {
            // we'll optimize for memory over speed and aggressively free
            // unused chunks
            for (int i = getNumChunks(newSize); i < getNumChunks(_size); ++i) {
                _data[i] = null;
            }
            _size = newSize;
        }
        _position = Math.min(newSize, _position);
        return this;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // nothing to do — data lives in memory and writes already persist
        // eagerly when a fileRef is present
    }

    /**
     * Convenience method for writing the entire contents of this channel to
     * the given destination channel.
     *
     * @see #transferTo(long, long, WritableByteChannel)
     */
    public long transferTo(WritableByteChannel dst)
        throws IOException {
        return transferTo(0L, _size, dst);
    }

    @Override
    public long transferTo(long position, long count, WritableByteChannel dst)
        throws IOException {
        if (position >= _size) {
            return 0L;
        }
        count = Math.min(count, _size - position);
        int chunkIndex = getChunkIndex(position);
        int chunkOffset = getChunkOffset(position);
        long numBytes = 0L;
        while (count > 0L) {
            int chunkBytes = (int) Math.min(count, CHUNK_SIZE - chunkOffset);
            ByteBuffer src = ByteBuffer.wrap(_data[chunkIndex], chunkOffset,
                                             chunkBytes);
            do {
                int bytesWritten = dst.write(src);
                if (bytesWritten == 0) {
                    // dst full — report what we managed to transfer
                    return numBytes;
                }
                numBytes += bytesWritten;
                count -= bytesWritten;
            } while (src.hasRemaining());
            ++chunkIndex;
            chunkOffset = 0;
        }
        return numBytes;
    }

    /**
     * Convenience method for writing the entire contents of this channel to
     * the given destination stream.
     *
     * @see #transferTo(long, long, WritableByteChannel)
     */
    public long transferTo(OutputStream dst)
        throws IOException {
        return transferTo(0L, _size, dst);
    }

    /**
     * Convenience method for writing the selected portion of this channel to
     * the given destination stream.
     *
     * @see #transferTo(long, long, WritableByteChannel)
     */
    public long transferTo(long position, long count, OutputStream dst)
        throws IOException {
        return transferTo(position, count, Channels.newChannel(dst));
    }

    @Override
    public long transferFrom(ReadableByteChannel src,
                             long position, long count)
        throws IOException {
        // NOTE(review): unlike write(), this does not persist to the file
        // store — parity with the original implementation.
        int chunkIndex = getChunkIndex(position);
        int chunkOffset = getChunkOffset(position);
        long numBytes = 0L;
        while (count > 0L) {
            // ensure at least the chunk containing the next byte exists
            // (ensureCapacity allocates whole chunks, so the wrap below stays
            // within allocated memory)
            ensureCapacity(position + numBytes + 1);
            int chunkBytes = (int) Math.min(count, CHUNK_SIZE - chunkOffset);
            ByteBuffer dst = ByteBuffer.wrap(_data[chunkIndex], chunkOffset,
                                             chunkBytes);
            do {
                int bytesRead = src.read(dst);
                if (bytesRead <= 0) {
                    // src empty
                    return numBytes;
                }
                numBytes += bytesRead;
                count -= bytesRead;
                _size = Math.max(_size, position + numBytes);
            } while (dst.hasRemaining());
            ++chunkIndex;
            chunkOffset = 0;
        }
        return numBytes;
    }

    @Override
    protected void implCloseChannel() throws IOException {
        // release data so the chunks can be garbage collected
        _data = EMPTY_DATA;
        _size = _position = 0L;
    }

    /**
     * Grows the chunk table (powers of two) and allocates zero-filled chunks
     * so that {@code newSize} bytes are addressable. No-op if already large
     * enough.
     */
    private void ensureCapacity(long newSize) {
        if (newSize <= _size) {
            // nothing to do
            return;
        }
        int newNumChunks = getNumChunks(newSize);
        int numChunks = getNumChunks(_size);
        if (newNumChunks > _data.length) {
            // need to extend chunk array (use powers of 2)
            int newDataLen = Math.max(_data.length, INIT_CHUNKS);
            while (newDataLen < newNumChunks) {
                newDataLen <<= 1;
            }
            byte[][] newData = new byte[newDataLen][];
            // copy existing chunk references
            System.arraycopy(_data, 0, newData, 0, numChunks);
            _data = newData;
        }
        // allocate new chunks
        for (int i = numChunks; i < newNumChunks; ++i) {
            _data[i] = new byte[CHUNK_SIZE];
        }
    }

    @Override
    public long write(ByteBuffer[] srcs, int offset, int length)
        throws IOException {
        long numBytes = 0L;
        for (int i = offset; i < offset + length; ++i) {
            numBytes += write(srcs[i]);
        }
        return numBytes;
    }

    @Override
    public long read(ByteBuffer[] dsts, int offset, int length)
        throws IOException {
        long numBytes = 0L;
        for (int i = offset; i < offset + length; ++i) {
            if (_position >= _size) {
                // EOF: -1 only if nothing was read at all
                return ((numBytes > 0L) ? numBytes : -1L);
            }
            numBytes += read(dsts[i]);
        }
        return numBytes;
    }

    @Override
    public MappedByteBuffer map(MapMode mode, long position, long size)
        throws IOException {
        // memory-mapping an in-memory channel is not meaningful
        throw new UnsupportedOperationException();
    }

    @Override
    public FileLock lock(long position, long size, boolean shared)
        throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public FileLock tryLock(long position, long size, boolean shared)
        throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Subclass of UserFileChannel which is read-only. All mutating positional
     * operations throw {@link NonWritableChannelException}; the relative
     * write/transfer methods inherit from the superclass and delegate to the
     * overridden methods here, so they throw as well.
     */
    private static final class ReadOnlyChannel extends UserFileChannel {

        private ReadOnlyChannel(UserFileChannel channel) {
            // shares the underlying chunk array with the source channel
            super(channel.fileRef, channel._position, channel._size, channel._data);
        }

        @Override
        public int write(ByteBuffer src, long position) throws IOException {
            throw new NonWritableChannelException();
        }

        @Override
        public FileChannel truncate(long newSize) throws IOException {
            throw new NonWritableChannelException();
        }

        @Override
        public long transferFrom(ReadableByteChannel src,
                                 long position, long count)
            throws IOException {
            throw new NonWritableChannelException();
        }
    }
}