/*
* Copyright (C) 2006-2008 Alfresco Software Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing
*/
package org.alfresco.jlan.server.filesys.db;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import org.alfresco.jlan.debug.Debug;
import org.alfresco.jlan.server.SrvSession;
import org.alfresco.jlan.server.core.DeviceContext;
import org.alfresco.jlan.server.filesys.DiskDeviceContext;
import org.alfresco.jlan.server.filesys.FileName;
import org.alfresco.jlan.server.filesys.FileOpenParams;
import org.alfresco.jlan.server.filesys.FileStatus;
import org.alfresco.jlan.server.filesys.NetworkFile;
import org.alfresco.jlan.server.filesys.cache.FileState;
import org.alfresco.jlan.server.filesys.cache.FileStateCache;
import org.alfresco.jlan.server.filesys.cache.FileStateListener;
import org.alfresco.jlan.server.filesys.loader.BackgroundFileLoader;
import org.alfresco.jlan.server.filesys.loader.CachedFileInfo;
import org.alfresco.jlan.server.filesys.loader.FileLoader;
import org.alfresco.jlan.server.filesys.loader.FileLoaderException;
import org.alfresco.jlan.server.filesys.loader.FileProcessor;
import org.alfresco.jlan.server.filesys.loader.FileProcessorList;
import org.alfresco.jlan.server.filesys.loader.FileRequest;
import org.alfresco.jlan.server.filesys.loader.FileRequestQueue;
import org.alfresco.jlan.server.filesys.loader.FileSegment;
import org.alfresco.jlan.server.filesys.loader.FileSegmentInfo;
import org.alfresco.jlan.server.filesys.loader.MemorySegmentList;
import org.alfresco.jlan.server.filesys.loader.MultipleFileRequest;
import org.alfresco.jlan.server.filesys.loader.SingleFileRequest;
import org.alfresco.jlan.util.MemorySize;
import org.springframework.extensions.config.ConfigElement;
/**
* Database File Data Loader Class
*
* <p>The database file data loader loads/saves file data to a BLOB field in a seperate database table to the
* main filesystem structure. The file data is indexed using the file id generated by the main database disk driver.
*
* <p>The file data may be split up into several BLOB fields.
*
* <p>This class relies on a seperate DBDataInterface implementation to provide the methods to load and save
* the file data to the database table.
*
* @author gkspencer
*/
public class DBFileLoader implements FileLoader, BackgroundFileLoader, FileStateListener {
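/*
 * Illustrative configuration sketch. Only the element names below are taken from the parameters parsed
 * by initializeLoader(); the values and layout are example assumptions, not defaults:
 *
 *   <ThreadPoolSize>4:2</ThreadPoolSize>                     read[:write] worker threads
 *   <TempDirectory>/var/jlan/ldrtemp</TempDirectory>         temporary file data area
 *   <MaximumFilesPerDirectory>500</MaximumFilesPerDirectory>
 *   <SmallFileSize>100K</SmallFileSize>                      enables transaction/Jar bundling of small files
 *   <FilesPerJar>25</FilesPerJar>
 *   <SizePerJar>200K</SizePerJar>
 *   <TransactionTimeout>5</TransactionTimeout>               seconds
 *   <FragmentSize>512K</FragmentSize>
 *   <JarCompressionLevel>0</JarCompressionLevel>
 *   <KeepJars/>
 *   <Debug/>
 */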
// Status codes returned from the load/save worker thread processing
public final static int StsSuccess = 0;
public final static int StsRequeue = 1;
public final static int StsError = 2;
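// (StsSuccess - request completed, StsRequeue - request should be retried later, StsError - request failed)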
// Temporary sub-directory/file/Jar prefix
public static final String TempDirPrefix = "ldr";
public static final String TempFilePrefix = "ldr_";
public static final String JarFilePrefix = "jar_";
// Maximum files per temporary sub-directory
private static final int MaximumFilesPerSubDir = 500;
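// Temporary files are created as <TempDirectory>/<TempDirPrefix><n>/<TempFilePrefix><fileId>[_<streamId>].tmp,
// with a new numbered sub-directory started once MaximumFilesPerSubDir files have been allocated in the current one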
// Attributes attached to the file state
public static final String DBFileSegmentInfo = "DBFileSegmentInfo";
// Default/minimum/maximum number of worker threads to use
public static final int DefaultWorkerThreads = 4;
public static final int MinimumWorkerThreads = 1;
public static final int MaximumWorkerThreads = 50;
// Default/minimum files per jar and jar size settings
public static final int DefaultFilesPerJar = 25;
public static final int MinimumFilesPerJar = 5;
public static final int DefaultSizePerJar = 200000;
public static final int MinimumSizePerJar = 100000;
// File state timeout values
public static final long SequentialFileExpire = 3000L; // milliseconds
public static final long RequestProcessedExpire = 3000L; // "
public static final long RequestQueuedExpire = 10000L; // "
// Transaction minimum file size
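// (files smaller than this are counted as this size when accumulating the current transaction total)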
public static final int TransactionMinimumFileSize = 1024;
// Transaction timeout default, minimum and maximum values
public static final long DefaultTransactionTimeout = 5000L; // milliseconds
public static final long MinimumTransactionTimeout = 2000L; // "
public static final long MaximumTransactionTimeout = 60000L; // "
// Default file data fragment size
public final static long DEFAULT_FRAGSIZE = 512L * 1024L; // 1/2Mb
public final static long MIN_FRAGSIZE = 64L * 1024L; // 64Kb
public final static long MAX_FRAGSIZE = 1024L * 1024L * 1024L;// 1Gb
// Memory buffer maximum size
public final static long MAX_MEMORYBUFFER = 512L * 1024L; // 1/2Mb
// Prefix for Jar files when added to the file state cache. The files must not be accessible from the
// client so we use invalid file name characters to prefix the name.
public static final String JarStatePrefix = "**JAR";
// Default Jar file cache timeout
public static final long JarStateTimeout = 300000L; // 5 minutes
// Default Jar compression level
public static final int JarDefaultCompression = 0; // no compression
// Name, used to prefix worker thread names
private String m_name;
// Maximum in-memory file request size and low water mark
private int m_maxQueueSize;
private int m_lowQueueSize;
// Enable debug output
private boolean m_debug;
// Number of worker threads to create for read/write requests
private int m_readWorkers;
private int m_writeWorkers;
// Small file threshold, files below this threshold will be queued to the transaction queue
// to be bundled together into a multiple file request when there are 'n' files in the transaction
// and/or the total file size is greater than the maximum size threshold.
//
// A value of zero indicates that transaction queueing is disabled.
private long m_smallFileSize;
private int m_filesPerJar;
private int m_sizePerJar;
// Database device context
private DBDeviceContext m_dbCtx;
// File state cache, from device context
private FileStateCache m_stateCache;
// Database data interface used to load/save the file and Jar file data
private DBDataInterface m_dbDataInterface;
// Worker thread pool for loading/saving file data
private BackgroundLoadSave m_backgroundLoader;
// File data fragment size
private long m_fragSize = DEFAULT_FRAGSIZE;
// Keep Jar files created for multiple file transaction requests
private boolean m_keepJars;
// Jar file compression level, 0 = no compression, 9 = highest compression
private int m_jarCompressLevel;
// Temporary file area
private String m_tempDirName;
private File m_tempDir;
// Temporary directory/file prefixes
private String m_tempDirPrefix = TempDirPrefix;
private String m_tempFilePrefix = TempFilePrefix;
// Current temporary sub-directory
private String m_curTempName;
private File m_curTempDir;
private int m_curTempIdx;
// Maximum/current number of files in a temporary directory
private int m_tempCount;
private int m_tempMax;
// Current transaction id, cumulative file size, file count and time last file was added to the current transaction.
// Transaction lock used to synchronize access to the values.
private int m_tranId;
private int m_totFileSize;
private int m_totFiles;
private long m_lastTranFile;
private Object m_tranLock = new Object();
// Time to wait for more files to be added to a transaction before it is sent to be processed, in milliseconds and
// transaction timer thread
private long m_tranTimeout;
private TransactionTimer m_transTimer;
// List of file processors that process cached files before storing and after loading.
private FileProcessorList m_fileProcessors;
/**
* Transaction Timer Thread Inner Class
*/
protected class TransactionTimer implements Runnable {
// Transaction timer thread
private Thread m_thread;
// Transaction timeout and thread wakeup interval
private long m_timeout;
private long m_wakeup;
// Shutdown flag
private boolean m_shutdown = false;
/**
* Class constructor
*
* @param name String
* @param timeout long
*/
public TransactionTimer(String name, long timeout) {
// Set the transaction timeout and thread wakeup interval
m_timeout = timeout;
m_wakeup = timeout/2;
// Create the thread and start it
m_thread = new Thread(this);
m_thread.setName(name);
m_thread.setDaemon(true);
m_thread.start();
}
/**
* Request the worker thread to shutdown
*/
public final void shutdownRequest() {
m_shutdown = true;
try {
m_thread.interrupt();
}
catch (Exception ex) {
}
}
/**
* Run the thread
*/
public void run() {
// Loop until shutdown
while ( m_shutdown == false) {
try {
Thread.sleep(m_wakeup);
}
catch (InterruptedException ex) {
}
// Check if a shutdown has been requested
if ( m_shutdown)
break;
// Check if the current transaction should be flushed to the processing queue
if ( m_lastTranFile != 0L) {
// Get the current time
long timeNow = System.currentTimeMillis();
synchronized ( m_tranLock) {
// Check if the transaction has timed out
if ((m_lastTranFile + m_timeout) < timeNow) {
// Wakeup the transaction loader to send the current transaction
m_backgroundLoader.flushTransaction(m_tranId);
// Update the current transaction details
m_tranId++;
m_totFiles = 0;
m_totFileSize = 0;
m_lastTranFile = 0L;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("BackgroundLoadSave Transaction timed out, queued for loading");
}
}
}
}
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("BackgroundLoadSave Transaction timer shutdown");
}
};
/**
* Class constructor
*
*/
public DBFileLoader() {
}
/**
* Return the database features required by this file loader. Return zero if no database features
* are required by the loader.
*
* @return int
*/
public int getRequiredDBFeatures() {
// Return the database features required by the loader
return DBInterface.FeatureData + DBInterface.FeatureJarData + DBInterface.FeatureQueue;
}
/**
* Return the database device context
*
* @return DBDeviceContext
*/
public final DBDeviceContext getContext() {
return m_dbCtx;
}
/**
* Return the Jar compression level
*
* @return int
*/
public final int getJarCompressionLevel() {
return m_jarCompressLevel;
}
/**
* Return the file state cache
*
* @return FileStateCache
*/
protected final FileStateCache getStateCache() {
return m_stateCache;
}
/**
* Return the temporary directory name
*
* @return String
*/
public final String getTemporaryDirectoryPath() {
return m_tempDirName;
}
/**
* Return the temporary directory
*
* @return File
*/
public final File getTemporaryDirectory() {
return m_tempDir;
}
/**
* Return the current temporary sub-directory
*
* @return File
*/
public final File getCurrentTempDirectory() {
return m_curTempDir;
}
/**
* Check if Jar files should be kept in the temporary area
*
* @return boolean
*/
public final boolean hasKeepJars() {
return m_keepJars;
}
/**
* Return the database data interface
*
* @return DBDataInterface
*/
public final DBDataInterface getDBDataInterface() {
return m_dbDataInterface;
}
/**
* Add a file processor to process files before storing and after loading.
*
* @param fileProc FileProcessor
* @throws FileLoaderException
*/
public void addFileProcessor(FileProcessor fileProc)
throws FileLoaderException {
// Check if the file processor list has been allocated
if ( m_fileProcessors == null)
m_fileProcessors = new FileProcessorList();
// Add the file processor
m_fileProcessors.addProcessor(fileProc);
}
/**
* Determine if there are any file processors configured
*
* @return boolean
*/
public final boolean hasFileProcessors() {
return m_fileProcessors != null;
}
/**
* Check if debug output is enabled
*
* @return boolean
*/
public final boolean hasDebug() {
return m_debug;
}
/**
* Return the maximum in-memory file request queue size
*
* @return int
*/
public final int getMaximumQueueSize() {
return m_maxQueueSize;
}
/**
* Return the in-memory file request queue low water mark level
*
* @return int
*/
public final int getLowQueueSize() {
return m_lowQueueSize;
}
/**
* Return the worker thread prefix
*
* @return String
*/
public final String getName() {
return m_name;
}
/**
* Get the small file threshold size
*
* @return long
*/
public final long getSmallFileSize() {
return m_smallFileSize;
}
/**
* Get the number of files per Jar
*
* @return int
*/
public final int getFilesPerJar() {
return m_filesPerJar;
}
/**
* Get the file size limit for packing into Jars
*
* @return int
*/
public final int getJarFileSize() {
return m_sizePerJar;
}
/**
* Get the transaction timeout value, in milliseconds
*
* @return long
*/
public final long getTransactionTimeout() {
return m_tranTimeout;
}
/**
* Return the temporary sub-directory prefix
*
* @return String
*/
public final String getTempDirectoryPrefix() {
return m_tempDirPrefix;
}
/**
* Return the temporary file prefix
*
* @return String
*/
public final String getTempFilePrefix() {
return m_tempFilePrefix;
}
/**
* Set the worker thread name prefix
*
* @param name String
*/
protected final void setName(String name) {
m_name = name;
}
/**
* Create a network file for the specified file
*
* @param params FileOpenParams
* @param fid int
* @param stid int
* @param did int
* @param create boolean
* @param dir boolean
* @exception IOException
* @exception FileNotFoundException
*/
public NetworkFile openFile(FileOpenParams params, int fid, int stid, int did, boolean create, boolean dir)
throws IOException, FileNotFoundException {
// Split the file name to get the name only
String fullName = params.getFullPath();
String[] paths = FileName.splitPath(params.getPath());
String name = paths[1];
// Find, or create, the file state for the file/directory
FileState fstate = m_stateCache.findFileState(params.getFullPath(), true);
fstate.setExpiryTime(System.currentTimeMillis() + getContext().getCacheTimeout());
// Check if the file is a directory
DBNetworkFile netFile = null;
if ( dir == false) {
// Create the network file and associated file segment
CachedNetworkFile cacheFile = createNetworkFile(fstate, params, name, fid, stid, did);
netFile = cacheFile;
// Check if the file is being opened for sequential access and the data has not yet been loaded
FileSegment fileSeg = cacheFile.getFileSegment();
if ( create == true || params.isOverwrite() == true) {
// Indicate that the file data is available, this is a new file or the existing file is being overwritten
// so there is no data to load.
fileSeg.setStatus(FileSegmentInfo.Available);
}
else if ( params.isSequentialAccessOnly() && fileSeg.isDataLoading() == false) {
synchronized ( cacheFile.getFileState()) {
// Create the temporary file
cacheFile.openFile(create);
cacheFile.closeFile();
// Queue a file data load request
if ( fileSeg.isDataLoading() == false)
queueFileRequest(new SingleFileRequest(FileRequest.LOAD, cacheFile.getFileId(), cacheFile.getStreamId(), fileSeg.getInfo(),
cacheFile.getFullNameStream(), fstate));
}
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## FileLoader Queued file load, SEQUENTIAL access");
}
}
else {
// Create a placeholder network file for the directory
netFile = new DirectoryNetworkFile(name, fid, did, fstate);
// Debug
if ( Debug.EnableInfo && hasDebug())
Debug.println("DBFileLoader.openFile() DIR state=" + fstate);
}
// Return the network file
return netFile;
}
/**
* Close the network file
*
* @param sess SrvSession
* @param netFile NetworkFile
* @exception IOException
*/
public void closeFile(SrvSession sess, NetworkFile netFile)
throws IOException {
// Close the cached network file
if ( netFile instanceof CachedNetworkFile) {
// Get the cached network file
CachedNetworkFile cacheFile = (CachedNetworkFile) netFile;
cacheFile.closeFile();
// Get the file segment details
FileSegment fileSeg = cacheFile.getFileSegment();
// Check if the file data has been updated, if so then queue a file save
if ( fileSeg.isUpdated()) {
// Set the modified date/time and file size for the file
File tempFile = new File(fileSeg.getTemporaryFile());
netFile.setModifyDate(tempFile.lastModified());
netFile.setFileSize(tempFile.length());
// Queue a file save request to save the data back to the repository, if not already queued
if ( fileSeg.isSaveQueued() == false) {
// Create a file save request for the updated file segment
SingleFileRequest fileReq = new SingleFileRequest(FileRequest.SAVE, cacheFile.getFileId(), cacheFile.getStreamId(), fileSeg.getInfo(),
netFile.getFullNameStream(), cacheFile.getFileState());
// Set the file segment status
fileSeg.setStatus(FileSegmentInfo.SaveWait, true);
// Check if the request should be part of a transaction
if ( getSmallFileSize() > 0 && netFile.getFileSize() < getSmallFileSize()) {
// Make the file request into a transaction request
createTransactionRequest(fileReq, netFile);
}
// Queue the file save request
queueFileRequest(fileReq);
}
else if ( Debug.EnableInfo && hasDebug()) {
// DEBUG
Debug.println("## FileLoader Save already queued for " + fileSeg);
}
}
// Update the cache timeout for the temporary file if there are no references to the file. If the file was
// opened for sequential access only it will be expired quicker.
else if ( cacheFile.getFileState().getOpenCount() == 0) {
// If the file was opened for sequential access only then we can delete it from the temporary area sooner
long tmo = System.currentTimeMillis();
if ( cacheFile.isSequentialOnly())
tmo += SequentialFileExpire;
else
tmo += getContext().getCacheTimeout();
// Set the file state expiry, the local file data will be deleted when the file state expires (if there
// are still no references to the file).
cacheFile.getFileState().setExpiryTime(tmo);
}
}
}
/**
* Delete the specified file data
*
* @param fname String
* @param fid int
* @param stid int
* @exception IOException
*/
public void deleteFile(String fname, int fid, int stid)
throws IOException {
// Delete the file data from the database
try {
// Find the associated file state
FileState fstate = m_stateCache.findFileState(fname, false);
if ( fstate != null) {
// Get the file segment details
FileSegmentInfo fileSegInfo = (FileSegmentInfo) fstate.removeAttribute(DBFileSegmentInfo);
if ( fileSegInfo != null) {
try {
// Delete the temporary file
fileSegInfo.deleteTemporaryFile();
}
catch (Exception ex) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader failed to delete temp file " + fileSegInfo.getTemporaryFile());
}
}
}
// Delete the data from the database table
getDBDataInterface().deleteFileData(fid, stid);
}
catch (Exception ex) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader deleteFile() error, " + ex.toString());
}
}
/**
* Request file data to be loaded/saved
*
* @param req FileRequest
*/
public void queueFileRequest(FileRequest req) {
// Pass the request to the background load/save thread pool
m_backgroundLoader.queueFileRequest(req);
}
/**
* Load a file
*
* @param req FileRequest
* @return int
* @exception Exception
*/
public int loadFile(FileRequest req)
throws Exception {
// DEBUG
long startTime = 0L;
SingleFileRequest loadReq = (SingleFileRequest) req;
if ( Debug.EnableInfo && hasDebug()) {
Debug.println("## DBFileLoader loadFile() req=" + loadReq.toString() + ", thread=" + Thread.currentThread().getName());
startTime = System.currentTimeMillis();
}
// Check if the temporary file still exists, if not then the file has been deleted from the filesystem
File tempFile = new File(loadReq.getTemporaryFile());
FileSegment fileSeg = findFileSegmentForPath(loadReq.getVirtualPath());
if ( tempFile.exists() == false || fileSeg == null) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println(" Temporary file deleted");
// Mark the file segment as in error, if valid, and return an error status
if ( fileSeg != null)
fileSeg.setStatus(FileSegmentInfo.Error, false);
return StsError;
}
// Load the file data
FileOutputStream fileOut = null;
int loadSts = StsRequeue;
try {
// Update the segment status
fileSeg.setStatus(FileSegmentInfo.Loading);
// Get the file data details
DBDataDetails dataDetails = getDBDataInterface().getFileDataDetails(loadReq.getFileId(), loadReq.getStreamId());
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println(" Data details: " + dataDetails);
// Check if the file is packaged in a Jar
if ( dataDetails.isStoredInJar()) {
// Load the file data from a Jar file
loadSts = loadFileFromJar(loadReq, tempFile, dataDetails);
// Update the file status, and clear the queued flag
fileSeg.setStatus(FileSegmentInfo.Available, false);
fileSeg.signalDataAvailable();
}
else {
// Load the file data from the main file record(s)
getDBDataInterface().loadFileData(loadReq.getFileId(), loadReq.getStreamId(), fileSeg);
// Set the load status
loadSts = StsSuccess;
// DEBUG
if ( Debug.EnableInfo && hasDebug()) {
long endTime = System.currentTimeMillis();
Debug.println("## DBFileLoader loaded fid=" + loadReq.getFileId() + ", stream=" + loadReq.getStreamId() + ", frags=" + dataDetails.numberOfDataFragments() + ", time=" + (endTime-startTime) + "ms");
}
}
}
catch ( DBException ex) {
// DEBUG
if ( Debug.EnableError && hasDebug())
Debug.println(ex);
// Indicate the file load failed
loadSts = StsError;
}
catch ( IOException ex) {
// DEBUG
if ( Debug.EnableError && hasDebug())
Debug.println(ex);
// Indicate the file load failed
loadSts = StsError;
}
// Check if the file was loaded successfully
if ( loadSts == StsSuccess) {
// Signal that the file data is available
fileSeg.signalDataAvailable();
// Update the file status
fileSeg.setStatus(FileSegmentInfo.Available, false);
// Run the file load processors
runFileLoadedProcessors(getContext(), loadReq.getFileState(), fileSeg);
}
// Return the load file status
return loadSts;
}
/**
* Load the requested file from a Jar file stored in the database. The Jar file must first be loaded from the
* database, then the file data is unpacked to the temporary file. The Jar file is cached as there may be other
* files accessed in the same Jar file.
*
* @param loadReq SingleFileRequest
* @param tempFile File
* @param dataDetails DBDataDetails
* @return int
*/
protected final int loadFileFromJar(SingleFileRequest loadReq, File tempFile, DBDataDetails dataDetails) {
// Check if the Jar file has already been loaded, if so there will be a file state
int loadSts = StsError;
String jarStateName = JarStatePrefix + dataDetails.getJarId();
FileState jarState = getStateCache().findFileState(jarStateName);
FileSegmentInfo segInfo = null;
FileSegment jarSeg = null;
File jarFile = null;
// Check if the Jar already exists in the temporary file area, if not then load the Jar from the database
boolean loadJarReq = false;
if ( jarState == null) {
// Create a new file state for the Jar file
jarFile = new File(getCurrentTempDirectory(), JarFilePrefix + dataDetails.getJarId() + ".jar");
jarState = createFileStateForRequest(-2, jarFile.getAbsolutePath(), jarStateName, FileSegmentInfo.LoadWait);
jarState.setExpiryTime(System.currentTimeMillis() + JarStateTimeout);
segInfo = new FileSegmentInfo(jarFile.getAbsolutePath());
jarState.addAttribute(DBFileSegmentInfo, segInfo);
jarSeg = new FileSegment(segInfo, true);
// Indicate that the Jar requires loading
loadJarReq = true;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Creating new state for Jar, jar=" + jarFile.getAbsolutePath());
}
else {
// Get the file segment status
segInfo = (FileSegmentInfo) jarState.findAttribute(DBFileSegmentInfo);
if ( segInfo != null && segInfo.hasStatus() == FileSegmentInfo.Initial) {
// Create a file segment for the existing segment details and set the Jar file
jarSeg = new FileSegment(segInfo, true);
jarFile = new File(segInfo.getTemporaryFile());
// Indicate that the Jar file must be loaded
loadJarReq = true;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Jar file state requires Jar load, state=Initial");
}
else if ( segInfo == null) {
synchronized ( jarState) {
// Create a new file segment
jarFile = new File(getCurrentTempDirectory(), JarFilePrefix + dataDetails.getJarId() + ".jar");
segInfo = new FileSegmentInfo();
segInfo.setTemporaryFile(jarFile.getAbsolutePath());
jarSeg = new FileSegment(segInfo, true);
jarSeg.setStatus(FileSegmentInfo.LoadWait, true);
// Add the segment to the file state cache
jarState.addAttribute(DBFileSegmentInfo, segInfo);
}
// Indicate that the Jar file must be loaded
loadJarReq = true;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Jar file segment created, load required");
}
else {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("jarSeg=" + jarSeg);
}
}
// Check if the Jar file requires loading
if ( loadJarReq == true) {
try {
// Get the load lock for the Jar file, if we get the lock then load the file data
if ( jarSeg.getLoadLock() == true) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Loading Jar, got load lock ...");
try {
// Load the Jar file data from the database
getDBDataInterface().loadJarData(dataDetails.getJarId(), jarSeg);
// Set the Jar file segment status to indicate that the data has been loaded
jarSeg.setStatus(FileSegmentInfo.Available, false);
// Indicate load successful
loadSts = StsSuccess;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader JAR loaded " + loadReq.toString() + ", jarId=" + dataDetails.getJarId());
}
catch (Exception ex) {
Debug.println(ex);
}
finally {
// Wakeup any other threads waiting on the Jar file load
synchronized ( jarState) {
jarState.notifyAll();
}
}
}
else {
// Check if the file data is now available
if ( jarSeg.hasStatus() == FileSegmentInfo.Available) {
// Indicate that the Jar load was successful
loadSts = StsSuccess;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Waited for load, threadId=" + loadReq.getThreadId());
}
else
loadSts = StsRequeue;
}
}
catch (InterruptedException ex) {
loadSts = StsRequeue;
}
}
else {
// Bump the Jar file state expiry so that it stays in the cache a while longer, might get more hits
jarState.setExpiryTime(System.currentTimeMillis() + JarStateTimeout);
// Get the Jar file segment
segInfo = (FileSegmentInfo) jarState.findAttribute(DBFileSegmentInfo);
jarSeg = new FileSegment(segInfo, true);
// Check if the Jar data is available
if ( jarSeg.hasStatus() != FileSegmentInfo.Available) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader Jar not yet available, " + jarSeg.getTemporaryFile());
// Wait until the Jar has been loaded
int retryCnt = 0;
while ( retryCnt++ < 50 && jarSeg.hasStatus() != FileSegmentInfo.Available) {
// Sleep for a while
try {
Thread.sleep(250);
}
catch (InterruptedException ex) {
}
}
// Check if the Jar file has been loaded
if ( jarSeg.hasStatus() != FileSegmentInfo.Available) {
// Requeue the file load request
loadSts = StsRequeue;
}
else {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println(" Jar file loaded, waited " + (250 * retryCnt) + "ms");
}
}
else {
// Indicate that the Jar file is loaded
loadSts = StsSuccess;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader Jar cache hit, re-using " + jarSeg.getTemporaryFile());
}
}
// If the Jar file has been loaded, or is available, then extract the required file
if ( loadSts == StsSuccess) {
// Open the Jar file and copy the required file data to the temporary file
JarFile jar = null;
FileOutputStream outFile = null;
InputStream jarIn = null;
try {
// Open the Jar file
jar = new JarFile(jarSeg.getTemporaryFile());
// Find the required entry in the Jar
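// (Jar entries are named using the temporary file prefix and file id, matching the names written by storeMultipleFile())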
JarEntry jarEntry = jar.getJarEntry(TempFilePrefix + loadReq.getFileId() + ".tmp");
if ( jarEntry != null) {
// Open the Jar entry to read the data and temporary file to write the data to
jarIn = jar.getInputStream(jarEntry);
outFile = new FileOutputStream(tempFile);
// Create a buffer to read/write the file data
long startTime = System.currentTimeMillis();
byte[] buf = new byte[1024];
int totLen = 0;
int rdlen = jarIn.read(buf);
while ( rdlen > 0) {
// Write the data to the temporary file
outFile.write(buf,0,rdlen);
totLen += rdlen;
// Read another buffer of data from the Jar file
rdlen = jarIn.read(buf);
}
long stopTime = System.currentTimeMillis();
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("Loaded file " + jarEntry.getName() + ", size=" + totLen + ", in " + (stopTime-startTime) + "ms");
// Close the Jar stream and output file
jarIn.close();
jarIn = null;
outFile.close();
outFile = null;
}
else {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader Failed to find file in Jar, fid=" + loadReq.getFileId() + ", jar=" + jarSeg.getTemporaryFile());
// Set the load status to indicate error
loadSts = StsError;
}
}
catch (Exception ex) {
if ( Debug.EnableError) {
Debug.println("Error in worker thread=" + loadReq.getThreadId());
Debug.println(ex);
}
}
finally {
// Close the Jar entry
if ( jarIn != null) {
try {
jarIn.close();
}
catch (IOException ex) {
}
}
// Close the Jar file
if ( jar != null) {
try {
jar.close();
}
catch (IOException ex) {
}
}
// Close the output file
if ( outFile != null) {
try {
outFile.close();
}
catch (IOException ex) {
Debug.println(ex);
}
}
}
}
// Return the load file status
return loadSts;
}
/**
* Store a file
*
* @param req FileRequest
* @return int
* @exception Exception
*/
public int storeFile(FileRequest req)
throws Exception {
// Check for a single file request
int saveSts = StsError;
if ( req instanceof SingleFileRequest) {
// Process a single file save request
saveSts = storeSingleFile((SingleFileRequest) req);
}
// Check for a multi file request
else if ( req instanceof MultipleFileRequest) {
// Process a multi file save request
saveSts = storeMultipleFile((MultipleFileRequest) req);
}
else {
// Unknown request type
if ( Debug.EnableError)
Debug.println("## DBFileLoader Unknown save type - " + req.getClass().getName());
}
// Return the data save status
return saveSts;
}
/**
* Process a store single file request
*
* @param saveReq SingleFileRequest
* @return int
* @throws Exception
*/
protected final int storeSingleFile(SingleFileRequest saveReq)
throws Exception {
// DEBUG
long startTime = 0L;
if ( Debug.EnableInfo && hasDebug()) {
Debug.println("## DBFileLoader storeFile() req=" + saveReq.toString() + ", thread=" + Thread.currentThread().getName());
startTime = System.currentTimeMillis();
}
// Check if the temporary file still exists, if not then the file has been deleted from the filesystem
File tempFile = new File(saveReq.getTemporaryFile());
FileSegment fileSeg = findFileSegmentForPath(saveReq.getVirtualPath());
if ( tempFile.exists() == false || fileSeg == null) {
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println(" Temporary file deleted");
// Return an error status
return StsError;
}
// Run any file store processors
runFileStoreProcessors(m_dbCtx, saveReq.getFileState(), fileSeg);
// Get the temporary file size
long fileSize = tempFile.length();
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("## DBFileLoader fileSize=" + fileSize);
// Update the segment status, and clear the updated flag
fileSeg.setStatus(FileSegmentInfo.Saving);
fileSeg.getInfo().setUpdated(false);
try {
// Save the file data to the database
getDBDataInterface().saveFileData(saveReq.getFileId(), saveReq.getStreamId(), fileSeg);
}
catch (DBException ex) {
Debug.println(ex);
}
catch (IOException ex) {
Debug.println(ex);
}
// DEBUG
if ( Debug.EnableInfo && hasDebug()) {
long endTime = System.currentTimeMillis();
Debug.println("## DBFileLoader saved file=" + saveReq.toString() + ", time=" + (endTime-startTime) + "ms");
}
// Update the segment status
fileSeg.setStatus(FileSegmentInfo.Saved, false);
// Indicate that the file save request was processed
return StsSuccess;
}
/**
* Process a store multiple file request
*
* @param saveReq MultipleFileRequest
* @return int
* @throws Exception
*/
protected final int storeMultipleFile(MultipleFileRequest saveReq)
throws Exception {
// Create the Jar file and pack all temporary files
File jarFile = null;
JarOutputStream outJar = null;
try {
// Create the Jar file in the temporary cache area
jarFile = File.createTempFile("JAR_", ".jar", getCurrentTempDirectory());
FileOutputStream outFile = new FileOutputStream(jarFile);
outJar = new JarOutputStream(outFile);
// Set the Jar compression level (0=no compression, 9=highest compression)
outJar.setLevel(getJarCompressionLevel());
// Create a read buffer
byte[] inbuf = new byte[65000];
// Write each temporary file to the Jar file
for ( int i = 0; i < saveReq.getNumberOfFiles(); i++) {
// Get the current temporary file
CachedFileInfo finfo = saveReq.getFileInfo(i);
FileState fstate = finfo.getFileState();
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("DBFileLoader storeMultipleFile() info=" + finfo + ", fstate=" + fstate);
if ( fstate != null && fstate.fileExists() == true) {
// Create a Jar entry for the temporary file and write the entry to the Jar file
String entryName = null;
if ( finfo.getStreamId() > 0)
entryName = getTempFilePrefix() + finfo.getFileId() + "_" + finfo.getStreamId() + ".tmp";
else
entryName = getTempFilePrefix() + finfo.getFileId() + ".tmp";
JarEntry jarEntry = new JarEntry(entryName);
outJar.putNextEntry(jarEntry);
// Open the temporary file
FileInputStream tempFile = new FileInputStream(finfo.getTemporaryPath());
// Write the temporary file data to the Jar file
int rdlen = tempFile.read(inbuf);
while ( rdlen > 0) {
outJar.write(inbuf,0,rdlen);
rdlen = tempFile.read(inbuf);
}
// Close the temporary file, close the Jar file entry
tempFile.close();
outJar.closeEntry();
}
else if ( Debug.EnableInfo && hasDebug()) {
// DEBUG
Debug.println("## DBFileLoader storeMultipleFile() ignored file " + finfo.getTemporaryPath() + ", exists=false");
}
}
}
catch ( IOException ex) {
Debug.println(ex);
}
finally {
// Close the Jar file
if ( outJar != null) {
try {
outJar.close();
}
catch (Exception ex) {
}
}
}
// Write the Jar file to the database
int saveSts = StsRequeue;
try {
// Create a list of the files/streams contained in the Jar file
DBDataDetailsList fileList = new DBDataDetailsList();
for ( int i = 0; i < saveReq.getNumberOfFiles(); i++) {
// Get the current cached file
CachedFileInfo finfo = saveReq.getFileInfo(i);
// Add details of the file/stream to the Jar file list
fileList.addFile(new DBDataDetails(finfo.getFileId(), finfo.getStreamId()));
}
// Save the Jar file data to the database
getDBDataInterface().saveJarData( jarFile.getAbsolutePath(), fileList);
// Indicate that the database update was successful
saveSts = StsSuccess;
// Delete the temporary Jar file
if ( hasKeepJars() == false)
jarFile.delete();
// Update the file segment state for all files in the transaction
for ( int i = 0; i < saveReq.getNumberOfFiles(); i++) {
// Get the current cached file
CachedFileInfo finfo = saveReq.getFileInfo(i);
// Clear the cached file state
if ( finfo.hasFileState()) {
FileSegmentInfo fileSegInfo = (FileSegmentInfo) finfo.getFileState().findAttribute(DBFileSegmentInfo);
if ( fileSegInfo != null) {
fileSegInfo.setQueued(false);
fileSegInfo.setUpdated(false);
fileSegInfo.setStatus(FileSegmentInfo.Saved);
}
}
}
}
catch (DBException ex) {
Debug.println(ex);
}
catch (IOException ex) {
Debug.println(ex);
}
// Return the data save status
return saveSts;
}
/**
* Initialize the file loader using the specified parameters
*
* @param params ConfigElement
* @param ctx DeviceContext
* @exception FileLoaderException
* @exception IOException
*/
public void initializeLoader(ConfigElement params, DeviceContext ctx)
throws FileLoaderException, IOException {
// Debug output enable
if ( params.getChild("Debug") != null)
m_debug = true;
// Get the count of worker threads to create
ConfigElement nameVal = params.getChild("ThreadPoolSize");
if ( nameVal != null && (nameVal.getValue() == null || nameVal.getValue().length() == 0))
throw new FileLoaderException("FileLoader ThreadPoolSize parameter is null");
// Convert the thread pool size parameter, or use the default value
m_readWorkers = DefaultWorkerThreads;
m_writeWorkers = DefaultWorkerThreads;
if ( nameVal != null) {
try {
// Check for a single value or split read/write values
String numVal = nameVal.getValue();
int rdCnt = -1;
int wrtCnt = -1;
int pos = numVal.indexOf(':');
if ( pos == -1) {
// Use the same number of read and write worker threads
rdCnt = Integer.parseInt(numVal);
wrtCnt = rdCnt;
}
else {
// Split the string value into read and write values, and convert to integers
String val = numVal.substring(0,pos);
rdCnt = Integer.parseInt(val);
val = numVal.substring(pos + 1);
wrtCnt = Integer.parseInt(val);
}
// Set the read/write thread pool sizes
m_readWorkers = rdCnt;
m_writeWorkers = wrtCnt;
}
catch (NumberFormatException ex) {
throw new FileLoaderException("DBFileLoader Invalid ThreadPoolSize value, " + ex.toString());
}
}
// Range check the thread pool size
if ( m_readWorkers < MinimumWorkerThreads || m_readWorkers > MaximumWorkerThreads)
throw new FileLoaderException("DBFileLoader Invalid ThreadPoolSize (read), valid range is " + MinimumWorkerThreads + "-" + MaximumWorkerThreads);
if ( m_writeWorkers < MinimumWorkerThreads || m_writeWorkers > MaximumWorkerThreads)
throw new FileLoaderException("DBFileLoader Invalid ThreadPoolSize (write), valid range is " + MinimumWorkerThreads + "-" + MaximumWorkerThreads);
// Get the temporary file data directory
ConfigElement tempArea = params.getChild("TempDirectory");
if ( tempArea == null || tempArea.getValue() == null || tempArea.getValue().length() == 0)
throw new FileLoaderException("FileLoader TempDirectory not specified or null");
// Validate the temporary directory
m_tempDirName = tempArea.getValue();
if ( m_tempDirName != null && m_tempDirName.endsWith(File.separator) == false)
m_tempDirName = m_tempDirName + File.separator;
m_tempDir = new File(m_tempDirName);
if ( m_tempDir.exists() == false || m_tempDir.isDirectory() == false)
throw new FileLoaderException("FileLoader TempDirectory does not exist, or is not a directory, " + m_tempDirName);
if ( m_tempDir.canWrite() == false)
throw new FileLoaderException("FileLoader TempDirectory is not writeable, " + m_tempDirName);
// Create the starting temporary sub-directory
createNewTempDirectory();
// Check if the maximum files per sub-directory has been specified
ConfigElement maxFiles = params.getChild("MaximumFilesPerDirectory");
if ( maxFiles != null) {
try {
m_tempMax = Integer.parseInt(maxFiles.getValue());
// Range check the maximum files per sub-directory
if ( m_tempMax < 10 || m_tempMax > 20000)
throw new FileLoaderException("FileLoader MaximumFilesPerDirectory out of valid range (10-20000)");
}
catch (NumberFormatException ex) {
throw new FileLoaderException("FileLoader MaximumFilesPerDirectory invalid, " + maxFiles.getValue());
}
}
else
m_tempMax = MaximumFilesPerSubDir;
// Check if transaction support should be enabled. If enabled small files are bundled together into a single
// file request for special processing by the file loader storeFile() method.
nameVal = params.getChild("SmallFileSize");
if ( nameVal != null) {
// Use the default settings unless specified
m_sizePerJar = DefaultSizePerJar;
m_filesPerJar = DefaultFilesPerJar;
// Parse/validate the small file size parameter
try {
// Convert the small file size
m_smallFileSize = MemorySize.getByteValue(nameVal.getValue());
// Range check the small file size
if ( m_smallFileSize < 0)
throw new FileLoaderException("Invalid small file size value, " + nameVal.getValue());
}
catch (NumberFormatException ex) {
throw new FileLoaderException("Invalid small file size value, " + nameVal.getValue());
}
// Check if the files per Jar setting has been specified
nameVal = params.getChild("FilesPerJar");
if ( nameVal != null) {
try {
// Convert the files per Jar value
m_filesPerJar = Integer.parseInt(nameVal.getValue());
// Range check the files per Jar value
if ( m_filesPerJar < MinimumFilesPerJar)
throw new FileLoaderException("Files per jar setting is below minimum of " + MinimumFilesPerJar);
}
catch (NumberFormatException ex) {
throw new FileLoaderException("Invalid files per Jar setting, " + nameVal.getValue());
}
}
// Check if the size per Jar setting has been specified
nameVal = params.getChild("SizePerJar");
if ( nameVal != null) {
try {
// Convert the size per Jar value
m_sizePerJar = MemorySize.getByteValueInt(nameVal.getValue());
// Range check the size per Jar value
if ( m_sizePerJar < MinimumSizePerJar)
throw new FileLoaderException("Size per jar setting is below minimum of " + MinimumSizePerJar);
}
catch (NumberFormatException ex) {
throw new FileLoaderException("Invalid size per Jar setting, " + nameVal.getValue());
}
}
// Check if the transaction timeout setting has been specified
m_tranTimeout = DefaultTransactionTimeout;
nameVal = params.getChild("TransactionTimeout");
if ( nameVal != null) {
try {
// Convert the transaction timeout value
m_tranTimeout = Long.parseLong(nameVal.getValue()) * 1000L;
// Range check the transaction timeout value
if ( m_tranTimeout < MinimumTransactionTimeout || m_tranTimeout > MaximumTransactionTimeout)
throw new FileLoaderException("Invalid transaction timeout value, out of valid range, " + nameVal.getValue());
}
catch (NumberFormatException ex) {
throw new FileLoaderException("Invalid transaction timeout value, " + nameVal.getValue());
}
}
}
// Check if there are any file processors configured
ConfigElement fileProcs = params.getChild("FileProcessors");
if ( fileProcs != null && fileProcs.hasChildren()) {
// Validate the file processor classes and add to the file loader
List<ConfigElement> procList = fileProcs.getChildren();
for ( ConfigElement procElem : procList) {
// Get the current file processor class name
if ( procElem.getValue() == null || procElem.getValue().length() == 0)
throw new FileLoaderException("Empty file processor class name");
// Validate the file processor class name and create an instance of the file processor
try {
// Create the file processor instance
Object procObj = Class.forName(procElem.getValue()).newInstance();
// Check that it is a file processor implementation
if ( procObj instanceof FileProcessor) {
// Add to the list of file processors
addFileProcessor((FileProcessor) procObj);
}
else
throw new FileLoaderException("Class " + procElem.getValue() + " is not a FileProcessor implementation");
}
catch (ClassNotFoundException ex) {
throw new FileLoaderException("File processor class not found, " + procElem.getValue());
}
catch (InstantiationException ex) {
throw new FileLoaderException("File processor exception, " + ex.toString());
}
catch (IllegalAccessException ex) {
throw new FileLoaderException("File processor exception, " + ex.toString());
}
}
}
// Check if the fragment size has been specified
ConfigElement nv = params.getChild("FragmentSize");
if ( nv != null) {
// Set the file data fragment size
m_fragSize = MemorySize.getByteValue(nv.getValue());
// Range check the value
if ( m_fragSize < MIN_FRAGSIZE || m_fragSize > MAX_FRAGSIZE)
throw new FileLoaderException("FragmentSize is out of valid range (64K - 20Mb");
}
// Check if transaction request Jar files should be kept in the temporary area
nv = params.getChild("KeepJars");
if ( nv != null)
m_keepJars = true;
// Check if the Jar compression level has been specified
m_jarCompressLevel = JarDefaultCompression;
nv = params.getChild("JarCompressionLevel");
if ( nv != null) {
try {
// Convert the compression level value
m_jarCompressLevel = Integer.parseInt(nv.getValue());
// Check if the compression level is valid
if ( m_jarCompressLevel < 0 || m_jarCompressLevel > 9)
throw new FileLoaderException("Invalid Jar compression level, valid range is 0 - 9");
}
catch (NumberFormatException ex) {
throw new FileLoaderException("Invalid Jar compression level, " + nv.getValue());
}
}
// Check if the database interface being used supports the required features
DBQueueInterface dbQueue = null;
if ( ctx instanceof DBDeviceContext) {
// Access the database device context
m_dbCtx = (DBDeviceContext) ctx;
// Check if the request queue is supported by the database interface
if ( getContext().getDBInterface().supportsFeature(DBInterface.FeatureQueue) == false)
throw new FileLoaderException("DBLoader requires queue support in database interface");
if ( getContext().getDBInterface() instanceof DBQueueInterface)
dbQueue = (DBQueueInterface) getContext().getDBInterface();
else
throw new FileLoaderException("Database interface does not implement queue interface");
// Check if the data store feature is supported by the database interface
if ( getContext().getDBInterface().supportsFeature(DBInterface.FeatureData) == false)
throw new FileLoaderException("DBLoader requires data support in database interface");
if ( getContext().getDBInterface() instanceof DBDataInterface)
m_dbDataInterface = (DBDataInterface) getContext().getDBInterface();
else
throw new FileLoaderException("Database interface does not implement data interface");
// Check if the Jar data store feature is supported by the database interface, if Jar files are enabled
if ( getSmallFileSize() > 0 && getContext().getDBInterface().supportsFeature(DBInterface.FeatureJarData) == false)
throw new FileLoaderException("DBLoader requires Jar data support in database interface");
}
else
throw new FileLoaderException("Requires database device context");
// Get the file state cache from the context
m_stateCache = getContext().getStateCache();
// Add the file loader as a file state listener so that we can cleanup temporary data files
m_stateCache.addStateListener(this);
// Check if background loader debug is enabled
boolean bgDebug = false;
if ( params.getChild("ThreadDebug") != null)
bgDebug = true;
// Perform a queue cleanup before starting the thread pool. This will check the temporary cache area and delete
// files that are not part of a queued save/transaction save request.
FileRequestQueue recoveredQueue = null;
try {
// Cleanup the temporary cache area and queue
recoveredQueue = dbQueue.performQueueCleanup(m_tempDir, TempDirPrefix, TempFilePrefix, JarFilePrefix);
// DEBUG
if ( recoveredQueue != null && Debug.EnableInfo && hasDebug())
Debug.println("[DBLoader] Cleanup recovered " + recoveredQueue.numberOfRequests() + " pending save files");
}
catch (DBException ex) {
// DEBUG
if ( Debug.EnableError && hasDebug())
Debug.println(ex);
}
// Check if there are any file save requests pending in the queue database
FileRequestQueue saveQueue = new FileRequestQueue();
try {
dbQueue.loadFileRequests( 1, FileRequest.SAVE, saveQueue, 1);
dbQueue.loadFileRequests(1, FileRequest.TRANSSAVE, saveQueue, 1);
}
catch ( DBException ex) {
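// Ignore errors loading pending save requests, the worker pool will start with a zero pending request count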
}
// Create the background load/save thread pool
m_backgroundLoader = new BackgroundLoadSave("DBLdr", dbQueue, m_stateCache, this);
m_backgroundLoader.setReadWorkers(m_readWorkers);
m_backgroundLoader.setWriteWorkers(m_writeWorkers);
m_backgroundLoader.setDebug(bgDebug);
// Start the worker thread pool
m_backgroundLoader.startThreads( saveQueue.numberOfRequests());
// Check if transactions are enabled, if so then start the transaction timer thread
if ( getSmallFileSize() > 0) {
// Enable the transaction loader in the background loader
m_backgroundLoader.enableTransactions();
// Create the transaction timer thread to flush incomplete transaction requests
m_transTimer = new TransactionTimer("DBLdrTransTimer", getTransactionTimeout());
}
}
/**
* Shutdown the file loader and release all resources
*
* @param immediate boolean
*/
public void shutdownLoader(boolean immediate) {
// Shutdown the background load/save thread pool
if ( m_backgroundLoader != null)
m_backgroundLoader.shutdownThreads();
// Shutdown the transaction timer thread, if active
if ( m_transTimer != null)
m_transTimer.shutdownRequest();
}
/**
* Run the file store file processors
*
* @param context DiskDeviceContext
* @param state FileState
* @param segment FileSegment
*/
protected final void runFileStoreProcessors(DiskDeviceContext context, FileState state, FileSegment segment) {
// Check if there are any file processors configured
if ( m_fileProcessors == null || m_fileProcessors.numberOfProcessors() == 0)
return;
try {
// Run all of the file store processors
for ( int i = 0; i < m_fileProcessors.numberOfProcessors(); i++) {
// Get the current file processor
FileProcessor fileProc = m_fileProcessors.getProcessorAt(i);
// Run the file processor
fileProc.processStoredFile(context, state, segment);
}
// Make sure the file segment is closed after processing
segment.closeFile();
}
catch (Exception ex) {
// DEBUG
if ( Debug.EnableError && hasDebug()) {
Debug.println("$$ Store file processor exception");
Debug.println(ex);
}
}
}
/**
* Run the file load file processors
*
* @param context DiskDeviceContext
* @param state FileState
* @param segment FileSegment
*/
protected final void runFileLoadedProcessors(DiskDeviceContext context, FileState state, FileSegment segment) {
// Check if there are any file processors configured
if ( m_fileProcessors == null || m_fileProcessors.numberOfProcessors() == 0)
return;
try {
// Run all of the file load processors
for ( int i = 0; i < m_fileProcessors.numberOfProcessors(); i++) {
// Get the current file processor
FileProcessor fileProc = m_fileProcessors.getProcessorAt(i);
// Run the file processor
fileProc.processLoadedFile(context, state, segment);
}
// Make sure the file segment is closed after processing
segment.closeFile();
}
catch (Exception ex) {
// DEBUG
if ( Debug.EnableError && hasDebug()) {
Debug.println("$$ Load file processor exception");
Debug.println(ex);
}
}
}
/**
* Re-create, or attach, a file segment to the file state for a queued file request.
*
* @param fid int
* @param tempPath String
* @param virtPath String
* @param sts int
* @return FileState
*/
protected final FileState createFileStateForRequest(int fid, String tempPath, String virtPath, int sts) {
// Find, or create, the file state for the file/directory
FileState state = m_stateCache.findFileState(virtPath, false);
if ( state == null) {
// Create a new file state for the path
state = m_stateCache.findFileState(virtPath, true);
synchronized ( state) {
// Prevent the file state from expiring whilst the request is queued against it
state.setExpiryTime(FileState.NoTimeout);
// Indicate that the file exists, set the unique file id
state.setFileStatus( FileStatus.FileExists);
state.setFileId(fid);
// Check if the file segment has been attached to the file state
FileSegmentInfo fileSegInfo = (FileSegmentInfo) state.findAttribute(DBFileSegmentInfo);
FileSegment fileSeg = null;
if ( fileSegInfo == null) {
// Create a new file segment
fileSegInfo = new FileSegmentInfo();
fileSegInfo.setTemporaryFile(tempPath);
fileSeg = new FileSegment(fileSegInfo, true);
fileSeg.setStatus(sts, true);
// Add the segment to the file state cache
state.addAttribute(DBFileSegmentInfo, fileSegInfo);
}
else {
// Make sure the file segment indicates it is part of a queued request
fileSeg = new FileSegment(fileSegInfo, true);
fileSeg.setStatus(sts, true);
}
}
}
// Return the file state
return state;
}
/**
* Find the file segment for the specified virtual path
*
* @param virtPath String
* @return FileSegment
*/
protected final FileSegment findFileSegmentForPath(String virtPath) {
// Get the file state for the virtual path
FileState fstate = m_stateCache.findFileState(virtPath, false);
if ( fstate == null)
return null;
// Get the file segment
FileSegmentInfo segInfo = null;
FileSegment fileSeg = null;
synchronized (fstate) {
// Get the associated file segment
segInfo = (FileSegmentInfo) fstate.findAttribute(DBFileSegmentInfo);
fileSeg = new FileSegment(segInfo, true);
}
// Return the file segment
return fileSeg;
}
/**
* Determine if the loader supports NTFS streams
*
* @return boolean
*/
public boolean supportsStreams() {
// Check if the database implementation supports the NTFS streams feature
if ( getContext() != null)
return getContext().getDBInterface().supportsFeature(DBInterface.FeatureNTFS);
return true;
}
/**
* Create a new temporary sub-directory
*/
private final void createNewTempDirectory() {
// Create the starting temporary sub-directory
m_curTempName = m_tempDirName + getTempDirectoryPrefix() + m_curTempIdx++;
m_curTempDir = new File(m_curTempName);
if ( m_curTempDir.exists() == false)
m_curTempDir.mkdir();
// Clear the temporary file count
m_tempCount = 0;
// DEBUG
if ( Debug.EnableInfo && hasDebug())
Debug.println("DBFileLoader Created new temp directory - " + m_curTempName);
}
/**
* File state has expired. The listener can control whether the file state is removed
* from the cache, or not.
*
* @param state FileState
* @return true to remove the file state from the cache, or false to leave the file state in the cache
*/
public boolean fileStateExpired(FileState state) {
// Check if the file state has an associated file segment
FileSegmentInfo segInfo = (FileSegmentInfo) state.findAttribute(DBFileSegmentInfo);
boolean expire = true;
if ( segInfo != null) {
// Check if the file has a request queued
if ( segInfo.isQueued() == false) {
try {
// Delete the temporary file and reset the segment status so that the data may be loaded again
// if required.
if ( segInfo.hasStatus() != FileSegmentInfo.Initial) {
// Delete the temporary file
try {
segInfo.deleteTemporaryFile();
}
catch (IOException ex) {
// DEBUG
if ( Debug.EnableError) {
Debug.println("Delete temp file error: " + ex.toString());
File tempFile = new File(segInfo.getTemporaryFile());
Debug.println(" TempFile file=" + tempFile.getAbsolutePath() + ", exists=" + tempFile.exists());
Debug.println(" FileState state=" + state);
Debug.println(" FileSegmentInfo segInfo=" + segInfo);
Debug.println(" StateCache size=" + m_stateCache.numberOfStates());
}
}
// Remove the file segment, reset the file segment back to the initial state
state.removeAttribute(DBFileSegmentInfo);
segInfo.setStatus(FileSegmentInfo.Initial);
// Reset the file state to indicate file data load required
state.setStatus(FileState.FILE_LOADWAIT);
// Check if the temporary file sub-directory is now empty, and it is not the current temporary sub-directory
if ( segInfo.getTemporaryFile().startsWith(m_curTempName) == false) {
// Check if the sub-directory is empty
File tempFile = new File(segInfo.getTemporaryFile());
File subDir = tempFile.getParentFile();
String[] files = subDir.list();
if ( files == null || files.length == 0)
subDir.delete();
}
// Indicate that the file state should not be deleted
expire = false;
// Debug
if ( Debug.EnableInfo && hasDebug())
Debug.println("$$ Deleted temporary file " + segInfo.getTemporaryFile() + " [EXPIRED] $$");
}
// If the file state is not to be deleted reset the file state expiration timer
if ( expire == false)
state.setExpiryTime(System.currentTimeMillis() + getContext().getCacheTimeout());
}
catch ( Exception ex) {
// DEBUG
if ( Debug.EnableError) {
Debug.println("$$ " + ex.toString());
Debug.println(" state=" + state);
}
}
}
else {
// File state is queued, do not expire
expire = false;
}
}
else if ( state.isDirectory()) {
// Nothing to do when it's a directory, just allow it to expire
expire = true;
}
// Return true if the file state can be expired
return expire;
}
/**
* File state cache is closing down, any resources attached to the file state must be released.
*
* @param state FileState
*/
public void fileStateClosed(FileState state) {
// DEBUG
if ( state == null) {
Debug.println("%%%%% FileLoader.fileStateClosed() state=NULL %%%%%");
return;
}
// Check if the file state has an associated file
FileSegmentInfo segInfo = (FileSegmentInfo) state.findAttribute(DBFileSegmentInfo);
if ( segInfo != null && segInfo.isQueued() == false && segInfo.hasStatus() != FileSegmentInfo.SaveWait) {
try {
// Delete the temporary file
segInfo.deleteTemporaryFile();
// Debug
if ( Debug.EnableInfo && hasDebug())
Debug.println("$$ Deleted temporary file " + segInfo.getTemporaryFile() + " [CLOSED] $$");
}
catch ( IOException ex) {
}
}
}
/**
* Create a file segment to load/save the file data
*
* @param state FileState
* @param params FileOpenParams
* @param fname String
* @param fid int
* @param stid int
* @param did int
* @return CachedNetworkFile
* @exception IOException
*/
private final CachedNetworkFile createNetworkFile(FileState state, FileOpenParams params, String fname, int fid,
int stid, int did)
throws IOException {
// The file state is used to synchronize the creation of the file segment as there may be other
// sessions opening the file at the same time. We have to be careful that only one thread creates the
// file segment.
FileSegment fileSeg = null;
MemorySegmentList memList = null;
CachedNetworkFile netFile = null;
synchronized ( state) {
// Check if the file segment has been attached to the file state
FileSegmentInfo fileSegInfo = (FileSegmentInfo) state.findAttribute(DBFileSegmentInfo);
if ( fileSegInfo == null) {
// Check if we need to create a new temporary sub-directory
if ( m_tempCount++ >= m_tempMax)
createNewTempDirectory();
// Create a unique temporary file name
StringBuffer tempName = new StringBuffer();
tempName.append(getTempFilePrefix());
tempName.append(fid);
if ( stid > 0) {
tempName.append("_");
tempName.append(stid);
// DEBUG
if ( Debug.EnableInfo)
Debug.println("## Temp file for stream ##");
}
tempName.append(".tmp");
// Create a new file segment
fileSegInfo = new FileSegmentInfo();
fileSeg = FileSegment.createSegment(fileSegInfo, tempName.toString(), m_curTempDir, params.isReadOnlyAccess() == false);
// Add the segment to the file state cache
state.addAttribute(DBFileSegmentInfo, fileSegInfo);
// Check if the file is zero length, if so then set the file segment state to indicate it is available
DBFileInfo finfo = (DBFileInfo) state.findAttribute(FileState.FileInformation);
if ( finfo != null && finfo.getSize() == 0)
fileSeg.setStatus(FileSegmentInfo.Available);
}
else {
// Create the file segment to map to the existing temporary file
fileSeg = new FileSegment(fileSegInfo, params.isReadOnlyAccess() == false);
// Check if the temporary file exists, if not then create it
File tempFile = new File(fileSeg.getTemporaryFile());
if ( tempFile.exists() == false) {
// Create the temporary file
tempFile.createNewFile();
// Reset the file segment state to indicate a file load is required
fileSeg.setStatus(FileSegmentInfo.Initial);
}
}
// Create the new network file
netFile = new CachedNetworkFile(fname, fid, stid, did, state, fileSeg, this);
netFile.setGrantedAccess(params.isReadOnlyAccess() ? NetworkFile.READONLY : NetworkFile.READWRITE);
netFile.setSequentialOnly(params.isSequentialAccessOnly());
netFile.setAttributes(params.getAttributes());
netFile.setFullName(params.getPath());
if ( stid != 0)
netFile.setStreamName(params.getStreamName());
}
// Return the network file
return netFile;
}
/**
* Create a transaction file request
*
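* <p>Small file save requests are batched into the current transaction. When the file count exceeds the
* FilesPerJar setting, or the cumulative size (each file counted as at least TransactionMinimumFileSize bytes)
* reaches the SizePerJar setting, the request is marked as the last file in the transaction and a new
* transaction id is started.
*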
* @param req SingleFileRequest
* @param netFile NetworkFile
*/
private final void createTransactionRequest(SingleFileRequest req, NetworkFile netFile) {
synchronized (m_tranLock) {
// Add the current file size to the current transaction size, and file count
m_totFiles++;
int fsize = netFile.getFileSizeInt();
if ( fsize < TransactionMinimumFileSize)
fsize = TransactionMinimumFileSize;
m_totFileSize += fsize;
// Check if a new transaction should be started
boolean lastFile = false;
if ( getFilesPerJar() > 0 && m_totFiles > getFilesPerJar())
lastFile = true;
else if ( getJarFileSize() > 0 && m_totFileSize >= getJarFileSize())
lastFile = true;
// Set the transaction id for the request
req.setTransactionId(m_tranId, lastFile);
// Store the time this file was added to the transaction
m_lastTranFile = System.currentTimeMillis();
// Start a new transaction if this is the last file
if ( lastFile) {
m_tranId++;
m_totFiles = 0;
m_totFileSize = 0;
m_lastTranFile = 0L;
}
}
}
}