Java Examples for org.apache.hadoop.util.GSet

The following Java examples illustrate the usage of org.apache.hadoop.util.GSet. The source code samples are taken from different open source projects.
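Before the project samples, here is a minimal, self-contained sketch of the GSet API. The CachedEntry element class is hypothetical and exists only for illustration; the GSet and LightWeightGSet calls shown are the standard ones from hadoop-common. GSet&lt;K, E extends K&gt; behaves like a set that can also be queried by key, and LightWeightGSet is the low-overhead implementation used throughout the HDFS namenode: every stored element implements LightWeightGSet.LinkedElement so the table can chain entries without allocating a node object per entry.

import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

public class GSetSketch {

    // Hypothetical element type. Anything stored in a LightWeightGSet must
    // implement LightWeightGSet.LinkedElement so the set can chain entries
    // in its hash buckets without per-entry wrapper objects.
    static class CachedEntry implements LightWeightGSet.LinkedElement {
        private final long id;
        private LightWeightGSet.LinkedElement next;

        CachedEntry(long id) {
            this.id = id;
        }

        @Override
        public boolean equals(Object o) {
            return o instanceof CachedEntry && ((CachedEntry) o).id == id;
        }

        @Override
        public int hashCode() {
            return (int) (id ^ (id >>> 32));
        }

        @Override
        public void setNext(LightWeightGSet.LinkedElement next) {
            this.next = next;
        }

        @Override
        public LightWeightGSet.LinkedElement getNext() {
            return next;
        }
    }

    public static void main(String[] args) {
        // Size the table as a percentage of the JVM's maximum heap,
        // the same pattern the INodeMap example below uses.
        int capacity = LightWeightGSet.computeCapacity(1, "CachedEntryMap");
        GSet<CachedEntry, CachedEntry> set =
                new LightWeightGSet<CachedEntry, CachedEntry>(capacity);

        set.put(new CachedEntry(42L));                            // add or replace
        System.out.println(set.contains(new CachedEntry(42L)));  // true
        System.out.println(set.get(new CachedEntry(42L)).id);    // 42
        for (CachedEntry e : set) {                               // GSet is Iterable
            System.out.println(e.id);
        }
        set.remove(new CachedEntry(42L));
        System.out.println(set.size());                           // 0
    }
}

In the samples below, CachedBlock and INodeWithAdditionalFields play the same role as the hypothetical CachedEntry: they implement LinkedElement themselves, which is what lets the namenode keep millions of them in a GSet with very little memory overhead.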

Example 1
Project: hadoop-release-2.6.0-master  File: TestCacheDirectives.java
// Polling condition from the test: counts the cached blocks and cached replicas
// in the CacheManager's GSet under the namesystem read lock, logs the current
// totals, and returns true once both match the expected values (-1 means "don't care").
@Override
public Boolean get() {
    int numCachedBlocks = 0, numCachedReplicas = 0;
    namesystem.readLock();
    try {
        GSet<CachedBlock, CachedBlock> cachedBlocks = cacheManager.getCachedBlocks();
        if (cachedBlocks != null) {
            for (Iterator<CachedBlock> iter = cachedBlocks.iterator(); iter.hasNext(); ) {
                CachedBlock cachedBlock = iter.next();
                numCachedBlocks++;
                numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size();
            }
        }
    } finally {
        namesystem.readUnlock();
    }
    LOG.info(logString + " cached blocks: have " + numCachedBlocks + " / " + expectedCachedBlocks + ".  " + "cached replicas: have " + numCachedReplicas + " / " + expectedCachedReplicas);
    if (expectedCachedBlocks == -1 || numCachedBlocks == expectedCachedBlocks) {
        if (expectedCachedReplicas == -1 || numCachedReplicas == expectedCachedReplicas) {
            return true;
        }
    }
    return false;
}
Example 2
Project: HDP-2.2-Patched-master  File: INodeMap.java
static INodeMap newInstance(INodeDirectory rootDir) {
    // Compute the map capacity by allocating 1% of total memory
    int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
    GSet<INode, INodeWithAdditionalFields> map = new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);
    // INodeDirectory extends INodeWithAdditionalFields, so the root directory
    // can be stored in the GSet directly.
    map.put(rootDir);
    return new INodeMap(map);
}
Example 3
Project: hadaps-master  File: TestCacheDirectives.java
// Same polling condition as Example 1, except that the current counts are
// logged only when the expected values have not been reached yet.
@Override
public Boolean get() {
    int numCachedBlocks = 0, numCachedReplicas = 0;
    namesystem.readLock();
    try {
        GSet<CachedBlock, CachedBlock> cachedBlocks = cacheManager.getCachedBlocks();
        if (cachedBlocks != null) {
            for (Iterator<CachedBlock> iter = cachedBlocks.iterator(); iter.hasNext(); ) {
                CachedBlock cachedBlock = iter.next();
                numCachedBlocks++;
                numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size();
            }
        }
    } finally {
        namesystem.readUnlock();
    }
    if (expectedCachedBlocks == -1 || numCachedBlocks == expectedCachedBlocks) {
        if (expectedCachedReplicas == -1 || numCachedReplicas == expectedCachedReplicas) {
            return true;
        }
    }
    LOG.info(logString + " cached blocks: have " + numCachedBlocks + " / " + expectedCachedBlocks + ".  " + "cached replicas: have " + numCachedReplicas + " / " + expectedCachedReplicas);
    return false;
}
Example 4
Project: hadoop-on-lustre2-master  File: TestCacheDirectives.java
@Override
public Boolean get() {
    int numCachedBlocks = 0, numCachedReplicas = 0;
    namesystem.readLock();
    try {
        GSet<CachedBlock, CachedBlock> cachedBlocks = cacheManager.getCachedBlocks();
        if (cachedBlocks != null) {
            for (Iterator<CachedBlock> iter = cachedBlocks.iterator(); iter.hasNext(); ) {
                CachedBlock cachedBlock = iter.next();
                numCachedBlocks++;
                numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size();
            }
        }
    } finally {
        namesystem.readUnlock();
    }
    if (expectedCachedBlocks == -1 || numCachedBlocks == expectedCachedBlocks) {
        if (expectedCachedReplicas == -1 || numCachedReplicas == expectedCachedReplicas) {
            return true;
        }
    }
    LOG.info(logString + " cached blocks: have " + numCachedBlocks + " / " + expectedCachedBlocks + ".  " + "cached replicas: have " + numCachedReplicas + " / " + expectedCachedReplicas);
    return false;
}
Example 5
Project: hadoop-master  File: TestCacheDirectives.java
@Override
public Boolean get() {
    int numCachedBlocks = 0, numCachedReplicas = 0;
    namesystem.readLock();
    try {
        GSet<CachedBlock, CachedBlock> cachedBlocks = cacheManager.getCachedBlocks();
        if (cachedBlocks != null) {
            for (Iterator<CachedBlock> iter = cachedBlocks.iterator(); iter.hasNext(); ) {
                CachedBlock cachedBlock = iter.next();
                numCachedBlocks++;
                numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size();
            }
        }
    } finally {
        namesystem.readUnlock();
    }
    LOG.info(logString + " cached blocks: have " + numCachedBlocks + " / " + expectedCachedBlocks + ".  " + "cached replicas: have " + numCachedReplicas + " / " + expectedCachedReplicas);
    if (expectedCachedBlocks == -1 || numCachedBlocks == expectedCachedBlocks) {
        if (expectedCachedReplicas == -1 || numCachedReplicas == expectedCachedReplicas) {
            return true;
        }
    }
    return false;
}