Java Examples for org.apache.hadoop.fs.permission.FsPermission
The following Java examples illustrate how org.apache.hadoop.fs.permission.FsPermission is used in practice. The source code samples are taken from a variety of open-source projects.
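Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the constructors and FileSystem calls that recur throughout this page. The /tmp/fs-permission-demo path and the default FileSystem obtained from an empty Configuration are assumptions made purely for illustration.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/fs-permission-demo"); // hypothetical path, for illustration only

    // Equivalent ways to build an FsPermission:
    FsPermission fromOctalString = new FsPermission("755");          // octal string, as in new FsPermission("755")
    FsPermission fromSymbolic = FsPermission.valueOf("-rwxr-xr-x");  // unix "ls -l" style string
    FsPermission fromActions = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
    FsPermission fromShort = new FsPermission((short) 0755);         // raw mode bits

    // Create a directory with an explicit permission, then tighten it afterwards.
    fs.mkdirs(path, fromOctalString);
    fs.setPermission(path, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));

    // Read the effective permission back and apply the configured umask,
    // as several of the examples below do before comparing permissions.
    FsPermission current = fs.getFileStatus(path).getPermission();
    FsPermission masked = FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
    System.out.println(current + " / " + masked + " / " + fromSymbolic + " / " + fromActions + " / " + fromShort);
  }
}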
Example 1
| Project: accumulo-master File: VolumeManagerImpl.java View source code |
@Override
public FSDataOutputStream createSyncable(Path logPath, int bufferSize, short replication, long blockSize) throws IOException {
Volume v = getVolumeByPath(logPath);
FileSystem fs = v.getFileSystem();
blockSize = correctBlockSize(fs.getConf(), blockSize);
bufferSize = correctBufferSize(fs.getConf(), bufferSize);
EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
log.debug("creating " + logPath + " with CreateFlag set: " + set);
try {
return fs.create(logPath, FsPermission.getDefault(), set, bufferSize, replication, blockSize, null);
} catch (Exception ex) {
log.debug("Exception", ex);
return fs.create(logPath, true, bufferSize, replication, blockSize);
}
}
Example 2
| Project: datacollector-master File: TestHDFSTargetWholeFile.java View source code |
@Test
public void testWholeFilePermission() throws Exception {
java.nio.file.Path filePath1 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles1.txt");
java.nio.file.Path filePath2 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles2.txt");
java.nio.file.Path filePath3 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles3.txt");
Files.write(filePath1, "This is a sample file 1 with some text".getBytes());
Files.write(filePath2, "This is a sample file 2 with some text".getBytes());
Files.write(filePath3, "This is a sample file 3 with some text".getBytes());
HdfsTarget hdfsTarget = HdfsTargetUtil.newBuilder().hdfsUri(uri.toString()).dirPathTemplate(getTestDir()).timeDriver("${time:now()}").dataForamt(DataFormat.WHOLE_FILE).fileType(HdfsFileType.WHOLE_FILE).fileNameEL("${record:value('/fileInfo/filename')}").maxRecordsPerFile(1).maxFileSize(0).uniquePrefix("sdc-").idleTimeout("-1").permissionEL("${record:value('/fileInfo/permissions')}").lateRecordsAction(LateRecordsAction.SEND_TO_LATE_RECORDS_FILE).build();
TargetRunner runner = new TargetRunner.Builder(HdfsDTarget.class, hdfsTarget).setOnRecordError(OnRecordError.STOP_PIPELINE).build();
runner.runInit();
try {
runner.runWrite(Arrays.asList(getFileRefRecordForFile(filePath1, "755"), //posix style
getFileRefRecordForFile(filePath2, "rwxr--r--"), //unix style
getFileRefRecordForFile(filePath3, "-rw-rw----")));
org.apache.hadoop.fs.Path targetPath1 = new org.apache.hadoop.fs.Path(getTestDir() + "/sdc-" + filePath1.getFileName());
org.apache.hadoop.fs.Path targetPath2 = new org.apache.hadoop.fs.Path(getTestDir() + "/sdc-" + filePath2.getFileName());
org.apache.hadoop.fs.Path targetPath3 = new org.apache.hadoop.fs.Path(getTestDir() + "/sdc-" + filePath3.getFileName());
FileSystem fs = FileSystem.get(uri, new HdfsConfiguration());
Assert.assertTrue(fs.exists(targetPath1));
Assert.assertTrue(fs.exists(targetPath2));
Assert.assertTrue(fs.exists(targetPath3));
FsPermission actual1 = fs.listStatus(targetPath1)[0].getPermission();
FsPermission actual2 = fs.listStatus(targetPath2)[0].getPermission();
FsPermission actual3 = fs.listStatus(targetPath3)[0].getPermission();
FsPermission expected1 = new FsPermission("755");
FsPermission expected2 = FsPermission.valueOf("-rwxr--r--");
FsPermission expected3 = FsPermission.valueOf("-rw-rw----");
Assert.assertEquals(expected1, actual1);
Assert.assertEquals(expected2, actual2);
Assert.assertEquals(expected3, actual3);
} finally {
runner.runDestroy();
}
}
Example 3
| Project: hadoop-20-master File: TestKillSubProcesses.java View source code |
void runTests(JobConf conf, JobTracker jt) throws IOException {
FileSystem fs = FileSystem.getLocal(mr.createJobConf());
Path rootDir = new Path(TEST_ROOT_DIR);
if (!fs.exists(rootDir)) {
fs.mkdirs(rootDir);
}
fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
runKillingJobAndValidate(jt, conf);
runFailingJobAndValidate(jt, conf);
runSuccessfulJobAndValidate(jt, conf);
}
Example 4
| Project: HadoopUSC-master File: TestKillSubProcesses.java View source code |
void runTests(JobConf conf, JobTracker jt) throws IOException {
FileSystem fs = FileSystem.getLocal(mr.createJobConf());
Path rootDir = new Path(TEST_ROOT_DIR);
if (!fs.exists(rootDir)) {
fs.mkdirs(rootDir);
}
fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
runKillingJobAndValidate(jt, conf);
runFailingJobAndValidate(jt, conf);
runSuccessfulJobAndValidate(jt, conf);
}
Example 5
| Project: RDFS-master File: DistCh.java View source code |
private boolean isDifferent(FileStatus original) {
if (owner != null && !owner.equals(original.getOwner())) {
return true;
}
if (group != null && !group.equals(original.getGroup())) {
return true;
}
if (permission != null) {
FsPermission orig = original.getPermission();
return original.isDir() ? !permission.equals(orig) : !permission.applyUMask(FILE_UMASK).equals(orig);
}
return false;
}
Example 6
| Project: cdh3u3-with-mesos-master File: TestTaskTrackerInfoSuccessfulFailedJobs.java View source code |
//This creates the input directories in the dfs
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 7
| Project: hdfs-cloudera-cdh3u3-production-master File: TestTaskTrackerInfoSuccessfulFailedJobs.java View source code |
//This creates the input directories in the dfs
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 8
| Project: action-core-master File: HdfsWriter.java View source code |
public URI write(final InputStream inputStream, final String outputPath, final boolean overwrite, final short replication, final long blockSize, final String permission) throws IOException {
final long start = System.nanoTime();
log.info("Writing to HDFS: {}", outputPath);
final Path hdfsPath = new Path(outputPath);
FSDataOutputStream outputStream = null;
int bytesWritten = 0;
try {
new FsPermission(permission);
outputStream = fileSystemAccess.get().create(hdfsPath, new FsPermission(permission), overwrite, ONE_MEG, replication, blockSize, null);
byte[] buffer = new byte[ONE_MEG];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) > 0) {
outputStream.write(buffer, 0, bytesRead);
bytesWritten += bytesRead;
}
// GC-ready
//noinspection UnusedAssignment
buffer = null;
} finally {
if (outputStream != null) {
outputStream.close();
}
}
final long end = System.nanoTime();
log.info(String.format("Written %.3f Mb in %d sec. to %s", (double) bytesWritten / (1024 * 1024), (end - start) / 1000000000, outputPath));
return hdfsPath.toUri();
}
Example 9
| Project: ambari-master File: HdfsApi.java View source code |
/**
* Change permissions
* @param path path
* @param permissions permissions in format rwxrwxrwx
* @throws IOException
* @throws InterruptedException
*/
public boolean chmod(final String path, final String permissions) throws IOException, InterruptedException {
return execute(new PrivilegedExceptionAction<Boolean>() {
public Boolean run() throws Exception {
try {
fs.setPermission(new Path(path), FsPermission.valueOf(permissions));
} catch (Exception ex) {
return false;
}
return true;
}
});
}
Example 10
| Project: bigpetstore-master File: TestTaskKilling.java View source code |
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 11
| Project: commoncrawl-crawler-master File: CrawlDBMergeSortReducer.java View source code |
@Override
public FileStatus getFileStatus(Path f) throws IOException {
// get uri from path ...
URI uri = f.toUri();
// convert to s3 path ..
String key = uri.getPath().substring(1);
System.out.println("***uri path:" + key);
ObjectMetadata metadata = _s3Client.getObjectMetadata(uri.getHost(), key);
if (metadata != null) {
FileStatus fileStatus = new FileStatus(metadata.getContentLength(), false, 1, 0, metadata.getLastModified().getTime(), 0, FsPermission.getDefault(), "", "", f);
return fileStatus;
}
return null;
}
Example 12
| Project: gobblin-master File: WriterUtils.java View source code |
/**
* Create the given dir as well as all missing ancestor dirs. All created dirs will have the given permission.
* This should be used instead of {@link FileSystem#mkdirs(Path, FsPermission)}, since that method only sets
* the permission for the given dir, and not recursively for the ancestor dirs.
*
* @param fs FileSystem
* @param path The dir to be created
* @param perm The permission to be set
* @throws IOException if failing to create dir or set permission.
*/
public static void mkdirsWithRecursivePermission(FileSystem fs, Path path, FsPermission perm) throws IOException {
if (fs.exists(path)) {
return;
}
if (path.getParent() != null && !fs.exists(path.getParent())) {
mkdirsWithRecursivePermission(fs, path.getParent(), perm);
}
if (!fs.mkdirs(path, perm)) {
throw new IOException(String.format("Unable to mkdir %s with permission %s", path, perm));
}
// Double check permission, since fs.mkdirs() may not guarantee to set the permission correctly
if (!fs.getFileStatus(path).getPermission().equals(perm)) {
fs.setPermission(path, perm);
}
}
Example 13
| Project: hadoop-1.0.3-gpu-master File: TestTaskTrackerInfoSuccessfulFailedJobs.java View source code |
//This creates the input directories in the dfs
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 14
| Project: hadoop-release-2.6.0-master File: TestLinuxContainerExecutor.java View source code |
@Before
public void setup() throws Exception {
files = FileContext.getLocalFSFileContext();
Path workSpacePath = new Path(workSpace.getAbsolutePath());
files.mkdir(workSpacePath, null, true);
FileUtil.chmod(workSpace.getAbsolutePath(), "777");
File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
String exec_path = System.getProperty("container-executor.path");
if (exec_path != null && !exec_path.isEmpty()) {
conf = new Configuration(false);
conf.setClass("fs.AbstractFileSystem.file.impl", org.apache.hadoop.fs.local.LocalFs.class, org.apache.hadoop.fs.AbstractFileSystem.class);
appSubmitter = System.getProperty("application.submitter");
if (appSubmitter == null || appSubmitter.isEmpty()) {
appSubmitter = "nobody";
}
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, appSubmitter);
LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
exec = new LinuxContainerExecutor();
exec.setConf(conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
List<String> localDirs = dirsHandler.getLocalDirs();
for (String dir : localDirs) {
Path userDir = new Path(dir, ContainerLocalizer.USERCACHE);
files.mkdir(userDir, new FsPermission("777"), false);
// $local/filecache
Path fileDir = new Path(dir, ContainerLocalizer.FILECACHE);
files.mkdir(fileDir, new FsPermission("777"), false);
}
}
}
Example 15
| Project: hadoop-src-research-master File: TestTaskTrackerInfoSuccessfulFailedJobs.java View source code |
//This creates the input directories in the dfs
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 16
| Project: HadoopEKG-master File: DistCh.java View source code |
private boolean isDifferent(FileStatus original) {
if (owner != null && !owner.equals(original.getOwner())) {
return true;
}
if (group != null && !group.equals(original.getGroup())) {
return true;
}
if (permission != null) {
FsPermission orig = original.getPermission();
return original.isDirectory() ? !permission.equals(orig) : !permission.applyUMask(FILE_UMASK).equals(orig);
}
return false;
}
Example 17
| Project: HDP-2.2-Patched-master File: DefaultContainerExecutor.java View source code |
@Override
public int launchContainer(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, String userName, String appId, Path containerWorkDir, List<String> localDirs, List<String> logDirs) throws IOException {
FsPermission dirPerm = new FsPermission(APPDIR_PERM);
ContainerId containerId = container.getContainerId();
// create container dirs on all disks
String containerIdStr = ConverterUtils.toString(containerId);
String appIdStr = ConverterUtils.toString(containerId.getApplicationAttemptId().getApplicationId());
for (String sLocalDir : localDirs) {
Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usersdir, userName);
Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE);
Path appDir = new Path(appCacheDir, appIdStr);
Path containerDir = new Path(appDir, containerIdStr);
createDir(containerDir, dirPerm, true);
}
// Create the container log-dirs on all disks
createContainerLogDirs(appIdStr, containerIdStr, logDirs);
Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
createDir(tmpDir, dirPerm, false);
// copy launch script to work dir
Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
lfs.util().copy(nmPrivateContainerScriptPath, launchDst);
// copy container tokens to work dir
Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
lfs.util().copy(nmPrivateTokensPath, tokenDst);
// Create new local launch wrapper script
LocalWrapperScriptBuilder sb = Shell.WINDOWS ? new WindowsLocalWrapperScriptBuilder(containerIdStr, containerWorkDir) : new UnixLocalWrapperScriptBuilder(containerWorkDir);
// Windows path length limitation.
if (Shell.WINDOWS && sb.getWrapperScriptPath().toString().length() > WIN_MAX_PATH) {
throw new IOException(String.format("Cannot launch container using script at path %s, because it exceeds " + "the maximum supported path length of %d characters. Consider " + "configuring shorter directories in %s.", sb.getWrapperScriptPath(), WIN_MAX_PATH, YarnConfiguration.NM_LOCAL_DIRS));
}
Path pidFile = getPidFilePath(containerId);
if (pidFile != null) {
sb.writeLocalWrapperScript(launchDst, pidFile);
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
// create log dir under app
// fork script
ShellCommandExecutor shExec = null;
try {
lfs.setPermission(launchDst, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
lfs.setPermission(sb.getWrapperScriptPath(), ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
// Setup command to run
String[] command = getRunCommand(sb.getWrapperScriptPath().toString(), containerIdStr, this.getConf());
LOG.info("launchContainer: " + Arrays.toString(command));
shExec = new ShellCommandExecutor(command, new File(containerWorkDir.toUri().getPath()), // sanitized env
container.getLaunchContext().getEnvironment());
if (isContainerActive(containerId)) {
shExec.execute();
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
} catch (IOException e) {
if (null == shExec) {
return -1;
}
int exitCode = shExec.getExitCode();
LOG.warn("Exit code from container " + containerId + " is : " + exitCode);
if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) {
LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode, e);
logOutput(shExec.getOutput());
String diagnostics = "Exception from container-launch: \n" + StringUtils.stringifyException(e) + "\n" + shExec.getOutput();
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics));
} else {
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode));
}
return exitCode;
} finally {
//
;
}
return 0;
}
Example 18
| Project: hops-master File: TestLinuxContainerExecutor.java View source code |
@Before
public void setup() throws Exception {
files = FileContext.getLocalFSFileContext();
Path workSpacePath = new Path(workSpace.getAbsolutePath());
files.mkdir(workSpacePath, null, true);
FileUtil.chmod(workSpace.getAbsolutePath(), "777");
File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
String exec_path = System.getProperty("container-executor.path");
if (exec_path != null && !exec_path.isEmpty()) {
conf = new Configuration(false);
conf.setClass("fs.AbstractFileSystem.file.impl", org.apache.hadoop.fs.local.LocalFs.class, org.apache.hadoop.fs.AbstractFileSystem.class);
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "xuan");
LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
exec = new LinuxContainerExecutor();
exec.setConf(conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
}
appSubmitter = System.getProperty("application.submitter");
if (appSubmitter == null || appSubmitter.isEmpty()) {
appSubmitter = "nobody";
}
}
Example 19
| Project: incubator-blur-master File: BlurSerDeTest.java View source code |
@BeforeClass
public static void startCluster() throws IOException {
System.setProperty("hadoop.log.dir", "./target/tmp_BlurSerDeTest_hadoop_log");
GCWatcher.init(0.60);
LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
File testDirectory = new File(TMPDIR, "blur-SerDe-test").getAbsoluteFile();
testDirectory.mkdirs();
Path directory = new Path(testDirectory.getPath());
FsPermission dirPermissions = localFS.getFileStatus(directory).getPermission();
FsAction userAction = dirPermissions.getUserAction();
FsAction groupAction = dirPermissions.getGroupAction();
FsAction otherAction = dirPermissions.getOtherAction();
StringBuilder builder = new StringBuilder();
builder.append(userAction.ordinal());
builder.append(groupAction.ordinal());
builder.append(otherAction.ordinal());
String dirPermissionNum = builder.toString();
System.setProperty("dfs.datanode.data.dir.perm", dirPermissionNum);
testDirectory.delete();
miniCluster = new MiniCluster();
miniCluster.startBlurCluster(new File(testDirectory, "cluster").getAbsolutePath(), 2, 3, true, externalProcesses);
miniCluster.startMrMiniCluster();
}
Example 20
| Project: lucene-solr-master File: MorphlineBasicMiniMRTest.java View source code |
@BeforeClass
public static void setupClass() throws Exception {
solrHomeDirectory = createTempDir().toFile();
assumeFalse("HDFS tests were disabled by -Dtests.disableHdfs", Boolean.parseBoolean(System.getProperty("tests.disableHdfs", "false")));
assumeFalse("This test fails on Java 9 (https://issues.apache.org/jira/browse/SOLR-8876)", Constants.JRE_IS_MINIMUM_JAVA9);
assumeFalse("FIXME: This test does not work with Windows because of native library requirements", Constants.WINDOWS);
AbstractZkTestCase.SOLRHOME = solrHomeDirectory;
FileUtils.copyDirectory(MINIMR_CONF_DIR, solrHomeDirectory);
File dataDir = createTempDir().toFile();
tempDir = dataDir.getAbsolutePath();
new File(tempDir).mkdirs();
FileUtils.copyFile(new File(RESOURCES_DIR + "/custom-mimetypes.xml"), new File(tempDir + "/custom-mimetypes.xml"));
AbstractSolrMorphlineTestBase.setupMorphline(tempDir, "test-morphlines/solrCellDocumentTypes", true);
System.setProperty("hadoop.log.dir", new File(solrHomeDirectory, "logs").getAbsolutePath());
int taskTrackers = 1;
int dataNodes = 2;
// String proxyUser = System.getProperty("user.name");
// String proxyGroup = "g";
// StringBuilder sb = new StringBuilder();
// sb.append("127.0.0.1,localhost");
// for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
// sb.append(",").append(i.getCanonicalHostName());
// }
new File(dataDir, "nm-local-dirs").mkdirs();
System.setProperty("solr.hdfs.blockcache.enabled", "false");
System.setProperty("test.build.dir", dataDir + File.separator + "hdfs" + File.separator + "test-build-dir");
System.setProperty("test.build.data", dataDir + File.separator + "hdfs" + File.separator + "build");
System.setProperty("test.cache.data", dataDir + File.separator + "hdfs" + File.separator + "cache");
// Initialize AFTER test.build.dir is set, JarFinder uses it.
SEARCH_ARCHIVES_JAR = JarFinder.getJar(MapReduceIndexerTool.class);
JobConf conf = new JobConf();
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
conf.set(YarnConfiguration.NM_LOCAL_DIRS, dataDir.getPath() + File.separator + "nm-local-dirs");
conf.set(YarnConfiguration.DEFAULT_NM_LOG_DIRS, dataDir + File.separator + "nm-logs");
conf.set("testWorkDir", dataDir.getPath() + File.separator + "testWorkDir");
conf.set("mapreduce.jobhistory.minicluster.fixed.ports", "false");
conf.set("mapreduce.jobhistory.admin.address", "0.0.0.0:0");
dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
String nnURI = fileSystem.getUri().toString();
int numDirs = 1;
String[] racks = null;
String[] hosts = null;
mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 21
| Project: multireducers-master File: MultiIT.java View source code |
public void testExample() throws Exception {
ExampleRunner exampleRunner = new ExampleRunner();
JobConf conf = createJobConf();
conf.setNumReduceTasks(10);
exampleRunner.setConf(conf);
final FileContext fc = FileContext.getFileContext(conf);
fc.mkdir(getInputDir(), FsPermission.getDefault(), true);
Path inputFile = new Path(getInputDir(), "input.txt");
int times = 1024 * 1024 + 1;
createInputFile(fc.create(inputFile, EnumSet.of(CreateFlag.CREATE)), times);
assertThat(exampleRunner.run(new String[] { getTestRootDir() + inputFile.toString(), getTestRootDir() + getOutputDir().toString() }), is(0));
FileStatus[] first = fc.util().listStatus(new Path(getOutputDir(), "first"), new GlobFilter("part-r-*"));
FileStatus[] second = fc.util().listStatus(new Path(getOutputDir(), "second"), new GlobFilter("part-r-*"));
Multiset<String> countFirst = HashMultiset.create();
Multiset<String> countSecond = HashMultiset.create();
fillMapFromFile(fc, first, countFirst);
fillMapFromFile(fc, second, countSecond);
assertThat(ImmutableMultiset.copyOf(countFirst), is(new ImmutableMultiset.Builder<String>().addCopies("john", 2 * times).addCopies("dough", times).addCopies("joe", times).addCopies("moe", times).addCopies("prefix_john", 2 * times).addCopies("prefix_dough", times).addCopies("prefix_joe", times).addCopies("prefix_moe", times).build()));
assertThat(ImmutableMultiset.copyOf(countSecond), is(new ImmutableMultiset.Builder<String>().addCopies("120", times).addCopies("130", 2 * times).addCopies("180", times).addCopies("190", times).build()));
}
Example 22
| Project: nifi-master File: EventTestUtils.java View source code |
public static Event.CreateEvent createCreateEvent() {
return new Event.CreateEvent.Builder().ctime(new Date().getTime()).groupName("group_name").iNodeType(Event.CreateEvent.INodeType.DIRECTORY).overwrite(false).ownerName("ownerName").path("/some/path/create").perms(new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)).replication(1).symlinkTarget("/some/symlink/target").build();
}
Example 23
| Project: yarn-comment-master File: TestCopyCommitter.java View source code |
@Test
public void testPreserveStatus() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(), taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
FsPermission sourcePerm = new FsPermission((short) 511);
FsPermission initialPerm = new FsPermission((short) 448);
sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
options.preserve(FileAttribute.PERMISSION);
options.appendToConf(conf);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
//Test for idempotent commit
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing for preserve status", e);
Assert.fail("Preserve status failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
}
}
Example 24
| Project: cdh-mesos-master File: TestTaskKilling.java View source code |
private void createInput(Path inDir, Configuration conf) throws IOException {
String input = "Hadoop is framework for data intensive distributed " + "applications.\n" + "Hadoop enables applications to work with thousands of nodes.";
FileSystem fs = inDir.getFileSystem(conf);
if (!fs.mkdirs(inDir)) {
throw new IOException("Failed to create the input directory:" + inDir.toString());
}
fs.setPermission(inDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
int i = 0;
while (i < 1000 * 3000) {
file.writeBytes(input);
i++;
}
file.close();
}
Example 25
| Project: ciel-java-master File: SkywritingTaskFileSystem.java View source code |
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
/*
* XXX: We don't perform any sanity-checking on the path name.
* We should really verify that the filename looks like "/out/*".
* Also, we shouldn't ignore bufferSize, replication, blockSize or progress.
*/
int index = Integer.parseInt(f.getName());
return new FSDataOutputStream(this.outputs[index], this.stats);
}
Example 26
| Project: Grid-Appliance-Hadoop-master File: NativeS3FileSystem.java View source code |
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
if (exists(f) && !overwrite) {
throw new IOException("File already exists:" + f);
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
return new FSDataOutputStream(new NativeS3FsOutputStream(getConf(), store, key, progress, bufferSize), statistics);
}
Example 27
| Project: hadaps-master File: DefaultContainerExecutor.java View source code |
@Override
public int launchContainer(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, String userName, String appId, Path containerWorkDir, List<String> localDirs, List<String> logDirs) throws IOException {
FsPermission dirPerm = new FsPermission(APPDIR_PERM);
ContainerId containerId = container.getContainerId();
// create container dirs on all disks
String containerIdStr = ConverterUtils.toString(containerId);
String appIdStr = ConverterUtils.toString(containerId.getApplicationAttemptId().getApplicationId());
for (String sLocalDir : localDirs) {
Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usersdir, userName);
Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE);
Path appDir = new Path(appCacheDir, appIdStr);
Path containerDir = new Path(appDir, containerIdStr);
createDir(containerDir, dirPerm, true);
}
// Create the container log-dirs on all disks
createContainerLogDirs(appIdStr, containerIdStr, logDirs);
Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
createDir(tmpDir, dirPerm, false);
// copy launch script to work dir
Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
lfs.util().copy(nmPrivateContainerScriptPath, launchDst);
// copy container tokens to work dir
Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
lfs.util().copy(nmPrivateTokensPath, tokenDst);
// Create new local launch wrapper script
LocalWrapperScriptBuilder sb = Shell.WINDOWS ? new WindowsLocalWrapperScriptBuilder(containerIdStr, containerWorkDir) : new UnixLocalWrapperScriptBuilder(containerWorkDir);
// Windows path length limitation.
if (Shell.WINDOWS && sb.getWrapperScriptPath().toString().length() > WIN_MAX_PATH) {
throw new IOException(String.format("Cannot launch container using script at path %s, because it exceeds " + "the maximum supported path length of %d characters. Consider " + "configuring shorter directories in %s.", sb.getWrapperScriptPath(), WIN_MAX_PATH, YarnConfiguration.NM_LOCAL_DIRS));
}
Path pidFile = getPidFilePath(containerId);
if (pidFile != null) {
sb.writeLocalWrapperScript(launchDst, pidFile);
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
// create log dir under app
// fork script
ShellCommandExecutor shExec = null;
try {
lfs.setPermission(launchDst, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
lfs.setPermission(sb.getWrapperScriptPath(), ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
// Setup command to run
String[] command = getRunCommand(sb.getWrapperScriptPath().toString(), containerIdStr, this.getConf());
LOG.info("launchContainer: " + Arrays.toString(command));
shExec = new ShellCommandExecutor(command, new File(containerWorkDir.toUri().getPath()), // sanitized env
container.getLaunchContext().getEnvironment());
if (isContainerActive(containerId)) {
shExec.execute();
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
} catch (IOException e) {
if (null == shExec) {
return -1;
}
int exitCode = shExec.getExitCode();
LOG.warn("Exit code from container " + containerId + " is : " + exitCode);
if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) {
LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode, e);
logOutput(shExec.getOutput());
String diagnostics = "Exception from container-launch: " + e + "\n" + StringUtils.stringifyException(e) + "\n" + shExec.getOutput();
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics));
} else {
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode));
}
return exitCode;
} finally {
//
;
}
return 0;
}
Example 28
| Project: hadoop-20-warehouse-fix-master File: TestKillSubProcesses.java View source code |
void runTests(JobConf conf, JobTracker jt) throws IOException {
FileSystem fs = FileSystem.getLocal(mr.createJobConf());
Path rootDir = new Path(TEST_ROOT_DIR);
if (!fs.exists(rootDir)) {
fs.mkdirs(rootDir);
}
fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
runKillingJobAndValidate(jt, conf);
runFailingJobAndValidate(jt, conf);
runSuccessfulJobAndValidate(jt, conf);
}
Example 29
| Project: hadoop-20-warehouse-master File: TestKillSubProcesses.java View source code |
void runTests(JobConf conf, JobTracker jt) throws IOException {
FileSystem fs = FileSystem.getLocal(mr.createJobConf());
Path rootDir = new Path(TEST_ROOT_DIR);
if (!fs.exists(rootDir)) {
fs.mkdirs(rootDir);
}
fs.setPermission(rootDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
runKillingJobAndValidate(jt, conf);
runFailingJobAndValidate(jt, conf);
runSuccessfulJobAndValidate(jt, conf);
}
Example 30
| Project: hadoop-gpu-master File: NativeS3FileSystem.java View source code |
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
if (exists(f) && !overwrite) {
throw new IOException("File already exists:" + f);
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
return new FSDataOutputStream(new NativeS3FsOutputStream(getConf(), store, key, progress, bufferSize), statistics);
}
Example 31
| Project: hadoop-master File: TestLinuxContainerExecutor.java View source code |
@Before
public void setup() throws Exception {
files = FileContext.getLocalFSFileContext();
Path workSpacePath = new Path(workSpace.getAbsolutePath());
files.mkdir(workSpacePath, null, true);
FileUtil.chmod(workSpace.getAbsolutePath(), "777");
File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
String exec_path = System.getProperty("container-executor.path");
if (exec_path != null && !exec_path.isEmpty()) {
conf = new Configuration(false);
conf.setClass("fs.AbstractFileSystem.file.impl", org.apache.hadoop.fs.local.LocalFs.class, org.apache.hadoop.fs.AbstractFileSystem.class);
appSubmitter = System.getProperty("application.submitter");
if (appSubmitter == null || appSubmitter.isEmpty()) {
appSubmitter = "nobody";
}
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, appSubmitter);
LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
exec = new LinuxContainerExecutor();
exec.setConf(conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
List<String> localDirs = dirsHandler.getLocalDirs();
for (String dir : localDirs) {
Path userDir = new Path(dir, ContainerLocalizer.USERCACHE);
files.mkdir(userDir, new FsPermission("777"), false);
// $local/filecache
Path fileDir = new Path(dir, ContainerLocalizer.FILECACHE);
files.mkdir(fileDir, new FsPermission("777"), false);
}
}
}
Example 32
| Project: hadoop-on-lustre2-master File: DefaultContainerExecutor.java View source code |
@Override
public int launchContainer(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, String userName, String appId, Path containerWorkDir, List<String> localDirs, List<String> logDirs) throws IOException {
FsPermission dirPerm = new FsPermission(APPDIR_PERM);
ContainerId containerId = container.getContainerId();
// create container dirs on all disks
String containerIdStr = ConverterUtils.toString(containerId);
String appIdStr = ConverterUtils.toString(containerId.getApplicationAttemptId().getApplicationId());
for (String sLocalDir : localDirs) {
Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usersdir, userName);
Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE);
Path appDir = new Path(appCacheDir, appIdStr);
Path containerDir = new Path(appDir, containerIdStr);
createDir(containerDir, dirPerm, true);
}
// Create the container log-dirs on all disks
createContainerLogDirs(appIdStr, containerIdStr, logDirs);
Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
createDir(tmpDir, dirPerm, false);
// copy launch script to work dir
Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
lfs.util().copy(nmPrivateContainerScriptPath, launchDst);
// copy container tokens to work dir
Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
lfs.util().copy(nmPrivateTokensPath, tokenDst);
// Create new local launch wrapper script
LocalWrapperScriptBuilder sb = Shell.WINDOWS ? new WindowsLocalWrapperScriptBuilder(containerIdStr, containerWorkDir) : new UnixLocalWrapperScriptBuilder(containerWorkDir);
// Windows path length limitation.
if (Shell.WINDOWS && sb.getWrapperScriptPath().toString().length() > WIN_MAX_PATH) {
throw new IOException(String.format("Cannot launch container using script at path %s, because it exceeds " + "the maximum supported path length of %d characters. Consider " + "configuring shorter directories in %s.", sb.getWrapperScriptPath(), WIN_MAX_PATH, YarnConfiguration.NM_LOCAL_DIRS));
}
Path pidFile = getPidFilePath(containerId);
if (pidFile != null) {
sb.writeLocalWrapperScript(launchDst, pidFile);
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
// create log dir under app
// fork script
ShellCommandExecutor shExec = null;
try {
lfs.setPermission(launchDst, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
lfs.setPermission(sb.getWrapperScriptPath(), ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
// Setup command to run
String[] command = getRunCommand(sb.getWrapperScriptPath().toString(), containerIdStr, this.getConf());
LOG.info("launchContainer: " + Arrays.toString(command));
shExec = new ShellCommandExecutor(command, new File(containerWorkDir.toUri().getPath()), // sanitized env
container.getLaunchContext().getEnvironment());
if (isContainerActive(containerId)) {
shExec.execute();
} else {
LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error");
return ExitCode.TERMINATED.getExitCode();
}
} catch (IOException e) {
if (null == shExec) {
return -1;
}
int exitCode = shExec.getExitCode();
LOG.warn("Exit code from container " + containerId + " is : " + exitCode);
if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) {
LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode, e);
logOutput(shExec.getOutput());
String diagnostics = "Exception from container-launch: " + e + "\n" + StringUtils.stringifyException(e) + "\n" + shExec.getOutput();
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics));
} else {
container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode));
}
return exitCode;
} finally {
//
;
}
return 0;
}
Example 33
| Project: hadoop_ekg-master File: NativeS3FileSystem.java View source code |
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
if (exists(f) && !overwrite) {
throw new IOException("File already exists:" + f);
}
Path absolutePath = makeAbsolute(f);
String key = pathToKey(absolutePath);
return new FSDataOutputStream(new NativeS3FsOutputStream(getConf(), store, key, progress, bufferSize), statistics);
}
Example 34
| Project: heliosearch-master File: MorphlineBasicMiniMRTest.java View source code |
@BeforeClass
public static void setupClass() throws Exception {
assumeTrue("Currently this test can only be run without the lucene test security policy in place", System.getProperty("java.security.manager", "").equals(""));
assumeFalse("HDFS tests were disabled by -Dtests.disableHdfs", Boolean.parseBoolean(System.getProperty("tests.disableHdfs", "false")));
assumeFalse("FIXME: This test does not work with Windows because of native library requirements", Constants.WINDOWS);
assumeFalse("FIXME: This test fails under Java 8 due to the Saxon dependency - see SOLR-1301", Constants.JRE_IS_MINIMUM_JAVA8);
assumeFalse("FIXME: This test fails under J9 due to the Saxon dependency - see SOLR-1301", System.getProperty("java.vm.info", "<?>").contains("IBM J9"));
AbstractZkTestCase.SOLRHOME = solrHomeDirectory;
FileUtils.copyDirectory(MINIMR_CONF_DIR, solrHomeDirectory);
tempDir = TEMP_DIR + "/test-morphlines-" + System.currentTimeMillis();
new File(tempDir).mkdirs();
FileUtils.copyFile(new File(RESOURCES_DIR + "/custom-mimetypes.xml"), new File(tempDir + "/custom-mimetypes.xml"));
AbstractSolrMorphlineTestBase.setupMorphline(tempDir, "test-morphlines/solrCellDocumentTypes", true);
System.setProperty("hadoop.log.dir", new File(solrHomeDirectory, "logs").getAbsolutePath());
int taskTrackers = 1;
int dataNodes = 2;
// String proxyUser = System.getProperty("user.name");
// String proxyGroup = "g";
// StringBuilder sb = new StringBuilder();
// sb.append("127.0.0.1,localhost");
// for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
// sb.append(",").append(i.getCanonicalHostName());
// }
createTempDir();
new File(dataDir, "nm-local-dirs").mkdirs();
System.setProperty("solr.hdfs.blockcache.enabled", "false");
System.setProperty("test.build.dir", dataDir + File.separator + "hdfs" + File.separator + "test-build-dir");
System.setProperty("test.build.data", dataDir + File.separator + "hdfs" + File.separator + "build");
System.setProperty("test.cache.data", dataDir + File.separator + "hdfs" + File.separator + "cache");
JobConf conf = new JobConf();
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
conf.set(YarnConfiguration.NM_LOCAL_DIRS, dataDir.getPath() + File.separator + "nm-local-dirs");
conf.set(YarnConfiguration.DEFAULT_NM_LOG_DIRS, dataDir + File.separator + "nm-logs");
conf.set("testWorkDir", dataDir.getPath() + File.separator + "testWorkDir");
dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
String nnURI = fileSystem.getUri().toString();
int numDirs = 1;
String[] racks = null;
String[] hosts = null;
mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 35
| Project: hive-master File: TestHCatMultiOutputFormat.java View source code |
private static void createTable(String tableName, String tablePerm) throws Exception {
Table tbl = new Table();
tbl.setDbName(DATABASE);
tbl.setTableName(tableName);
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(ColumnHolder.colMapping.get(tableName));
tbl.setSd(sd);
sd.setParameters(new HashMap<String, String>());
sd.setSerdeInfo(new SerDeInfo());
sd.getSerdeInfo().setName(tbl.getTableName());
sd.getSerdeInfo().setParameters(new HashMap<String, String>());
sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
tbl.setPartitionKeys(ColumnHolder.partitionCols);
hmsc.createTable(tbl);
Path path = new Path(warehousedir, tableName);
FileSystem fs = path.getFileSystem(hiveConf);
fs.setPermission(path, new FsPermission(tablePerm));
}
Example 36
| Project: hoop-master File: HoopFileSystem.java View source code |
/**
 * Opens an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * <p/>
 * IMPORTANT: The <code>Progressable</code> parameter is not used.
 *
 * @param f the file name to open
 * @param permission
 * @param overwrite if a file with this name already exists, then if true,
 * the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize
 * @param progress
 * @throws IOException
 * @see #setPermission(Path, FsPermission)
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put("op", "create");
params.put("overwrite", Boolean.toString(overwrite));
params.put("replication", Short.toString(replication));
params.put("blocksize", Long.toString(blockSize));
params.put("permission", permissionToString(permission));
HttpURLConnection conn = getConnection("POST", params, f);
try {
OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
return new HoopFSDataOutputStream(conn, os, HttpURLConnection.HTTP_CREATED, statistics);
} catch (IOException ex) {
validateResponse(conn, HttpURLConnection.HTTP_CREATED);
throw ex;
}
}
Example 37
| Project: howl-master File: HowlOutputFormat.java View source code |
/**
* Set the info about the output to write for the Job. This queries the metadata server
* to find the StorageDriver to use for the table. Throws error if partition is already published.
* @param job the job object
* @param outputInfo the table output info
* @throws IOException the exception in communicating with the metadata server
*/
@SuppressWarnings("unchecked")
public static void setOutput(Job job, HowlTableInfo outputInfo) throws IOException {
HiveMetaStoreClient client = null;
try {
Configuration conf = job.getConfiguration();
client = createHiveClient(outputInfo.getServerUri(), conf);
Table table = client.getTable(outputInfo.getDatabaseName(), outputInfo.getTableName());
if (outputInfo.getPartitionValues() == null) {
outputInfo.setPartitionValues(new HashMap<String, String>());
} else {
//Convert user specified map to have lower case key names
Map<String, String> valueMap = new HashMap<String, String>();
for (Map.Entry<String, String> entry : outputInfo.getPartitionValues().entrySet()) {
valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
}
outputInfo.setPartitionValues(valueMap);
}
//Handle duplicate publish
handleDuplicatePublish(job, outputInfo, client, table);
StorageDescriptor tblSD = table.getSd();
HowlSchema tableSchema = HowlUtil.extractSchemaFromStorageDescriptor(tblSD);
StorerInfo storerInfo = InitializeInput.extractStorerInfo(tblSD, table.getParameters());
List<String> partitionCols = new ArrayList<String>();
for (FieldSchema schema : table.getPartitionKeys()) {
partitionCols.add(schema.getName());
}
Class<? extends HowlOutputStorageDriver> driverClass = (Class<? extends HowlOutputStorageDriver>) Class.forName(storerInfo.getOutputSDClass());
HowlOutputStorageDriver driver = driverClass.newInstance();
String tblLocation = tblSD.getLocation();
String location = driver.getOutputLocation(job, tblLocation, partitionCols, outputInfo.getPartitionValues());
//Serialize the output info into the configuration
OutputJobInfo jobInfo = new OutputJobInfo(outputInfo, tableSchema, tableSchema, storerInfo, location, table);
conf.set(HOWL_KEY_OUTPUT_INFO, HowlUtil.serialize(jobInfo));
Path tblPath = new Path(tblLocation);
/* Set the umask in conf such that files/dirs get created with table-dir
* permissions. Following three assumptions are made:
* 1. Actual files/dirs creation is done by RecordWriter of underlying
* output format. It is assumed that they use default permissions while creation.
* 2. Default Permissions = FsPermission.getDefault() = 777.
* 3. UMask is honored by underlying filesystem.
*/
FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// check if oozie has set up a howl deleg. token - if so use it
TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
// TODO: will oozie use a "service" called "oozie" - then instead of
// new Text() do new Text("oozie") below - if this change is made also
// remember to do:
// job.getConfiguration().set(HOWL_KEY_TOKEN_SIGNATURE, "oozie");
// Also change code in HowlOutputCommitter.cleanupJob() to cancel the
// token only if token.service is not "oozie" - remove the condition of
// HOWL_KEY_TOKEN_SIGNATURE != null in that code.
Token<? extends TokenIdentifier> token = tokenSelector.selectToken(new Text(), ugi.getTokens());
if (token != null) {
job.getCredentials().addToken(new Text(ugi.getUserName()), token);
} else {
// we did not get token set up by oozie, let's get them ourselves here.
// we essentially get a token per unique Output HowlTableInfo - this is
// done because through Pig, setOutput() method is called multiple times
// We want to only get the token once per unique output HowlTableInfo -
// we cannot just get one token since in multi-query case (> 1 store in 1 job)
// or the case when a single pig script results in > 1 jobs, the single
// token will get cancelled by the output committer and the subsequent
// stores will fail - by tying the token with the concatenation of
// dbname, tablename and partition keyvalues of the output
// TableInfo, we can have as many tokens as there are stores and the TokenSelector
// will correctly pick the right tokens which the committer will use and
// cancel.
String tokenSignature = getTokenSignature(outputInfo);
if (tokenMap.get(tokenSignature) == null) {
// get delegation tokens from howl server and store them into the "job"
// These will be used in the HowlOutputCommitter to publish partitions to
// howl
String tokenStrForm = client.getDelegationTokenWithSignature(ugi.getUserName(), tokenSignature);
Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
t.decodeFromUrlString(tokenStrForm);
tokenMap.put(tokenSignature, t);
}
job.getCredentials().addToken(new Text(ugi.getUserName() + tokenSignature), tokenMap.get(tokenSignature));
// this will be used by the outputcommitter to pass on to the metastore client
// which in turn will pass on to the TokenSelector so that it can select
// the right token.
job.getConfiguration().set(HOWL_KEY_TOKEN_SIGNATURE, tokenSignature);
}
}
} catch (Exception e) {
if (e instanceof HowlException) {
throw (HowlException) e;
} else {
throw new HowlException(ErrorType.ERROR_SET_OUTPUT, e);
}
} finally {
if (client != null) {
client.close();
}
}
}
Example 38
| Project: RecordBreaker-master File: DataQuery.java View source code |
String grabTable(DataDescriptor desc) throws SQLException, IOException {
// Set up Hive table
Path p = desc.getFilename();
String tablename = tableCache.get(p);
if (tablename == null) {
tablename = "datatable" + Math.abs(r.nextInt());
Statement stmt = hiveCon.createStatement();
try {
String creatTxt = desc.getHiveCreateTableStatement(tablename);
LOG.info("Create: " + creatTxt);
stmt.execute(creatTxt);
tables.put(p, tablename);
} finally {
stmt.close();
}
// Copy avro version of data into secret location prior to Hive import
FileSystem fs = FileSystem.get(conf);
Path tmpTables = new Path(tmpTablesDir);
if (!fs.exists(tmpTables)) {
fs.mkdirs(tmpTables, new FsPermission("-rwxrwxrwx"));
}
Path secretDst = new Path(tmpTables, "r" + r.nextInt());
LOG.info("Preparing Avro data at " + secretDst);
desc.prepareAvroFile(fs, fs, secretDst, conf);
fs.setPermission(secretDst, new FsPermission("-rwxrwxrwx"));
// Import data
stmt = hiveCon.createStatement();
try {
LOG.info("Import data into Hive: " + desc.getHiveImportDataStatement(tablename, secretDst));
stmt.execute(desc.getHiveImportDataStatement(tablename, secretDst));
isLoaded.add(p);
} finally {
stmt.close();
}
// Refresh impala metadata
stmt = impalaCon.createStatement();
try {
try {
LOG.info("Rebuilding Impala metadata...");
stmt.execute("INVALIDATE METADATA");
} catch (Exception iex) {
LOG.info("Impala metadata rebuild failed: " + iex.toString());
}
} finally {
stmt.close();
}
// Insert into table cache
tableCache.put(p, tablename);
}
return tablename;
}
Example 39
| Project: s3hdfs-master File: ObjectInfoRedirect.java View source code |
private S3HdfsFileStatus parseHdfsFileStatus(JsonNode element) {
return new S3HdfsFileStatus(element.get("length").getLongValue(), element.get("type").getTextValue().equalsIgnoreCase("DIRECTORY"), element.get("replication").getIntValue(), element.get("blockSize").getLongValue(), element.get("modificationTime").getLongValue(), element.get("accessTime").getLongValue(), FsPermission.createImmutable((short) element.get("permission").getIntValue()), element.get("owner").getTextValue(), element.get("group").getTextValue(), (element.get("symlink") == null) ? null : DFSUtil.string2Bytes(element.get("symlink").getTextValue()), DFSUtil.string2Bytes(element.get("pathSuffix").getTextValue()), element.get("fileId").getLongValue(), element.get("childrenNum").getIntValue());
}
Example 40
| Project: sqoop-on-spark-master File: HiveConnectorTestCase.java View source code |
private void ensureWarehouseDirectory(Configuration conf) throws Exception {
String warehouseDirectory = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname);
StringBuilder dir = new StringBuilder();
for (String part : warehouseDirectory.split("/")) {
dir.append(part).append("/");
Path path = new Path(dir.toString());
if (!hdfsClient.exists(path)) {
hdfsClient.mkdirs(path);
}
}
hdfsClient.setPermission(new Path(dir.toString()), new FsPermission((short) 01777));
}Example 41
| Project: brisk-master File: NamenodePlugin.java View source code |
public void chmod(RequestContext ctx, final String path, final short mode) throws IOException {
LOG.debug("chmod(" + path + ", " + mode + "): Entering");
assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
public Void run() throws java.io.IOException {
namenode.setPermission(path, new FsPermission(mode));
return null;
}
});
}Example 42
| Project: clue-master File: HdfsDirectory.java View source code |
private static final OutputStream getOutputStream(FileSystem fileSystem, Path path) throws IOException {
Configuration conf = fileSystem.getConf();
FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
flags.add(CreateFlag.SYNC_BLOCK);
}
return fileSystem.create(path, FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)), flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(), fsDefaults.getBlockSize(), null);
}Example 43
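A note on the clue example above: it applies the client-side umask by hand. FsPermission.getUMask(conf) reads the configured umask and applyUMask clears those bits from the requested mode. A minimal sketch of the arithmetic, with an assumed umask of 022:
// import org.apache.hadoop.fs.permission.FsPermission;
FsPermission requested = FsPermission.getDefault();    // rwxrwxrwx (octal 0777)
FsPermission umask = new FsPermission("022");          // assumed value, for illustration only
System.out.println(requested.applyUMask(umask));       // rwxr-xr-x (0777 & ~0022 = 0755)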
| Project: elasticsearch-hadoop-master File: HdpBootstrap.java View source code |
/**
* Hack to allow Hadoop client to run on windows (which otherwise fails due to some permission problem).
*/
public static void hackHadoopStagingOnWin() {
// do the assignment only on Windows systems
if (TestUtils.isWindows()) {
// 0650 = -rw-r-x--- (owner read/write, group read/execute, others none)
JobSubmissionFiles.JOB_DIR_PERMISSION.fromShort((short) 0650);
JobSubmissionFiles.JOB_FILE_PERMISSION.fromShort((short) 0650);
Field field = null;
// handle distributed cache permissions on Hadoop < 2.4
try {
Class<?> jl = Class.forName("org.apache.hadoop.mapred.JobLocalizer");
field = ReflectionUtils.findField(jl, "privateCachePerms");
if (field != null) {
ReflectionUtils.makeAccessible(field);
FsPermission perm = (FsPermission) ReflectionUtils.getField(field, null);
perm.fromShort((short) 0650);
}
} catch (ClassNotFoundException cnfe) {
}
// handle jar permissions as well - temporarily disable for CDH 4 / YARN
try {
Class<?> tdcm = Class.forName("org.apache.hadoop.filecache.TrackerDistributedCacheManager");
field = ReflectionUtils.findField(tdcm, "PUBLIC_CACHE_OBJECT_PERM");
ReflectionUtils.makeAccessible(field);
FsPermission perm = (FsPermission) ReflectionUtils.getField(field, null);
perm.fromShort((short) 0650);
} catch (ClassNotFoundException cnfe) {
return;
} catch (Exception ex) {
LogFactory.getLog(TestUtils.class).warn("Cannot set permission for TrackerDistributedCacheManager", ex);
}
}
}Example 44
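A note on the elasticsearch-hadoop example above: the hack works because fromShort mutates the FsPermission instance in place, so patching the shared JOB_DIR_PERMISSION and JOB_FILE_PERMISSION constants changes every later use of them. A minimal sketch of that mutability (values are illustrative):
FsPermission p = new FsPermission((short) 0777);
p.fromShort((short) 0650);        // same object, new bits
System.out.println(p);            // rw-r-x---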
| Project: elasticsearch-master File: TestingFs.java View source code |
// wrap hadoop rawlocalfilesystem to behave less crazy
static RawLocalFileSystem wrap(final Path base) {
final FileSystemProvider baseProvider = base.getFileSystem().provider();
return new RawLocalFileSystem() {
private org.apache.hadoop.fs.Path box(Path path) {
return new org.apache.hadoop.fs.Path(path.toUri());
}
private Path unbox(org.apache.hadoop.fs.Path path) {
return baseProvider.getPath(path.toUri());
}
@Override
protected org.apache.hadoop.fs.Path getInitialWorkingDirectory() {
return box(base);
}
@Override
public void setPermission(org.apache.hadoop.fs.Path path, FsPermission permission) {
// no execution, thank you very much!
}
// pretend we don't support symlinks (which causes hadoop to want to do crazy things),
// returning the boolean does not seem to really help, link-related operations are still called.
@Override
public boolean supportsSymlinks() {
return false;
}
@Override
public FileStatus getFileLinkStatus(org.apache.hadoop.fs.Path path) throws IOException {
return getFileStatus(path);
}
@Override
public org.apache.hadoop.fs.Path getLinkTarget(org.apache.hadoop.fs.Path path) throws IOException {
return path;
}
@Override
public FileStatus getFileStatus(org.apache.hadoop.fs.Path path) throws IOException {
BasicFileAttributes attributes;
try {
attributes = Files.readAttributes(unbox(path), BasicFileAttributes.class);
} catch (NoSuchFileException e) {
FileNotFoundException fnfe = new FileNotFoundException("File " + path + " does not exist");
fnfe.initCause(e);
throw fnfe;
}
// we set similar values to raw local filesystem, except we are never a symlink
long length = attributes.size();
boolean isDir = attributes.isDirectory();
int blockReplication = 1;
long blockSize = getDefaultBlockSize(path);
long modificationTime = attributes.creationTime().toMillis();
return new FileStatus(length, isDir, blockReplication, blockSize, modificationTime, path);
}
};
}Example 45
| Project: falcon-master File: TestContext.java View source code |
public static void prepare(String clusterTemplate, boolean disableLineage) throws Exception {
// setup a logged in user
CurrentUser.authenticate(REMOTE_USER);
if (disableLineage) {
// disable recording lineage metadata
String services = StartupProperties.get().getProperty("application.services");
StartupProperties.get().setProperty("application.services", services.replace("org.apache.falcon.metadata.MetadataMappingService", ""));
}
Map<String, String> overlay = new HashMap<String, String>();
overlay.put("cluster", RandomStringUtils.randomAlphabetic(5));
overlay.put("colo", DeploymentUtil.getCurrentColo());
TestContext.overlayParametersOverTemplate(clusterTemplate, overlay);
EmbeddedCluster cluster = EmbeddedCluster.newCluster(overlay.get("cluster"), true);
cleanupStore();
// setup dependent workflow and lib path in hdfs
FileSystem fs = FileSystem.get(cluster.getConf());
mkdir(fs, new Path("/falcon"), new FsPermission((short) 511));
Path wfParent = new Path("/falcon/test");
fs.delete(wfParent, true);
Path wfPath = new Path(wfParent, "workflow");
mkdir(fs, wfPath);
mkdir(fs, new Path("/falcon/test/workflow/lib"));
fs.copyFromLocalFile(false, true, new Path(TestContext.class.getResource("/fs-workflow.xml").getPath()), new Path(wfPath, "workflow.xml"));
mkdir(fs, new Path(wfParent, "input/2012/04/20/00"));
Path outPath = new Path(wfParent, "output");
mkdir(fs, outPath, new FsPermission((short) 511));
// init cluster locations
initClusterLocations(cluster, fs);
}Example 46
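A note on the falcon example above: new FsPermission((short) 511) relies on 511 being the decimal value of octal 0777; the constructor only sees the bit pattern. An octal literal or the string form states the intent more plainly. A minimal sketch showing the three spellings are equivalent:
FsPermission a = new FsPermission((short) 511);   // decimal 511 == octal 0777
FsPermission b = new FsPermission((short) 0777);  // octal literal
FsPermission c = new FsPermission("777");         // octal string
System.out.println(a.equals(b) && b.equals(c));   // true, all are rwxrwxrwx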
| Project: hive_blinkdb-master File: TestMetaStoreAuthorization.java View source code |
public void testIsWritable() throws Exception {
setup();
conf = new HiveConf(this.getClass());
String testDir = System.getProperty("test.warehouse.dir", "/tmp");
Path testDirPath = new Path(testDir);
FileSystem fs = testDirPath.getFileSystem(conf);
Path top = new Path(testDirPath, "_foobarbaz12_");
try {
fs.mkdirs(top);
Warehouse wh = new Warehouse(conf);
FsPermission writePerm = FsPermission.createImmutable((short) 0777);
FsPermission noWritePerm = FsPermission.createImmutable((short) 0555);
fs.setPermission(top, writePerm);
assertTrue("Expected " + top + " to be writable", wh.isWritable(top));
fs.setPermission(top, noWritePerm);
assertTrue("Expected " + top + " to be not writable", !wh.isWritable(top));
} finally {
fs.delete(top, true);
}
}Example 47
| Project: Hive_optimization-master File: TestMetaStoreAuthorization.java View source code |
public void testIsWritable() throws Exception {
setup();
conf = new HiveConf(this.getClass());
String testDir = System.getProperty("test.warehouse.dir", "/tmp");
Path testDirPath = new Path(testDir);
FileSystem fs = testDirPath.getFileSystem(conf);
Path top = new Path(testDirPath, "_foobarbaz12_");
try {
fs.mkdirs(top);
Warehouse wh = new Warehouse(conf);
FsPermission writePerm = FsPermission.createImmutable((short) 0777);
FsPermission noWritePerm = FsPermission.createImmutable((short) 0555);
fs.setPermission(top, writePerm);
assertTrue("Expected " + top + " to be writable", wh.isWritable(top));
fs.setPermission(top, noWritePerm);
assertTrue("Expected " + top + " to be not writable", !wh.isWritable(top));
} finally {
fs.delete(top, true);
}
}Example 48
| Project: incubator-falcon-master File: TestContext.java View source code |
public static void prepare(String clusterTemplate) throws Exception {
// setup a logged in user
CurrentUser.authenticate(REMOTE_USER);
// disable recording lineage metadata if enabled
String services = StartupProperties.get().getProperty("application.services");
StartupProperties.get().setProperty("application.services", services.replace("org.apache.falcon.metadata.MetadataMappingService", ""));
Map<String, String> overlay = new HashMap<String, String>();
overlay.put("cluster", RandomStringUtils.randomAlphabetic(5));
overlay.put("colo", "gs");
TestContext.overlayParametersOverTemplate(clusterTemplate, overlay);
EmbeddedCluster cluster = EmbeddedCluster.newCluster(overlay.get("cluster"), true);
cleanupStore();
// setup dependent workflow and lib path in hdfs
FileSystem fs = FileSystem.get(cluster.getConf());
mkdir(fs, new Path("/falcon"), new FsPermission((short) 511));
Path wfParent = new Path("/falcon/test");
fs.delete(wfParent, true);
Path wfPath = new Path(wfParent, "workflow");
mkdir(fs, wfPath);
fs.copyFromLocalFile(false, true, new Path(TestContext.class.getResource("/fs-workflow.xml").getPath()), new Path(wfPath, "workflow.xml"));
mkdir(fs, new Path(wfParent, "input/2012/04/20/00"));
Path outPath = new Path(wfParent, "output");
mkdir(fs, outPath, new FsPermission((short) 511));
}Example 49
| Project: pentaho-hadoop-shims-master File: DistributedCacheUtilImplTest.java View source code |
@Test
public void findFiles_vfs_hdfs() throws Exception {
DistributedCacheUtilImpl ch = new DistributedCacheUtilImpl(TEST_CONFIG);
URL url = new URL("http://localhost:8020/path/to/file");
Configuration conf = mock(Configuration.class);
FileSystem fs = mock(FileSystem.class);
FileObject source = mock(FileObject.class);
Path dest = mock(Path.class);
FileObject hdfsDest = mock(FileObject.class);
Path root = mock(Path.class);
FileObject[] fileObjects = new FileObject[12];
for (int i = 0; i < fileObjects.length; i++) {
URL fileUrl = new URL("http://localhost:8020/path/to/file/" + i);
FileObject fileObject = mock(FileObject.class);
fileObjects[i] = fileObject;
doReturn(fileUrl).when(fileObject).getURL();
}
doReturn(url).when(source).getURL();
doReturn(conf).when(fs).getConf();
doReturn(0).when(conf).getInt(any(String.class), anyInt());
doReturn(true).when(source).exists();
doReturn(fileObjects).when(hdfsDest).findFiles(any(FileSelector.class));
doReturn(true).when(fs).delete(root, true);
doReturn(fileObjects.length).when(source).delete(any(AllFileSelector.class));
doNothing().when(fs).copyFromLocalFile(any(Path.class), any(Path.class));
doNothing().when(fs).setPermission(any(Path.class), any(FsPermission.class));
doReturn(true).when(fs).setReplication(any(Path.class), anyShort());
try {
try {
ch.stageForCache(source, fs, dest, true);
List<String> files = ch.findFiles(hdfsDest, null);
assertEquals(12, files.size());
} finally {
fs.delete(root, true);
}
} finally {
source.delete(new AllFileSelector());
}
}Example 50
| Project: sleuthkit-hadoop-master File: ExtractData.java View source code |
protected static void chmodR(FileSystem fs, Path p) throws IOException {
final FsPermission perm = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
final FileStatus[] list = fs.listStatus(p);
for (FileStatus f : list) {
if (f.isDir()) {
chmodR(fs, f.getPath());
}
fs.setPermission(f.getPath(), perm);
}
fs.setPermission(p, perm);
}Example 51
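A note on the sleuthkit-hadoop example above: building the mode from FsAction values avoids numeric literals entirely, and FsAction carries the usual set operations. A brief sketch (the group/other choices are illustrative only):
FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
System.out.println(perm);                                          // rwxr-x---
System.out.println(perm.getUserAction().implies(FsAction.WRITE));  // true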
| Project: sqoop-master File: HBaseBulkImportJob.java View source code |
/**
* Perform the loading of Hfiles.
*/
@Override
protected void completeImport(Job job) throws IOException, ImportException {
super.completeImport(job);
// Make the bulk load files source directory accessible to the world
// so that the hbase user can deal with it
Path bulkLoadDir = getContext().getDestination();
FileSystem fileSystem = bulkLoadDir.getFileSystem(job.getConfiguration());
setPermission(fileSystem, fileSystem.getFileStatus(bulkLoadDir), FsPermission.createImmutable((short) 00777));
HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
// Load generated HFiles into table
try {
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(job.getConfiguration());
loader.doBulkLoad(bulkLoadDir, hTable);
} catch (Exception e) {
String errorMessage = String.format("Unrecoverable error while " + "performing the bulk load of files in [%s]", bulkLoadDir.toString());
throw new ImportException(errorMessage, e);
}
}Example 52
| Project: tajo-master File: TestMasterRules.java View source code |
protected void createTajoDirectories(TajoConf tajoConf) throws Exception {
Path tajoRootDir = new Path(rootFilePath, "tajo-root");
FileSystem rootFs = tajoRootDir.getFileSystem(tajoConf);
FsPermission defaultPermission = FsPermission.createImmutable((short) 0700);
if (!rootFs.exists(tajoRootDir)) {
rootFs.mkdirs(tajoRootDir, new FsPermission(defaultPermission));
}
tajoConf.setVar(ConfVars.ROOT_DIR, tajoRootDir.toUri().toString());
Path tajoSystemDir = new Path(tajoRootDir, TajoConstants.SYSTEM_DIR_NAME);
if (!rootFs.exists(tajoSystemDir)) {
rootFs.mkdirs(tajoSystemDir, new FsPermission(defaultPermission));
}
Path tajoSystemResourceDir = new Path(tajoSystemDir, TajoConstants.SYSTEM_RESOURCE_DIR_NAME);
if (!rootFs.exists(tajoSystemResourceDir)) {
rootFs.mkdirs(tajoSystemResourceDir, new FsPermission(defaultPermission));
}
Path tajoWarehouseDir = new Path(tajoRootDir, TajoConstants.WAREHOUSE_DIR_NAME);
if (!rootFs.exists(tajoWarehouseDir)) {
rootFs.mkdirs(tajoWarehouseDir, new FsPermission(defaultPermission));
}
Path tajoStagingDir = new Path(tajoRootDir, "staging");
if (!rootFs.exists(tajoStagingDir)) {
rootFs.mkdirs(tajoStagingDir, new FsPermission(defaultPermission));
}
tajoConf.setVar(ConfVars.STAGING_ROOT_DIR, tajoStagingDir.toUri().toString());
}Example 53
| Project: alluxio-master File: HdfsUnderFileSystem.java View source code |
@Override
public OutputStream createDirect(String path, CreateOptions options) throws IOException {
IOException te = null;
RetryPolicy retryPolicy = new CountingRetry(MAX_TRY);
while (retryPolicy.attemptRetry()) {
try {
// TODO(chaomin): support creating HDFS files with specified block size and replication.
return new HdfsUnderFileOutputStream(FileSystem.create(mFileSystem, new Path(path), new FsPermission(options.getMode().toShort())));
} catch (IOException e) {
LOG.warn("Retry count {} : {} ", retryPolicy.getRetryCount(), e.getMessage());
te = e;
}
}
throw te;
}Example 54
| Project: tachyon-master File: FileSystemAclIntegrationTest.java View source code |
@Test
public void createFileWithPermission() throws Exception {
List<Integer> permissionValues = Lists.newArrayList(0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733, 0644, 0533, 0511);
for (int value : permissionValues) {
Path file = new Path("/createfile" + value);
FsPermission permission = FsPermission.createImmutable((short) value);
FSDataOutputStream o = sTFS.create(file, permission, false, /* ignored */
10, /* ignored */
(short) 1, /* ignored */
512, /* ignored */
null);
o.writeBytes("Test Bytes");
o.close();
FileStatus fs = sTFS.getFileStatus(file);
Assert.assertEquals(permission, fs.getPermission());
}
}Example 55
| Project: aliyun-odps-java-sdk-master File: VolumeFileSystem.java View source code |
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
Path absF = fixRelativePart(f);
String filePath = getPathName(absF);
if (!VolumeFSUtil.isValidName(filePath)) {
throw new IllegalArgumentException(VolumeFSErrorMessageGenerator.isNotAValidODPSVolumeFSFilename(filePath));
}
if (VolumeFSUtil.checkPathIsJustVolume(filePath)) {
throw new IOException(VolumeFSErrorMessageGenerator.theOpreationIsNotAllowed("Create file in the root path!"));
}
try {
return new FSDataOutputStream(new VolumeFSOutputStream(filePath, volumeClient, permission, overwrite, replication, blockSize, progress), statistics);
} catch (VolumeException e) {
logException(e);
throw wrapExceptions(filePath, e);
}
}Example 56
| Project: big-data-plugin-master File: HadoopFileSystemImpl.java View source code |
@Override
public void chmod(final HadoopFileSystemPath hadoopFileSystemPath, int permissions) throws IOException {
final int owner = permissions / 100;
if (owner < 0 || owner > 7) {
throw new IllegalArgumentException("Expected owner permissions between 0 and 7");
}
final int group = (permissions - (owner * 100)) / 10;
if (group < 0 || group > 7) {
throw new IllegalArgumentException("Expected group permissions between 0 and 7");
}
final int other = permissions - (owner * 100) - (group * 10);
if (other < 0 || other > 7) {
throw new IllegalArgumentException("Expected other permissions between 0 and 7");
}
callAndWrapExceptions(new IOExceptionCallable<Void>() {
@Override
public Void call() throws IOException {
getFileSystem().setPermission(HadoopFileSystemPathImpl.toHadoopFileSystemPathImpl(hadoopFileSystemPath).getRawPath(), new FsPermission(FsAction.values()[owner], FsAction.values()[group], FsAction.values()[other]));
return null;
}
});
}Example 57
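A note on the big-data-plugin example above: it relies on FsAction's declaration order (NONE, EXECUTE, WRITE, WRITE_EXECUTE, READ, READ_EXECUTE, READ_WRITE, ALL) matching the octal digits 0 through 7, so FsAction.values()[digit] is the action for that digit. A minimal sketch of the mapping:
int digit = 6;                              // illustrative: the octal digit for rw-
FsAction action = FsAction.values()[digit];
System.out.println(action.SYMBOL);          // rw-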
| Project: cascading-master File: S3HttpFileSystem.java View source code |
@Override
public FSDataOutputStream create(final Path path, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
if (!overwrite && exists(path))
throw new IOException("file already exists: " + path);
if (LOG.isDebugEnabled())
LOG.debug("creating file: " + path);
final ByteArrayOutputStream stream = new ByteArrayOutputStream();
final DigestOutputStream digestStream = new DigestOutputStream(stream, getMD5Digest());
return new FSDataOutputStream(digestStream, null) {
@Override
public void close() throws IOException {
super.close();
S3Object object = S3Util.getObject(s3Service, s3Bucket, path, S3Util.Request.CREATE_OBJECT);
// todo use 'binary/octet-stream'
object.setContentType("text/plain");
object.setMd5Hash(digestStream.getMessageDigest().digest());
// todo buffer to disk instead
byte[] bytes = stream.toByteArray();
object.setDataInputStream(new ByteArrayInputStream(bytes));
object.setContentLength(bytes.length);
if (LOG.isDebugEnabled())
LOG.debug("putting file: " + path);
S3Util.putObject(s3Service, s3Bucket, object);
}
};
}Example 58
| Project: celos-master File: HiveTableDeployer.java View source code |
private Path createTempHdfsFileForInsertion(FixTable fixTable, TestRun testRun) throws Exception {
Path pathToParent = new Path(testRun.getHdfsPrefix(), ".hive");
Path pathTo = new Path(pathToParent, UUID.randomUUID().toString());
FileSystem fileSystem = testRun.getCiContext().getFileSystem();
fileSystem.mkdirs(pathTo.getParent());
FSDataOutputStream outputStream = fileSystem.create(pathTo);
CSVWriter writer = new CSVWriter(new OutputStreamWriter(outputStream), '\t', CSVWriter.NO_QUOTE_CHARACTER);
for (FixTable.FixRow fixRow : fixTable.getRows()) {
List<String> rowData = Lists.newArrayList();
for (String colName : fixTable.getColumnNames()) {
rowData.add(fixRow.getCells().get(colName));
}
String[] dataArray = rowData.toArray(new String[rowData.size()]);
writer.writeNext(dataArray);
}
writer.close();
fileSystem.setPermission(pathToParent, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
fileSystem.setPermission(pathTo, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
return pathTo;
}Example 59
| Project: Cloud-Stenography-master File: TestCopyFiles.java View source code |
public void testPreserveOption() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster(conf, 2, true, null);
String nnUri = FileSystem.getDefaultUri(conf).toString();
FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
{
//test preserving user
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(nnUri, "/srcdat", files);
for (int i = 0; i < srcstat.length; i++) {
fs.setOwner(srcstat[i].getPath(), "u" + i, null);
}
ToolRunner.run(new DistCp(conf), new String[] { "-pu", nnUri + "/srcdat", nnUri + "/destdat" });
assertTrue("Source and destination directories do not match.", checkFiles(nnUri, "/destdat", files));
FileStatus[] dststat = getFileStatus(nnUri, "/destdat", files);
for (int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
}
deldir(nnUri, "/destdat");
deldir(nnUri, "/srcdat");
}
{
//test preserving group
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(nnUri, "/srcdat", files);
for (int i = 0; i < srcstat.length; i++) {
fs.setOwner(srcstat[i].getPath(), null, "g" + i);
}
ToolRunner.run(new DistCp(conf), new String[] { "-pg", nnUri + "/srcdat", nnUri + "/destdat" });
assertTrue("Source and destination directories do not match.", checkFiles(nnUri, "/destdat", files));
FileStatus[] dststat = getFileStatus(nnUri, "/destdat", files);
for (int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, "g" + i, dststat[i].getGroup());
}
deldir(nnUri, "/destdat");
deldir(nnUri, "/srcdat");
}
{
//test preserving mode
MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
FileStatus[] srcstat = getFileStatus(nnUri, "/srcdat", files);
FsPermission[] permissions = new FsPermission[srcstat.length];
for (int i = 0; i < srcstat.length; i++) {
permissions[i] = new FsPermission((short) (i & 0666));
fs.setPermission(srcstat[i].getPath(), permissions[i]);
}
ToolRunner.run(new DistCp(conf), new String[] { "-pp", nnUri + "/srcdat", nnUri + "/destdat" });
assertTrue("Source and destination directories do not match.", checkFiles(nnUri, "/destdat", files));
FileStatus[] dststat = getFileStatus(nnUri, "/destdat", files);
for (int i = 0; i < dststat.length; i++) {
assertEquals("i=" + i, permissions[i], dststat[i].getPermission());
}
deldir(nnUri, "/destdat");
deldir(nnUri, "/srcdat");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}Example 60
| Project: Cubert-master File: AvroUtils.java View source code |
public static void createFileIfNotExists(BlockSchema fileSchema, String path) throws IOException {
Configuration conf = new JobConf();
FileSystem fs = FileSystem.get(conf);
if (fs.exists(new Path(path)))
return;
Schema avroSchema = convertFromBlockSchema("CUBERT_MV_RECORD", fileSchema);
System.out.println("Creating avro file with schema = " + avroSchema);
GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(avroSchema);
DataFileWriter<GenericRecord> writer = new DataFileWriter<GenericRecord>(datumWriter);
FSDataOutputStream fout = FileSystem.create(fs, new Path(path), new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE));
writer.create(avroSchema, fout);
writer.flush();
writer.close();
}Example 61
| Project: DistCpV2-0.20.203-master File: TestCopyCommitter.java View source code |
@Test
public void testPreserveStatus() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContext(taskAttemptContext.getConfiguration(), taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
FsPermission sourcePerm = new FsPermission((short) 511);
FsPermission initialPerm = new FsPermission((short) 448);
sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
options.preserve(FileAttribute.PERMISSION);
options.appendToConf(conf);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
//Test for idempotent commit
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing for preserve status", e);
Assert.fail("Preserve status failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
}
}Example 62
| Project: drill-master File: TestCTTAS.java View source code |
@BeforeClass
public static void init() throws Exception {
MockUp<UUID> uuidMockUp = mockRandomUUID(session_id);
Properties testConfigurations = cloneDefaultTestConfigProperties();
testConfigurations.put(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE, TEMP_SCHEMA);
updateTestCluster(1, DrillConfig.create(testConfigurations));
uuidMockUp.tearDown();
StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
FileSystemConfig pluginConfig = (FileSystemConfig) pluginRegistry.getPlugin(test_schema).getConfig();
pluginConfig.workspaces.put(temp2_wk, new WorkspaceConfig(TestUtilities.createTempDir(), true, null));
pluginRegistry.createOrUpdate(test_schema, pluginConfig, true);
fs = FileSystem.get(new Configuration());
expectedFolderPermission = new FsPermission(StorageStrategy.TEMPORARY.getFolderPermission());
expectedFilePermission = new FsPermission(StorageStrategy.TEMPORARY.getFilePermission());
}Example 63
| Project: flume-master File: TestUtil.java View source code |
@Override
public FileStatus getFileStatus(Path path) throws IOException {
File file = pathToFile(path);
if (!file.exists()) {
throw new FileNotFoundException("Can't find " + path);
}
// get close enough
short mod = 0;
if (file.canRead()) {
mod |= 0444;
}
if (file.canWrite()) {
mod |= 0200;
}
if (file.canExecute()) {
mod |= 0111;
}
ShimLoader.getHadoopShims();
return new FileStatus(file.length(), file.isDirectory(), 1, 1024, file.lastModified(), file.lastModified(), FsPermission.createImmutable(mod), "owen", "users", path);
}Example 64
| Project: guagua-master File: GuaguaSplitWriter.java View source code |
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, Configuration conf, FileSystem fs, T[] splits) throws IOException, InterruptedException {
FSDataOutputStream out = createFile(fs, JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
out.close();
writeJobSplitMetaInfo(fs, JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion, info);
}Example 65
| Project: hadoop-distcp-mr1-master File: TestCopyCommitter.java View source code |
@Test
public void testPreserveStatus() {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(), taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
FsPermission sourcePerm = new FsPermission((short) 511);
FsPermission initialPerm = new FsPermission((short) 448);
sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);
DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
options.preserve(FileAttribute.PERMISSION);
options.appendToConf(conf);
CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile, options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
//Test for idempotent commit
committer.commitJob(jobContext);
if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
Assert.fail("Permission don't match");
}
} catch (IOException e) {
LOG.error("Exception encountered while testing for preserve status", e);
Assert.fail("Preserve status failure");
} finally {
TestDistCpUtils.delete(fs, "/tmp1");
}
}Example 66
| Project: hbase-master File: TestFSUtils.java View source code |
@Test
public void testPermMask() throws Exception {
// default fs permission
FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// 'hbase.data.umask.enable' is false. We will get default fs permission.
assertEquals(FsPermission.getFileDefault(), defaultFsPerm);
conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
// first check that we don't crash if we don't have perms set
FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// default 'hbase.data.umask' is 000, and this umask will be used when
// 'hbase.data.umask.enable' is true.
// Therefore we will not get the real fs default in this case.
// Instead we will get the starting point FULL_RWX_PERMISSIONS
assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);
conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
// now check that we get the right perms
FsPermission filePerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(new FsPermission("700"), filePerm);
// then that the correct file is created
Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
try {
FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
out.close();
FileStatus stat = fs.getFileStatus(p);
assertEquals(new FsPermission("700"), stat.getPermission());
// and then cleanup
} finally {
fs.delete(p, true);
}
}Example 67
| Project: HBase-Research-master File: SnapshotDescriptionUtils.java View source code |
/**
* Write the snapshot description into the working directory of a snapshot
* @param snapshot description of the snapshot being taken
* @param workingDir working directory of the snapshot
* @param fs {@link FileSystem} on which the snapshot should be taken
* @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
* failure
*/
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs) throws IOException {
FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY);
Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
try {
FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
try {
snapshot.writeTo(out);
} finally {
out.close();
}
} catch (IOException e) {
if (!fs.delete(snapshotInfo, false)) {
String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
LOG.error(msg);
throw new IOException(msg);
}
}
}Example 68
| Project: hindex-master File: SnapshotDescriptionUtils.java View source code |
/**
* Write the snapshot description into the working directory of a snapshot
* @param snapshot description of the snapshot being taken
* @param workingDir working directory of the snapshot
* @param fs {@link FileSystem} on which the snapshot should be taken
* @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
* failure
*/
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs) throws IOException {
FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY);
Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
try {
FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
try {
snapshot.writeTo(out);
} finally {
out.close();
}
} catch (IOException e) {
if (!fs.delete(snapshotInfo, false)) {
String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
LOG.error(msg);
throw new IOException(msg);
}
}
}Example 69
| Project: incubator-slider-master File: CoreFileSystem.java View source code |
/**
* Create the Slider cluster path for a named cluster and all its subdirs
* This is a directory; a mkdirs() operation is executed
* to ensure that it is there.
*
* @param instancePaths instance paths
* @throws IOException trouble
* @throws SliderException slider-specific exceptions
*/
public void createClusterDirectories(InstancePaths instancePaths) throws IOException, SliderException {
Path instanceDir = instancePaths.instanceDir;
verifyDirectoryNonexistent(instanceDir);
FsPermission clusterPerms = getInstanceDirectoryPermissions();
createWithPermissions(instanceDir, clusterPerms);
createWithPermissions(instancePaths.snapshotConfPath, clusterPerms);
createWithPermissions(instancePaths.generatedConfPath, clusterPerms);
createWithPermissions(instancePaths.historyPath, clusterPerms);
createWithPermissions(instancePaths.tmpPathAM, clusterPerms);
// Data Directory
String dataOpts = configuration.get(SliderXmlConfKeys.DATA_DIRECTORY_PERMISSIONS, SliderXmlConfKeys.DEFAULT_DATA_DIRECTORY_PERMISSIONS);
log.debug("Setting data directory permissions to {}", dataOpts);
createWithPermissions(instancePaths.dataPath, new FsPermission(dataOpts));
}Example 70
| Project: incubator-tajo-master File: TajoMaster.java View source code |
private void checkAndInitializeSystemDirectories() throws IOException {
// Get Tajo root dir
this.tajoRootPath = TajoConf.getTajoRootDir(systemConf);
LOG.info("Tajo Root Directory: " + tajoRootPath);
// Check and Create Tajo root dir
this.defaultFS = tajoRootPath.getFileSystem(systemConf);
systemConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString());
LOG.info("FileSystem (" + this.defaultFS.getUri() + ") is initialized.");
if (!defaultFS.exists(tajoRootPath)) {
defaultFS.mkdirs(tajoRootPath, new FsPermission(TAJO_ROOT_DIR_PERMISSION));
LOG.info("Tajo Root Directory '" + tajoRootPath + "' is created.");
}
// Check and Create system and system resource dir
Path systemPath = TajoConf.getSystemDir(systemConf);
if (!defaultFS.exists(systemPath)) {
defaultFS.mkdirs(systemPath, new FsPermission(SYSTEM_DIR_PERMISSION));
LOG.info("System dir '" + systemPath + "' is created");
}
Path systemResourcePath = TajoConf.getSystemResourceDir(systemConf);
if (!defaultFS.exists(systemResourcePath)) {
defaultFS.mkdirs(systemResourcePath, new FsPermission(SYSTEM_RESOURCE_DIR_PERMISSION));
LOG.info("System resource dir '" + systemResourcePath + "' is created");
}
// Get Warehouse dir
this.wareHousePath = TajoConf.getWarehouseDir(systemConf);
LOG.info("Tajo Warehouse dir: " + wareHousePath);
// Check and Create Warehouse dir
if (!defaultFS.exists(wareHousePath)) {
defaultFS.mkdirs(wareHousePath, new FsPermission(WAREHOUSE_DIR_PERMISSION));
LOG.info("Warehouse dir '" + wareHousePath + "' is created");
}
Path stagingPath = TajoConf.getStagingDir(systemConf);
LOG.info("Staging dir: " + wareHousePath);
if (!defaultFS.exists(stagingPath)) {
defaultFS.mkdirs(stagingPath, new FsPermission(STAGING_ROOTDIR_PERMISSION));
LOG.info("Staging dir '" + stagingPath + "' is created");
}
}Example 71
| Project: incubator-tez-master File: MiniTezCluster.java View source code |
@Override
public void serviceInit(Configuration conf) throws Exception {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
// blacklisting disabled to prevent scheduling issues
conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
}
if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
// nothing defined. set quick delete value
conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0l);
}
File appJarLocalFile = new File(MiniTezCluster.APPJAR);
if (!appJarLocalFile.exists()) {
String message = "TezAppJar " + MiniTezCluster.APPJAR + " not found. Exiting.";
LOG.info(message);
throw new TezUncheckedException(message);
}
FileSystem fs = FileSystem.get(conf);
Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
// Copy AppJar and make it public.
Path appMasterJar = new Path(MiniTezCluster.APPJAR);
fs.copyFromLocalFile(appMasterJar, appRemoteJar);
fs.setPermission(appRemoteJar, new FsPermission("777"));
conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));
// VMEM monitoring disabled, PMEM monitoring enabled.
conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
try {
Path stagingPath = FileContext.getFileContext(conf).makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
/*
* Re-configure the staging path on Windows if the file system is localFs.
* We need to use an absolute path that contains the drive letter. The unit
* test could run on a different drive than the AM. We can run into the
* issue that job files are localized to the drive where the test runs on,
* while the AM starts on a different drive and fails to find the job
* metafiles. Using absolute path can avoid this ambiguity.
*/
if (Path.WINDOWS) {
if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
}
}
FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
if (fc.util().exists(stagingPath)) {
LOG.info(stagingPath + " exists! deleting...");
fc.delete(stagingPath, true);
}
LOG.info("mkdir: " + stagingPath);
fc.mkdir(stagingPath, null, true);
//mkdir done directory as well
String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
Path doneDirPath = fc.makeQualified(new Path(doneDir));
fc.mkdir(doneDirPath, null, true);
} catch (IOException e) {
throw new TezUncheckedException("Could not create staging directory. ", e);
}
conf.set(MRConfig.MASTER_ADDRESS, "test");
//configure the shuffle service in NM
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);
// Non-standard shuffle port
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class, ContainerExecutor.class);
// TestMRJobs is for testing non-uberized operation only; see TestUberAM
// for corresponding uberized tests.
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
super.serviceInit(conf);
}Example 72
| Project: jst-master File: JstormOnYarn.java View source code |
private void addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId, Map<String, LocalResource> localResources, String resources) throws IOException {
String suffix = jstormClientContext.appName + JOYConstants.BACKLASH + appId + JOYConstants.BACKLASH + fileDstPath;
Path dst = new Path(fs.getHomeDirectory(), suffix);
if (fileSrcPath == null) {
FSDataOutputStream ostream = null;
try {
ostream = FileSystem.create(fs, dst, new FsPermission(JOYConstants.FS_PERMISSION));
ostream.writeUTF(resources);
} finally {
IOUtils.closeQuietly(ostream);
}
} else {
fs.copyFromLocalFile(new Path(fileSrcPath), dst);
}
FileStatus scFileStatus = fs.getFileStatus(dst);
LocalResource scRsrc = LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(dst.toUri()), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, scFileStatus.getLen(), scFileStatus.getModificationTime());
localResources.put(fileDstPath, scRsrc);
}Example 73
| Project: kiji-mapreduce-master File: KijiBulkLoad.java View source code |
/**
* Recursively sets the permissions to 777 on the HFiles. There is no built-in way in the
* Hadoop Java API to recursively set permissions on a directory, so we implement it here.
*
* @param path The Path to the directory to chmod.
* @throws IOException on IOException.
*/
private void recursiveGrantAllHFilePermissions(Path path) throws IOException {
FileSystem hdfs = path.getFileSystem(getConf());
// Set the permissions on the path itself.
hdfs.setPermission(path, FsPermission.createImmutable((short) 0777));
// Recurse into any files and directories in the path.
// We must use listStatus because listFiles does not list subdirectories.
FileStatus[] fileStatuses = hdfs.listStatus(path);
for (FileStatus fileStatus : fileStatuses) {
if (!fileStatus.getPath().equals(path)) {
recursiveGrantAllHFilePermissions(fileStatus.getPath());
}
}
}Example 74
| Project: knox-master File: ShellTest.java View source code |
private void testPutGetScript(String script) throws IOException, URISyntaxException {
setupLogging();
DistributedFileSystem fileSystem = miniDFSCluster.getFileSystem();
Path dir = new Path("/user/guest/example");
fileSystem.delete(dir, true);
fileSystem.mkdirs(dir, new FsPermission("777"));
fileSystem.setOwner(dir, "guest", "users");
Binding binding = new Binding();
binding.setProperty("gateway", driver.getClusterUrl());
URL readme = driver.getResourceUrl("README");
File file = new File(readme.toURI());
System.out.println(file.exists());
binding.setProperty("file", file.getAbsolutePath());
GroovyShell shell = new GroovyShell(binding);
shell.evaluate(driver.getResourceUrl(script).toURI());
String status = (String) binding.getProperty("status");
assertNotNull(status);
System.out.println(status);
String fetchedFile = (String) binding.getProperty("fetchedFile");
assertNotNull(fetchedFile);
System.out.println(fetchedFile);
assertThat(fetchedFile, containsString("README"));
}Example 75
| Project: kylo-master File: CreateHDFSFolder.java View source code |
/**
* @param context The context provides configuration properties from the processor
* @throws IOException in the event
* @see OnScheduled
*/
@OnScheduled
public void onScheduled(ProcessContext context) throws IOException {
super.abstractOnScheduled(context);
// Set umask once, to avoid thread safety issues doing it in onTrigger
final PropertyValue umaskProp = context.getProperty(UMASK);
final short dfsUmask = resolveUMask(umaskProp);
final Configuration conf = getConfiguration();
FsPermission.setUMask(conf, new FsPermission(dfsUmask));
}Example 76
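A note on the kylo example above: FsPermission.setUMask stores the umask in the Configuration and getUMask reads it back; the default create() paths typically combine that umask with the requested mode via applyUMask, as the clue example earlier shows. A minimal round-trip sketch (the 077 umask is illustrative only):
// import org.apache.hadoop.conf.Configuration;
Configuration conf = new Configuration();
FsPermission umask = new FsPermission((short) 0077);
FsPermission.setUMask(conf, umask);
System.out.println(FsPermission.getUMask(conf).equals(umask));  // true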
| Project: logdb-master File: HDFSFilePath.java View source code |
@Override
public boolean canRead() throws SecurityException {
String username = System.getProperty("user.name");
FileStatus fs;
try {
fs = root.getFileSystem().getFileStatus(path);
} catch (IOException e) {
throw new IllegalStateException("Unexpected IOException", e);
}
FsPermission permission = fs.getPermission();
// TODO handle user group
FsAction action = (username.equals(fs.getOwner())) ? permission.getUserAction() : permission.getOtherAction();
return action.and(FsAction.READ).equals(FsAction.READ);
}Example 77
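A note on the logdb example above: the readability test action.and(FsAction.READ).equals(FsAction.READ) can be written more directly with FsAction.implies. A short sketch:
FsAction action = FsAction.READ_WRITE;
System.out.println(action.and(FsAction.READ).equals(FsAction.READ));  // true
System.out.println(action.implies(FsAction.READ));                    // true, the equivalent shorthand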
| Project: lumify-master File: ClientBase.java View source code |
private void addToLocalResources(FileSystem fs, Path remotePath, String fileSrcPath, String fileDstPath, Map<String, LocalResource> localResources, String resources) throws IOException {
Path dst = new Path(remotePath, fileDstPath);
if (fileSrcPath == null) {
FSDataOutputStream out = null;
try {
out = FileSystem.create(fs, dst, new FsPermission(FILE_PERMISSIONS));
out.writeUTF(resources);
} finally {
IOUtils.closeQuietly(out);
}
} else {
fs.copyFromLocalFile(new Path(fileSrcPath), dst);
}
FileStatus scFileStatus = fs.getFileStatus(dst);
LocalResource localResource = LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(dst.toUri()), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, scFileStatus.getLen(), scFileStatus.getModificationTime());
localResources.put(fileDstPath, localResource);
}Example 78
| Project: oozie-master File: TestFsActionExecutor.java View source code |
public void testChmod() throws Exception {
FsActionExecutor ae = new FsActionExecutor();
FileSystem fs = getFileSystem();
Path path = new Path(getFsTestCaseDir(), "dir");
Path child = new Path(path, "child");
Path grandchild = new Path(child, "grandchild");
fs.mkdirs(grandchild);
fs.setPermission(path, FsPermission.valueOf("-rwx------"));
fs.setPermission(child, FsPermission.valueOf("-rwxr-----"));
fs.setPermission(grandchild, FsPermission.valueOf("-rwx---r--"));
assertEquals("rwx------", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr-----", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
Context context = createContext("<fs/>");
ae.chmod(context, path, "-rwx-----x", false);
assertEquals("rwx-----x", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr-----", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
ae.chmod(context, path, "-rwxr----x", true);
assertEquals("rwxr----x", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr----x", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
}Example 79
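A note on the oozie example above: it mixes the two symbolic spellings. FsPermission.valueOf expects the 10-character ls-style form with a leading file-type character ("-rwx------"), while toString prints only the 9 permission characters ("rwx------"), which is why the assertions drop the leading dash. A small sketch of the round trip (the mode is illustrative):
FsPermission p = FsPermission.valueOf("-rwxr-x---");
System.out.println(p);            // rwxr-x---
System.out.println(p.toShort());  // 488, the decimal value of octal 0750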
| Project: pbase-master File: TestFSUtils.java View source code |
@Test
public void testPermMask() throws Exception {
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
// default fs permission
FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// 'hbase.data.umask.enable' is false. We will get default fs permission.
assertEquals(FsPermission.getFileDefault(), defaultFsPerm);
conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
// first check that we don't crash if we don't have perms set
FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// default 'hbase.data.umask' is 000, and this umask will be used when
// 'hbase.data.umask.enable' is true.
// Therefore we will not get the real fs default in this case.
// Instead we will get the starting point FULL_RWX_PERMISSIONS
assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);
conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
// now check that we get the right perms
FsPermission filePerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(new FsPermission("700"), filePerm);
// then that the correct file is created
Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
try {
FSDataOutputStream out = FSUtils.create(fs, p, filePerm, null);
out.close();
FileStatus stat = fs.getFileStatus(p);
assertEquals(new FsPermission("700"), stat.getPermission());
// and then cleanup
} finally {
fs.delete(p, true);
}
}Example 80
| Project: prjvmac-master File: TestFsActionExecutor.java View source code |
public void testChmod() throws Exception {
FsActionExecutor ae = new FsActionExecutor();
FileSystem fs = getFileSystem();
Path path = new Path(getFsTestCaseDir(), "dir");
Path child = new Path(path, "child");
Path grandchild = new Path(child, "grandchild");
fs.mkdirs(grandchild);
fs.setPermission(path, FsPermission.valueOf("-rwx------"));
fs.setPermission(child, FsPermission.valueOf("-rwxr-----"));
fs.setPermission(grandchild, FsPermission.valueOf("-rwx---r--"));
assertEquals("rwx------", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr-----", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
Context context = createContext("<fs/>");
ae.chmod(context, path, "-rwx-----x", false);
assertEquals("rwx-----x", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr-----", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
ae.chmod(context, path, "-rwxr----x", true);
assertEquals("rwxr----x", fs.getFileStatus(path).getPermission().toString());
assertEquals("rwxr----x", fs.getFileStatus(child).getPermission().toString());
assertEquals("rwx---r--", fs.getFileStatus(grandchild).getPermission().toString());
}Example 81
| Project: Quatrain-MapReduce-master File: TestFileAppend2.java View source code |
/**
* Creates one file, writes a few bytes to it and then closes it.
* Reopens the same file for appending, writes all blocks and then closes it.
* Verify that all data exists in file.
*/
public void testSimpleAppend() throws IOException {
Configuration conf = new Configuration();
if (simulatedStorage) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
conf.setInt("dfs.datanode.handler.count", 50);
conf.setInt("dfs.datanode.handler.count", 50);
conf.setBoolean("dfs.support.append", true);
initBuffer(fileSize);
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
FileSystem fs = cluster.getFileSystem();
try {
{
// test appending to a file.
// create a new file.
Path file1 = new Path("/simpleAppend.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
System.out.println("Created file simpleAppend.dat");
// write to file
// io.bytes.per.checksum bytes
int mid = 186;
System.out.println("Writing " + mid + " bytes to file " + file1);
stm.write(fileContents, 0, mid);
stm.close();
System.out.println("Wrote and Closed first part of file.");
// write to file
// io.bytes.per.checksum bytes
int mid2 = 607;
System.out.println("Writing " + mid + " bytes to file " + file1);
stm = fs.append(file1);
stm.write(fileContents, mid, mid2 - mid);
stm.close();
System.out.println("Wrote and Closed second part of file.");
// write the remainder of the file
stm = fs.append(file1);
// ensure getPos is set to reflect existing size of the file
assertTrue(stm.getPos() > 0);
System.out.println("Writing " + (fileSize - mid2) + " bytes to file " + file1);
stm.write(fileContents, mid2, fileSize - mid2);
System.out.println("Written second part of file");
stm.close();
System.out.println("Wrote and Closed second part of file.");
// verify that entire file is good
checkFullFile(fs, file1);
}
{
// test appending to a non-existing file.
FSDataOutputStream out = null;
try {
out = fs.append(new Path("/non-existing.dat"));
fail("Expected to have FileNotFoundException");
} catch (java.io.FileNotFoundException fnfe) {
System.out.println("Good: got " + fnfe);
fnfe.printStackTrace(System.out);
} finally {
IOUtils.closeStream(out);
}
}
{
// test append permission.
//set root to all writable
Path root = new Path("/");
fs.setPermission(root, new FsPermission((short) 0777));
fs.close();
// login as a different user
final UserGroupInformation superuser = UserGroupInformation.getCurrentUGI();
String username = "testappenduser";
String group = "testappendgroup";
assertFalse(superuser.getUserName().equals(username));
assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
UnixUserGroupInformation appenduser = UnixUserGroupInformation.createImmutable(new String[] { username, group });
UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, appenduser);
fs = FileSystem.get(conf);
// create a file
Path dir = new Path(root, getClass().getSimpleName());
Path foo = new Path(dir, "foo.dat");
FSDataOutputStream out = null;
int offset = 0;
try {
out = fs.create(foo);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
} finally {
IOUtils.closeStream(out);
}
// change dir and foo to minimal permissions.
fs.setPermission(dir, new FsPermission((short) 0100));
fs.setPermission(foo, new FsPermission((short) 0200));
// try append, should succeed
out = null;
try {
out = fs.append(foo);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
} finally {
IOUtils.closeStream(out);
}
// change dir and foo to all but no write on foo.
fs.setPermission(foo, new FsPermission((short) 0577));
fs.setPermission(dir, new FsPermission((short) 0777));
// try append, should fail
out = null;
try {
out = fs.append(foo);
fail("Expected to have AccessControlException");
} catch (AccessControlException ace) {
System.out.println("Good: got " + ace);
ace.printStackTrace(System.out);
} finally {
IOUtils.closeStream(out);
}
}
} catch (IOException e) {
System.out.println("Exception :" + e);
throw e;
} catch (Throwable e) {
System.out.println("Throwable :" + e);
e.printStackTrace();
throw new IOException("Throwable : " + e);
} finally {
fs.close();
cluster.shutdown();
}
}Example 82
| Project: tajo-cdh-master File: QueryMasterTask.java View source code |
/**
* It initializes the final output and staging directory and sets
* them to variables.
*/
private void initStagingDir() throws IOException {
String realUser;
String currentUser;
UserGroupInformation ugi;
ugi = UserGroupInformation.getLoginUser();
realUser = ugi.getShortUserName();
currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
FileSystem defaultFS = TajoConf.getWarehouseDir(systemConf).getFileSystem(systemConf);
Path stagingDir = null;
Path outputDir = null;
try {
////////////////////////////////////////////
// Create Output Directory
////////////////////////////////////////////
stagingDir = new Path(TajoConf.getStagingDir(systemConf), queryId.toString());
if (defaultFS.exists(stagingDir)) {
throw new IOException("The staging directory '" + stagingDir + "' already exists");
}
defaultFS.mkdirs(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
FileStatus fsStatus = defaultFS.getFileStatus(stagingDir);
String owner = fsStatus.getOwner();
if (!owner.isEmpty() && !(owner.equals(currentUser) || owner.equals(realUser))) {
throw new IOException("The ownership on the user's query " + "directory " + stagingDir + " is not as expected. " + "It is owned by " + owner + ". The directory must " + "be owned by the submitter " + currentUser + " or " + "by " + realUser);
}
if (!fsStatus.getPermission().equals(STAGING_DIR_PERMISSION)) {
LOG.info("Permissions on staging directory " + stagingDir + " are " + "incorrect: " + fsStatus.getPermission() + ". Fixing permissions " + "to correct value " + STAGING_DIR_PERMISSION);
defaultFS.setPermission(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
}
// Create a subdirectories
defaultFS.mkdirs(new Path(stagingDir, TajoConstants.RESULT_DIR_NAME));
LOG.info("The staging dir '" + stagingDir + "' is created.");
queryContext.setStagingDir(stagingDir);
/////////////////////////////////////////////////
if (queryContext.hasOutputPath()) {
outputDir = queryContext.getOutputPath();
if (!queryContext.isOutputOverwrite()) {
if (defaultFS.exists(outputDir)) {
throw new IOException("The output directory '" + outputDir + " already exists.");
}
}
}
} catch (IOException ioe) {
if (stagingDir != null && defaultFS.exists(stagingDir)) {
defaultFS.delete(stagingDir, true);
LOG.info("The staging directory '" + stagingDir + "' is deleted");
}
throw ioe;
}
}Example 83
| Project: testcases-master File: HDFSTest.java View source code |
@org.junit.Test
public void defaultPermissionsTest() throws Exception {
FileSystem fileSystem = hdfsCluster.getFileSystem();
// Write a file
final Path file = new Path("/tmp/tmpdir/data-file2");
FSDataOutputStream out = fileSystem.create(file);
for (int i = 0; i < 1024; ++i) {
out.write(("data" + i + "\n").getBytes("UTF-8"));
out.flush();
}
out.close();
// Check status
// FileStatus status = fileSystem.getFileStatus(file);
// System.out.println("OWNER: " + status.getOwner());
// System.out.println("GROUP: " + status.getGroup());
// System.out.println("PERM: " + status.getPermission().toString());
// fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
// fileSystem.setOwner(file, "bob", null);
// Now try to read the file as "bob" - this should be allowed
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", defaultFs);
FileSystem fs = FileSystem.get(conf);
// Read the file
FSDataInputStream in = fs.open(file);
ByteArrayOutputStream output = new ByteArrayOutputStream();
IOUtils.copy(in, output);
String content = new String(output.toByteArray());
Assert.assertTrue(content.startsWith("data0"));
fs.close();
return null;
}
});
// Write to the file as the owner, this should be allowed
out = fileSystem.append(file);
out.write(("new data\n").getBytes("UTF-8"));
out.flush();
out.close();
// Now try to write to the file as "bob" - this should not be allowed
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", defaultFs);
FileSystem fs = FileSystem.get(conf);
// Write to the file
try {
fs.append(file);
Assert.fail("Failure expected on an incorrect permission");
} catch (AccessControlException ex) {
}
fs.close();
return null;
}
});
}

Example 84
| Project: hbase-cache-master File: TestFSUtils.java View source code |
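The test below expects the umask string "077" to yield an effective permission of "700". A minimal sketch (class name illustrative, not from the project) of that relationship using applyUMask():

import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskDemo {
    public static void main(String[] args) {
        FsPermission umask = new FsPermission("077");
        // getDefault() is 777; applying a 077 umask clears the group/other bits, leaving 700.
        FsPermission masked = FsPermission.getDefault().applyUMask(umask);
        System.out.println(masked);                                  // rwx------
        System.out.println(masked.equals(new FsPermission("700")));  // true
    }
}
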
@Test
public void testPermMask() throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
FileSystem fs = FileSystem.get(conf);
// first check that we don't crash if we don't have perms set
FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(FsPermission.getDefault(), defaultPerms);
conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
// now check that we get the right perms
FsPermission filePerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(new FsPermission("700"), filePerm);
// then that the correct file is created
Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
try {
FSDataOutputStream out = FSUtils.create(fs, p, filePerm);
out.close();
FileStatus stat = fs.getFileStatus(p);
assertEquals(new FsPermission("700"), stat.getPermission());
// and then cleanup
} finally {
fs.delete(p, true);
}
}

Example 85
| Project: hbase-trunk-mttr-master File: TestFSUtils.java View source code |
@Test
public void testPermMask() throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
FileSystem fs = FileSystem.get(conf);
// first check that we don't crash if we don't have perms set
FsPermission defaultPerms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(FsPermission.getDefault(), defaultPerms);
conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
// now check that we get the right perms
FsPermission filePerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
assertEquals(new FsPermission("700"), filePerm);
// then that the correct file is created
Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
try {
FSDataOutputStream out = FSUtils.create(fs, p, filePerm);
out.close();
FileStatus stat = fs.getFileStatus(p);
assertEquals(new FsPermission("700"), stat.getPermission());
// and then cleanup
} finally {
fs.delete(p, true);
}
}

Example 86
| Project: ignite-master File: HadoopSecondaryFileSystemConfigurationTest.java View source code |
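Note that the check below constructs its permission with new FsPermission((short) 644); without a leading zero the Java literal is decimal (octal 1204), so it does not denote rw-r--r--. A small sketch (illustrative, not from the project) of the difference between the short and String constructors:

import org.apache.hadoop.fs.permission.FsPermission;

public class ShortConstructorPitfall {
    public static void main(String[] args) {
        // Decimal 644 is octal 1204, so this is not rw-r--r--.
        System.out.println(new FsPermission((short) 644));
        // A leading zero makes the literal octal, and the String form is always octal/symbolic.
        System.out.println(new FsPermission((short) 0644));  // rw-r--r--
        System.out.println(new FsPermission("644"));          // rw-r--r--
    }
}
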
/**
* Perform actual check.
*
* @throws Exception If failed.
*/
@SuppressWarnings("deprecation")
private void check() throws Exception {
before();
try {
Path fsHome = new Path(primaryFsUri);
Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
Path file = new Path(dir, "someFile");
assertPathDoesNotExist(primaryFs, file);
FsPermission fsPerm = new FsPermission((short) 644);
FSDataOutputStream os = primaryFs.create(file, fsPerm, false, 1, (short) 1, 1L, null);
// Try to write something in file.
os.write("abc".getBytes());
os.close();
// Check file status.
FileStatus fileStatus = primaryFs.getFileStatus(file);
assertFalse(fileStatus.isDir());
assertEquals(file, fileStatus.getPath());
assertEquals(fsPerm, fileStatus.getPermission());
} finally {
after();
}
}

Example 87
| Project: incubator-sentry-master File: HiveServerFactory.java View source code |
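Among other setup, the factory below creates warehouse and scratch directories with explicit permissions (0777 and 0733). A minimal local-filesystem sketch (class name and path are illustrative, not from the project) of the mkdirs-then-setPermission pattern it uses for the Hive scratch directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ScratchDirSetup {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path scratch = new Path("target/tmp/hive");
        // mkdirs without an explicit permission uses the directory default reduced by the umask;
        // the explicit setPermission afterwards pins the exact 0733 mode.
        fs.mkdirs(scratch);
        fs.setPermission(scratch, new FsPermission((short) 0733));
        System.out.println(fs.getFileStatus(scratch).getPermission()); // rwx-wx-wx
    }
}
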
public static HiveServer create(HiveServer2Type type, Map<String, String> properties, File baseDir, File confDir, File logDir, String policyFile, FileSystem fileSystem) throws Exception {
if (type.equals(HiveServer2Type.UnmanagedHiveServer2)) {
LOGGER.info("Creating UnmanagedHiveServer");
return new UnmanagedHiveServer();
}
if (!properties.containsKey(WAREHOUSE_DIR)) {
LOGGER.info("fileSystem " + fileSystem.getClass().getSimpleName());
if (fileSystem instanceof DistributedFileSystem) {
@SuppressWarnings("static-access") String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
LOGGER.info("dfsUri " + dfsUri);
properties.put(WAREHOUSE_DIR, dfsUri + "/data");
fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
} else {
properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
}
}
Boolean policyOnHDFS = Boolean.valueOf(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
if (policyOnHDFS) {
// Initialize "hive.exec.scratchdir", according to the description of
// "hive.exec.scratchdir", the permission should be (733).
// <description>HDFS root scratch dir for Hive jobs which gets created with write
// all (733) permission. For each connecting user, an HDFS scratch dir:
// ${hive.exec.scratchdir}/<username> is created,
// with ${hive.scratch.dir.permission}.</description>
fileSystem.mkdirs(new Path("/tmp/hive/"));
fileSystem.setPermission(new Path("/tmp/hive/"), new FsPermission((short) 0733));
} else {
LOGGER.info("Setting an readable path to hive.exec.scratchdir");
properties.put("hive.exec.scratchdir", new File(baseDir, "scratchdir").getPath());
}
if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
properties.put(METASTORE_CONNECTION_URL, String.format("jdbc:derby:;databaseName=%s;create=true", new File(baseDir, "metastore").getPath()));
}
if (!properties.containsKey(ACCESS_TESTING_MODE)) {
properties.put(ACCESS_TESTING_MODE, "true");
}
if (!properties.containsKey(AUTHZ_PROVIDER_RESOURCE)) {
LOGGER.info("Policy File location: " + policyFile);
properties.put(AUTHZ_PROVIDER_RESOURCE, policyFile);
}
if (!properties.containsKey(AUTHZ_PROVIDER)) {
properties.put(AUTHZ_PROVIDER, LocalGroupResourceAuthorizationProvider.class.getName());
}
if (!properties.containsKey(AUTHZ_SERVER_NAME)) {
properties.put(AUTHZ_SERVER_NAME, DEFAULT_AUTHZ_SERVER_NAME);
}
if (!properties.containsKey(HS2_PORT)) {
properties.put(HS2_PORT, String.valueOf(findPort()));
}
if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
properties.put(SUPPORT_CONCURRENCY, "false");
}
if (!properties.containsKey(HADOOPBIN)) {
properties.put(HADOOPBIN, "./target/test-classes/hadoop");
}
// Modify the test resource to have executable permission
java.nio.file.Path hadoopPath = FileSystems.getDefault().getPath("target/test-classes", "hadoop");
if (hadoopPath != null) {
hadoopPath.toFile().setExecutable(true);
}
properties.put(METASTORE_RAW_STORE_IMPL, "org.apache.sentry.binding.metastore.AuthorizingObjectStore");
if (!properties.containsKey(METASTORE_URI) && HiveServer2Type.InternalMetastore.equals(type)) {
// The configuration sentry.metastore.service.users is for the user who
// has all access to get the metadata.
properties.put(METASTORE_BYPASS, "accessAllMetaUser");
properties.put(METASTORE_URI, "thrift://localhost:" + String.valueOf(findPort()));
if (!properties.containsKey(METASTORE_HOOK)) {
properties.put(METASTORE_HOOK, "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
}
properties.put(ConfVars.METASTORESERVERMINTHREADS.varname, "5");
}
// set the SentryMetaStoreFilterHook for HiveServer2 only, not for metastore
if (!HiveServer2Type.InternalMetastore.equals(type)) {
properties.put(ConfVars.METASTORE_FILTER_HOOK.varname, org.apache.sentry.binding.metastore.SentryMetaStoreFilterHook.class.getName());
}
if (!properties.containsKey(METASTORE_BYPASS)) {
properties.put(METASTORE_BYPASS, "hive,impala," + System.getProperty("user.name", ""));
} else {
String tempByPass = properties.get(METASTORE_BYPASS);
tempByPass = "hive,impala," + System.getProperty("user.name", "") + "," + tempByPass;
properties.put(METASTORE_BYPASS, tempByPass);
}
properties.put(METASTORE_SETUGI, "true");
properties.put(METASTORE_CLIENT_TIMEOUT, "100");
properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");
properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true");
String hadoopBinPath = properties.get(HADOOPBIN);
Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
File hadoopBin = new File(hadoopBinPath);
if (!hadoopBin.isFile()) {
Assert.fail("Path to hadoop bin " + hadoopBin.getPath() + " is invalid. " + "Perhaps you missed the download-hadoop profile.");
}
/*
* This hack, setting the hiveSiteURL field removes a previous hack involving
* setting of system properties for each property. Although both are hacks,
* I prefer this hack because once the system properties are set they can
* affect later tests unless those tests clear them. This hack allows for
* a clean switch to a new set of defaults when a new HiveConf object is created.
*/
Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
HiveConf hiveConf = new HiveConf();
HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
for (Map.Entry<String, String> entry : properties.entrySet()) {
LOGGER.info(entry.getKey() + " => " + entry.getValue());
hiveConf.set(entry.getKey(), entry.getValue());
authzConf.set(entry.getKey(), entry.getValue());
}
File hiveSite = new File(confDir, "hive-site.xml");
File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
OutputStream out = new FileOutputStream(accessSite);
authzConf.writeXml(out);
out.close();
// points hive-site.xml at access-site.xml
hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, "file:///" + accessSite.getPath());
if (!properties.containsKey(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)) {
hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
}
hiveConf.set(HIVESERVER2_IMPERSONATION, "false");
out = new FileOutputStream(hiveSite);
hiveConf.writeXml(out);
out.close();
Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(hiveSite.toURI().toURL());
switch(type) {
case EmbeddedHiveServer2:
LOGGER.info("Creating EmbeddedHiveServer");
return new EmbeddedHiveServer();
case InternalHiveServer2:
LOGGER.info("Creating InternalHiveServer");
return new InternalHiveServer(hiveConf);
case InternalMetastore:
LOGGER.info("Creating InternalMetastoreServer");
return new InternalMetastoreServer(hiveConf);
case ExternalHiveServer2:
LOGGER.info("Creating ExternalHiveServer");
return new ExternalHiveServer(hiveConf, confDir, logDir);
default:
throw new UnsupportedOperationException(type.name());
}
}

Example 88
| Project: spring-hadoop-master File: AbstractROFsShellTest.java View source code |
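The chmod test below relies on FsAction.implies() to check individual permission bits. A short sketch (illustrative, not from the project) of how implies() behaves for a "600" permission:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImpliesDemo {
    public static void main(String[] args) {
        FsPermission perm = new FsPermission("600");
        // implies() is a bitwise "covers" check: READ_WRITE implies READ, and every action implies NONE.
        System.out.println(perm.getUserAction());                          // READ_WRITE
        System.out.println(perm.getUserAction().implies(FsAction.READ));   // true
        System.out.println(perm.getGroupAction().implies(FsAction.NONE));  // true
        System.out.println(perm.getOtherAction().implies(FsAction.READ));  // false
    }
}
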
@Test
public void testChmod() throws Exception {
String name = "local/" + UUID.randomUUID() + ".txt";
Resource res = TestUtils.writeToFS(cfg, name);
name = res.getURI().getPath();
FsPermission perm = hadoopFs.getFileStatus(new Path(name)).getPermission();
assertTrue(perm.getGroupAction().implies(FsAction.READ));
assertTrue(perm.getOtherAction().implies(FsAction.READ));
shell.chmod("600", name);
perm = hadoopFs.getFileStatus(new Path(name)).getPermission();
assertTrue(perm.getUserAction().equals(FsAction.READ_WRITE));
assertTrue(perm.getGroupAction().implies(FsAction.NONE));
assertTrue(perm.getOtherAction().implies(FsAction.NONE));
}

Example 89
| Project: tachyon-rdma-master File: TFS.java View source code |
@Override
public FSDataOutputStream create(Path cPath, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
LOG.info("create(" + cPath + ", " + permission + ", " + overwrite + ", " + bufferSize + ", " + replication + ", " + blockSize + ", " + progress + ")");
if (!CommonConf.get().ASYNC_ENABLED) {
String path = Utils.getPathWithoutScheme(cPath);
if (mTFS.exist(path)) {
if (!mTFS.delete(path, false)) {
throw new IOException("Failed to delete existing data " + cPath);
}
}
int fileId = mTFS.createFile(path, blockSize);
TachyonFile file = mTFS.getFile(fileId);
file.setUFSConf(getConf());
return new FSDataOutputStream(file.getOutStream(WriteType.CACHE_THROUGH), null);
}
if (cPath.toString().contains(FIRST_COM_PATH) && !cPath.toString().contains("SUCCESS")) {
String path = Utils.getPathWithoutScheme(cPath);
mTFS.createFile(path, blockSize);
path = path.substring(path.indexOf(FIRST_COM_PATH) + FIRST_COM_PATH.length());
path = path.substring(0, path.indexOf(Constants.PATH_SEPARATOR));
int depId = Integer.parseInt(path);
LOG.info("create(" + cPath + ") : " + path + " " + depId);
path = Utils.getPathWithoutScheme(cPath);
path = path.substring(path.indexOf("part-") + 5);
int index = Integer.parseInt(path);
ClientDependencyInfo info = mTFS.getClientDependencyInfo(depId);
int fileId = info.getChildren().get(index);
LOG.info("create(" + cPath + ") : " + path + " " + index + " " + info + " " + fileId);
TachyonFile file = mTFS.getFile(fileId);
file.setUFSConf(getConf());
// }
return new FSDataOutputStream(file.getOutStream(WriteType.ASYNC_THROUGH), null);
}
if (cPath.toString().contains(RECOMPUTE_PATH) && !cPath.toString().contains("SUCCESS")) {
String path = Utils.getPathWithoutScheme(cPath);
mTFS.createFile(path, blockSize);
path = path.substring(path.indexOf(RECOMPUTE_PATH) + RECOMPUTE_PATH.length());
path = path.substring(0, path.indexOf(Constants.PATH_SEPARATOR));
int depId = Integer.parseInt(path);
LOG.info("create(" + cPath + ") : " + path + " " + depId);
path = Utils.getPathWithoutScheme(cPath);
path = path.substring(path.indexOf("part-") + 5);
int index = Integer.parseInt(path);
ClientDependencyInfo info = mTFS.getClientDependencyInfo(depId);
int fileId = info.getChildren().get(index);
LOG.info("create(" + cPath + ") : " + path + " " + index + " " + info + " " + fileId);
TachyonFile file = mTFS.getFile(fileId);
file.setUFSConf(getConf());
// }
return new FSDataOutputStream(file.getOutStream(WriteType.ASYNC_THROUGH), null);
} else {
String path = Utils.getPathWithoutScheme(cPath);
int fileId;
WriteType type = WriteType.CACHE_THROUGH;
if (mTFS.exist(path)) {
fileId = mTFS.getFileId(path);
type = WriteType.MUST_CACHE;
} else {
fileId = mTFS.createFile(path, blockSize);
}
TachyonFile file = mTFS.getFile(fileId);
file.setUFSConf(getConf());
// }
return new FSDataOutputStream(file.getOutStream(type), null);
}
}

Example 90
| Project: tez-master File: MiniTezCluster.java View source code |
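The mini cluster below publishes its app jar with new FsPermission("777") and also sets the umask key to "000" so the full permission survives file creation. A small sketch (illustrative, not from the project) of reading the configured umask back and applying it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskConfigDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "fs.permissions.umask-mode" = "000" means newly created files keep the full
        // permission passed to create()/mkdirs().
        conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
        FsPermission umask = FsPermission.getUMask(conf);
        System.out.println(umask);                                      // ---------
        System.out.println(new FsPermission("777").applyUMask(umask));  // rwxrwxrwx
    }
}
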
@Override
public void serviceInit(Configuration conf) throws Exception {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
// Use libs from cluster since no build is available
conf.setBoolean(TezConfiguration.TEZ_USE_CLUSTER_HADOOP_LIBS, true);
// blacklisting disabled to prevent scheduling issues
conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
}
if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
// nothing defined. set quick delete value
conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0l);
}
maxTimeToWaitForAppsOnShutdown = conf.getLong(TezConfiguration.TEZ_TEST_MINI_CLUSTER_APP_WAIT_ON_SHUTDOWN_SECS, TezConfiguration.TEZ_TEST_MINI_CLUSTER_APP_WAIT_ON_SHUTDOWN_SECS_DEFAULT);
File appJarLocalFile = new File(MiniTezCluster.APPJAR);
if (!appJarLocalFile.exists()) {
String message = "TezAppJar " + MiniTezCluster.APPJAR + " not found. Exiting.";
LOG.info(message);
throw new TezUncheckedException(message);
} else {
LOG.info("Using Tez AppJar: " + appJarLocalFile.getAbsolutePath());
}
FileSystem fs = FileSystem.get(conf);
Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
// Copy AppJar and make it public.
Path appMasterJar = new Path(MiniTezCluster.APPJAR);
fs.copyFromLocalFile(appMasterJar, appRemoteJar);
fs.setPermission(appRemoteJar, new FsPermission("777"));
conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));
// VMEM monitoring disabled, PMEM monitoring enabled.
conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
try {
Path stagingPath = FileContext.getFileContext(conf).makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
/*
* Re-configure the staging path on Windows if the file system is localFs.
* We need to use an absolute path that contains the drive letter. The unit
* test could run on a different drive than the AM. We can run into the
* issue that job files are localized to the drive the test runs on,
* while the AM starts on a different drive and fails to find the job
* metafiles. Using an absolute path avoids this ambiguity.
*/
if (Path.WINDOWS) {
if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
}
}
FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
if (fc.util().exists(stagingPath)) {
LOG.info(stagingPath + " exists! deleting...");
fc.delete(stagingPath, true);
}
LOG.info("mkdir: " + stagingPath);
fc.mkdir(stagingPath, null, true);
//mkdir done directory as well
String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
Path doneDirPath = fc.makeQualified(new Path(doneDir));
fc.mkdir(doneDirPath, null, true);
} catch (IOException e) {
throw new TezUncheckedException("Could not create staging directory. ", e);
}
conf.set(MRConfig.MASTER_ADDRESS, "test");
//configure the shuffle service in NM
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);
// Non-standard shuffle port
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class, ContainerExecutor.class);
// TestMRJobs is for testing non-uberized operation only; see TestUberAM
// for corresponding uberized tests.
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
super.serviceInit(conf);
}

Example 91
| Project: yaya-master File: InJvmContainerExecutor.java View source code |
/**
* Most of this code is copied from the super class's launchContainer method (unfortunately), since directory
* and other preparation logic is tightly coupled with the actual container launch.
 * It would be nicer if it were broken apart so that the launch method is invoked
 * only once everything is prepared.
*/
private void prepareContainerDirectories(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, String userName, String appId, Path containerWorkDir, List<String> localDirs, List<String> logDirs) {
FsPermission dirPerm = new FsPermission(APPDIR_PERM);
ContainerId containerId = container.getContainerId();
String containerIdStr = ConverterUtils.toString(containerId);
String appIdStr = ConverterUtils.toString(containerId.getApplicationAttemptId().getApplicationId());
try {
for (String sLocalDir : localDirs) {
Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usersdir, userName);
Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE);
Path appDir = new Path(appCacheDir, appIdStr);
Path containerDir = new Path(appDir, containerIdStr);
createDir(containerDir, dirPerm, true);
}
// Create the container log-dirs on all disks
this.createLogDirs(appIdStr, containerIdStr, logDirs);
Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
createDir(tmpDir, dirPerm, false);
// copy launch script to work dir
Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
fc.util().copy(nmPrivateContainerScriptPath, launchDst);
// copy container tokens to work dir
Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
fc.util().copy(nmPrivateTokensPath, tokenDst);
} catch (Exception e) {
throw new IllegalStateException("Failed to prepare container directories for container " + container, e);
}
}

Example 92
| Project: hadoop-analyzer-master File: DistCh.java View source code |
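For plain files the method below compares the requested permission only after applying FILE_UMASK. A minimal sketch of applyUMask() follows; the 0111 umask used here is an assumption for illustration (it strips the execute bits), since the snippet does not show DistCh's actual FILE_UMASK value:

import org.apache.hadoop.fs.permission.FsPermission;

public class ApplyUmaskDemo {
    public static void main(String[] args) {
        // Hypothetical file umask that strips the execute bits.
        FsPermission fileUmask = FsPermission.createImmutable((short) 0111);
        FsPermission requested = new FsPermission("755");
        // For plain files, 755 masked by 0111 becomes 644.
        System.out.println(requested.applyUMask(fileUmask));  // rw-r--r--
    }
}
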
private boolean isDifferent(FileStatus original) {
if (owner != null && !owner.equals(original.getOwner())) {
return true;
}
if (group != null && !group.equals(original.getGroup())) {
return true;
}
if (permission != null) {
FsPermission orig = original.getPermission();
return original.isDir() ? !permission.equals(orig) : !permission.applyUMask(FILE_UMASK).equals(orig);
}
return false;
}

Example 93
| Project: HiveRunner-master File: StandaloneHiveRunner.java View source code |
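The runner below resets the temporary folder to FsPermission.getDirDefault(). A short sketch (illustrative, not from the project) of the built-in default permissions:

import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultsDemo {
    public static void main(String[] args) {
        // The directory default is 777 and the file default is 666; on HDFS the effective
        // permission is further reduced by the configured umask.
        System.out.println(FsPermission.getDirDefault());   // rwxrwxrwx
        System.out.println(FsPermission.getFileDefault());  // rw-rw-rw-
        System.out.println(FsPermission.getDefault());      // rwxrwxrwx
    }
}
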
/**
* Drives the unit test.
*/
private void evaluateStatement(Object target, TemporaryFolder temporaryFolder, Statement base) throws Throwable {
container = null;
FileUtil.setPermission(temporaryFolder.getRoot(), FsPermission.getDirDefault());
try {
LOGGER.info("Setting up {} in {}", getName(), temporaryFolder.getRoot().getAbsolutePath());
container = createHiveServerContainer(target, temporaryFolder);
base.evaluate();
} finally {
tearDown();
}
}

Example 94
| Project: mammoth-master File: LinuxTaskController.java View source code |
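The method below marks the serialized task file with FsPermission.createImmutable((short) 0755). A minimal local-filesystem sketch (class name and path are illustrative, not from the project) of sharing such an immutable permission as a constant:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImmutablePermissionDemo {
    // Safe to share as a constant: createImmutable() returns a permission object
    // that cannot be re-initialized in place.
    private static final FsPermission SCRIPT_PERM = FsPermission.createImmutable((short) 0755);

    public static void main(String[] args) throws Exception {
        LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
        Path p = new Path("target/task-info.bin");
        lfs.create(p).close();
        lfs.setPermission(p, SCRIPT_PERM);
        System.out.println(lfs.getFileStatus(p).getPermission()); // rwxr-xr-x
        lfs.delete(p, false);
    }
}
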
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts) throws IOException {
Task firstTask = allAttempts.get(0);
String taskid = firstTask.getTaskID().toString();
LocalDirAllocator ldirAlloc = new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
String taskRanFile = TaskTracker.TT_LOG_TMP_DIR + Path.SEPARATOR + taskid;
Configuration conf = getConf();
//write the serialized task information to a file to pass to the truncater
Path taskRanFilePath = ldirAlloc.getLocalPathForWrite(taskRanFile, conf);
LocalFileSystem lfs = FileSystem.getLocal(conf);
FSDataOutputStream out = lfs.create(taskRanFilePath);
out.writeInt(allAttempts.size());
for (Task t : allAttempts) {
out.writeBoolean(t.isMapTask());
t.write(out);
}
out.close();
lfs.setPermission(taskRanFilePath, FsPermission.createImmutable((short) 0755));
List<String> command = new ArrayList<String>();
// use same jvm as parent
File jvm = new File(new File(System.getProperty("java.home"), "bin"), "java");
command.add(jvm.toString());
command.add("-Djava.library.path=" + System.getProperty("java.library.path"));
command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
command.add("-Dhadoop.root.logger=INFO,console");
command.add("-classpath");
command.add(System.getProperty("java.class.path"));
// main of TaskLogsTruncater
command.add(TaskLogsTruncater.class.getName());
command.add(taskRanFilePath.toString());
String[] taskControllerCmd = new String[4 + command.size()];
taskControllerCmd[0] = taskControllerExe;
taskControllerCmd[1] = user;
taskControllerCmd[2] = localStorage.getDirsString();
taskControllerCmd[3] = Integer.toString(Commands.RUN_COMMAND_AS_USER.getValue());
int i = 4;
for (String cmdArg : command) {
taskControllerCmd[i++] = cmdArg;
}
if (LOG.isDebugEnabled()) {
for (String cmd : taskControllerCmd) {
LOG.debug("taskctrl command = " + cmd);
}
}
ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd);
try {
shExec.execute();
} catch (Exception e) {
LOG.warn("Exit code from " + taskControllerExe.toString() + " is : " + shExec.getExitCode() + " for truncateLogs");
LOG.warn("Exception thrown by " + taskControllerExe.toString() + " : " + StringUtils.stringifyException(e));
LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
logOutput(shExec.getOutput());
lfs.delete(taskRanFilePath, false);
throw new IOException(e);
}
lfs.delete(taskRanFilePath, false);
if (LOG.isDebugEnabled()) {
LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
logOutput(shExec.getOutput());
}
}

Example 95
| Project: NetApp-Hadoop-NFS-Connector-master File: NFSv3AbstractFilesystem.java View source code |
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize, short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt, boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException {
LOG.debug("createInternal(): path=" + f + " flag=" + flag + " permission=" + absolutePermission + " buffersize=" + bufferSize + "replication=" + replication + " blocksize=" + blockSize);
checkPath(f);
// FSs that implement permissions should override this.
if (!createParent) {
// parent must exist: since this.create makes parent dirs automatically,
// we must throw an exception if the parent does not exist.
final FileStatus stat = getFileStatus(f.getParent());
if (stat == null) {
throw new FileNotFoundException("Missing parent:" + f);
}
if (!stat.isDirectory()) {
throw new ParentNotDirectoryException("parent is not a dir:" + f);
}
// parent does exist - go ahead with create of file.
}
FSDataOutputStream stream = fsImpl.create(f, absolutePermission, flag, bufferSize, replication, blockSize, progress, checksumOpt);
LOG.debug("createInternal(): fsImpl returned status=" + stream);
return stream;
}

Example 96
| Project: RadosFs-master File: RadosFileSystem.java View source code |
/**
* @param permission Currently ignored.
*/
@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
Path absolutePath = makeAbsolute(path);
List<Path> paths = new ArrayList<Path>();
do {
paths.add(0, absolutePath);
absolutePath = absolutePath.getParent();
} while (absolutePath != null);
boolean result = true;
for (Path p : paths) {
result &= mkdir(p);
}
return result;
}

Example 97
| Project: camus-master File: CamusSweeper.java View source code |
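The helper below inspects both the current and the deprecated umask keys. A small sketch (illustrative, not from the project) that sets the current key, FsPermission.UMASK_LABEL, and reads the effective umask back:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskLabelsDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "fs.permissions.umask-mode" is the current key; "dfs.umask" is the older,
        // deprecated key and is only consulted as a fallback.
        conf.set(FsPermission.UMASK_LABEL, "022");
        System.out.println(FsPermission.UMASK_LABEL + " = " + conf.get(FsPermission.UMASK_LABEL));
        System.out.println("effective umask: " + FsPermission.getUMask(conf)); // ----w--w-
    }
}
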
private static String getUmask(Configuration conf) {
if (conf.get(FsPermission.UMASK_LABEL) != null && conf.get(FsPermission.DEPRECATED_UMASK_LABEL) != null) {
log.warn(String.format("Both umask labels exist: %s=%s, %s=%s", FsPermission.UMASK_LABEL, conf.get(FsPermission.UMASK_LABEL), FsPermission.DEPRECATED_UMASK_LABEL, conf.get(FsPermission.DEPRECATED_UMASK_LABEL)));
return conf.get(FsPermission.UMASK_LABEL);
} else if (conf.get(FsPermission.UMASK_LABEL) != null) {
log.info(String.format("umask set: %s=%s", FsPermission.UMASK_LABEL, conf.get(FsPermission.UMASK_LABEL)));
return conf.get(FsPermission.UMASK_LABEL);
} else if (conf.get(FsPermission.DEPRECATED_UMASK_LABEL) != null) {
log.info(String.format("umask set: %s=%s", FsPermission.DEPRECATED_UMASK_LABEL, conf.get(FsPermission.DEPRECATED_UMASK_LABEL)));
return conf.get(FsPermission.DEPRECATED_UMASK_LABEL);
} else {
log.info("umask unset");
return "undefined";
}
}

Example 98
| Project: cdap-master File: SFTPFileSystem.java View source code |
/**
 * Convert the file information in LsEntry to a {@link FileStatus} object.
*
* @param sftpFile
* @param parentPath
* @return file status
* @throws IOException
*/
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile, Path parentPath) throws IOException {
SftpATTRS attr = sftpFile.getAttrs();
long length = attr.getSize();
boolean isDir = attr.isDir();
boolean isLink = attr.isLink();
if (isLink) {
String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
try {
link = channel.realpath(link);
Path linkParent = new Path("/", link);
FileStatus fstat = getFileStatus(channel, linkParent);
isDir = fstat.isDirectory();
length = fstat.getLen();
} catch (Exception e) {
throw new IOException(e);
}
}
int blockReplication = 1;
// Using default block size since there is no way in SFTP channel to know of
// block sizes on server. The assumption could be less than ideal.
long blockSize = DEFAULT_BLOCK_SIZE;
// convert to milliseconds
long modTime = attr.getMTime() * 1000;
long accessTime = 0;
FsPermission permission = getPermissions(sftpFile);
// We may not be able to get the real user/group names, so just use the
// numeric user and group ids.
String user = Integer.toString(attr.getUId());
String group = Integer.toString(attr.getGId());
Path filePath = new Path(parentPath, sftpFile.getFilename());
return new FileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, permission, user, group, filePath.makeQualified(this.getUri(), this.getWorkingDirectory()));
}

Example 99
| Project: hoya-master File: HoyaFileSystem.java View source code |
/**
 * Create the Hoya cluster path for a named cluster and all its subdirectories.
 * This is a directory; a mkdirs() operation is executed
 * to ensure that it exists.
*
* @param clustername name of the cluster
* @return the path to the cluster directory
* @throws java.io.IOException trouble
* @throws org.apache.hoya.exceptions.HoyaException hoya-specific exceptions
*/
public Path createClusterDirectories(String clustername, Configuration conf) throws IOException, HoyaException {
Path clusterDirectory = buildHoyaClusterDirPath(clustername);
Path snapshotConfPath = new Path(clusterDirectory, HoyaKeys.SNAPSHOT_CONF_DIR_NAME);
Path generatedConfPath = new Path(clusterDirectory, HoyaKeys.GENERATED_CONF_DIR_NAME);
Path historyPath = new Path(clusterDirectory, HoyaKeys.HISTORY_DIR_NAME);
String clusterDirPermsOct = conf.get(HoyaXmlConfKeys.HOYA_CLUSTER_DIRECTORY_PERMISSIONS, HoyaXmlConfKeys.DEFAULT_HOYA_CLUSTER_DIRECTORY_PERMISSIONS);
FsPermission clusterPerms = new FsPermission(clusterDirPermsOct);
verifyClusterDirectoryNonexistent(clustername, clusterDirectory);
createWithPermissions(clusterDirectory, clusterPerms);
createWithPermissions(snapshotConfPath, clusterPerms);
createWithPermissions(generatedConfPath, clusterPerms);
createWithPermissions(historyPath, clusterPerms);
// Data Directory
Path datapath = new Path(clusterDirectory, HoyaKeys.DATA_DIR_NAME);
String dataOpts = conf.get(HoyaXmlConfKeys.HOYA_DATA_DIRECTORY_PERMISSIONS, HoyaXmlConfKeys.DEFAULT_HOYA_DATA_DIRECTORY_PERMISSIONS);
HoyaFileSystem.log.debug("Setting data directory permissions to {}", dataOpts);
createWithPermissions(datapath, new FsPermission(dataOpts));
return clusterDirectory;
}

Example 100
| Project: incubator-systemml-master File: MapReduceTool.java View source code |
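The method below converts an octal permission string such as "755" to a short through hand-rolled character arithmetic. A minimal sketch (illustrative, not from the project) of two equivalent, more direct conversions:

import org.apache.hadoop.fs.permission.FsPermission;

public class OctalStringDemo {
    public static void main(String[] args) {
        String permissions = "755";
        // Equivalent to the manual character arithmetic: parse the string as octal...
        short mode = Short.parseShort(permissions, 8);
        System.out.println(new FsPermission(mode));         // rwxr-xr-x
        // ...or let FsPermission parse the octal string directly.
        System.out.println(new FsPermission(permissions));  // rwxr-xr-x
    }
}
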
public static void createDirIfNotExistOnHDFS(String dir, String permissions) throws IOException {
Path path = new Path(dir);
try {
FileSystem fs = FileSystem.get(_rJob);
if (!fs.exists(path)) {
char[] c = permissions.toCharArray();
short sU = (short) ((c[0] - 48) * 64);
short sG = (short) ((c[1] - 48) * 8);
short sO = (short) ((c[2] - 48));
short mode = (short) (sU + sG + sO);
FsPermission perm = new FsPermission(mode);
fs.mkdirs(path, perm);
}
} catch (Exception ex) {
throw new IOException("Failed in creating a non existing dir on HDFS", ex);
}
//NOTE: we depend on the configured umask; setting the umask in the job or via FsPermission has no effect.
//Similarly, setting MRConfigurationNames.DFS_DATANODE_DATA_DIR_PERM has no effect either.
}

Example 101
| Project: jstorm-master File: JstormOnYarn.java View source code |
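The method below writes a local resource through the static FileSystem.create(fs, dst, permission) helper, which applies the given permission to the newly created file. A minimal local-filesystem sketch (class name and path are illustrative, not from the project):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class StaticCreateDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dst = new Path("target/local-resource.txt");
        // The static helper creates the file and then sets the permission,
        // so the result is not subject to the client-side umask.
        FSDataOutputStream out = FileSystem.create(fs, dst, new FsPermission("644"));
        try {
            out.writeUTF("resources");
        } finally {
            out.close();
        }
        System.out.println(fs.getFileStatus(dst).getPermission()); // rw-r--r--
        fs.delete(dst, false);
    }
}
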
private void addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId, Map<String, LocalResource> localResources, String resources) throws IOException {
String suffix = jstormClientContext.appName + JOYConstants.BACKLASH + appId + JOYConstants.BACKLASH + fileDstPath;
Path dst = new Path(fs.getHomeDirectory(), suffix);
if (fileSrcPath == null) {
FSDataOutputStream ostream = null;
try {
ostream = FileSystem.create(fs, dst, new FsPermission(JOYConstants.FS_PERMISSION));
ostream.writeUTF(resources);
} finally {
IOUtils.closeQuietly(ostream);
}
} else {
fs.copyFromLocalFile(new Path(fileSrcPath), dst);
}
FileStatus scFileStatus = fs.getFileStatus(dst);
LocalResource scRsrc = LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(dst.toUri()), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, scFileStatus.getLen(), scFileStatus.getModificationTime());
localResources.put(fileDstPath, scRsrc);
}