/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.shims;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskLogServlet;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progressable;

/**
 * Implementation of shims against Hadoop 0.20 with Security.
 */
public class Hadoop20SShims extends HadoopShimsSecure {

  @Override
  public String getTaskAttemptLogUrl(JobConf conf,
      String taskTrackerHttpAddress, String taskAttemptId)
      throws MalformedURLException {
    URL taskTrackerHttpURL = new URL(taskTrackerHttpAddress);
    return TaskLogServlet.getTaskLogUrl(
        taskTrackerHttpURL.getHost(),
        Integer.toString(taskTrackerHttpURL.getPort()),
        taskAttemptId);
  }

  @Override
  public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
    switch (clusterStatus.getJobTrackerState()) {
    case INITIALIZING:
      return JobTrackerState.INITIALIZING;
    case RUNNING:
      return JobTrackerState.RUNNING;
    default:
      String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerState();
      throw new Exception(errorMsg);
    }
  }

  @Override
  public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(
      Configuration conf, final Progressable progressable) {
    // On this Hadoop version TaskAttemptContext is a concrete class; forward
    // progress reports to the supplied Progressable.
    return new org.apache.hadoop.mapreduce.TaskAttemptContext(conf, new TaskAttemptID()) {
      @Override
      public void progress() {
        progressable.progress();
      }
    };
  }

  @Override
  public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
    return new org.apache.hadoop.mapreduce.JobContext(job.getConfiguration(), job.getJobID());
  }

  @Override
  public boolean isLocalMode(Configuration conf) {
    return "local".equals(getJobLauncherRpcAddress(conf));
  }

  @Override
  public String getJobLauncherRpcAddress(Configuration conf) {
    return conf.get("mapred.job.tracker");
  }

  @Override
  public void setJobLauncherRpcAddress(Configuration conf, String val) {
    conf.set("mapred.job.tracker", val);
  }

  @Override
  public String getJobLauncherHttpAddress(Configuration conf) {
    return conf.get("mapred.job.tracker.http.address");
  }

  @Override
  public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
      throws IOException {
    // Older versions of Hadoop do not provide a Trash constructor that takes a
    // Path or FileSystem, so emulate one by pointing a duplicate Configuration
    // at the target FileSystem. This workaround needs to be filtered out once
    // the shims are split by Hadoop version.
    Configuration dupConf = new Configuration(conf);
    FileSystem.setDefaultUri(dupConf, fs.getUri());
    Trash trash = new Trash(dupConf);
    return trash.moveToTrash(path);
  }

  @Override
  public long getDefaultBlockSize(FileSystem fs, Path path) {
    // This Hadoop version has no per-path default; the path argument is ignored.
    return fs.getDefaultBlockSize();
  }

  @Override
  public short getDefaultReplication(FileSystem fs, Path path) {
    // This Hadoop version has no per-path default; the path argument is ignored.
    return fs.getDefaultReplication();
  }
}
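
// Usage sketch (illustrative; not part of the original class): Hive callers
// obtain the version-appropriate shim through ShimLoader rather than
// constructing Hadoop20SShims directly. Assumes ShimLoader.getHadoopShims()
// from org.apache.hadoop.hive.shims; the path below is a hypothetical example.
//
//   HadoopShims shims = ShimLoader.getHadoopShims();
//   Configuration conf = new Configuration();
//   FileSystem fs = FileSystem.get(conf);
//   // Returns true if the path was moved to the current user's trash.
//   boolean moved = shims.moveToAppropriateTrash(fs, new Path("/tmp/example"), conf);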