/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.Socket;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertTrue;

/**
 * Tests the effect of {@code dfs.client.socket.send.buffer.size} on the
 * socket a client opens to the first datanode in a write pipeline.
 *
 * <p>All checks here are deliberately loose because
 * {@link java.net.Socket#setSendBufferSize(int)} is only a hint to the
 * kernel; the value actually applied may differ from what was requested.
 */
public class TestDFSClientSocketSize {
  private static final Logger LOG = LoggerFactory.getLogger(
      TestDFSClientSocketSize.class);
  static {
    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
  }

  /**
   * The setting of socket send buffer size in
   * {@link java.net.Socket#setSendBufferSize(int)} is only a hint.  Actual
   * value may differ.  We just sanity check that it is somewhere close.
   */
  @Test
  public void testDefaultSendBufferSize() throws IOException {
    assertTrue("Send buffer size should be somewhere near default.",
        getSendBufferSize(new Configuration()) >=
            DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT / 2);
  }

  /**
   * Note that {@link java.net.Socket#setSendBufferSize(int)} is only a hint.
   * If this test is flaky it should be ignored.  See HADOOP-13351.
   */
  @Test
  public void testSpecifiedSendBufferSize() throws IOException {
    final Configuration conf1 = new Configuration();
    conf1.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 256 * 1024);  // 256 KB
    final int sendBufferSize1 = getSendBufferSize(conf1);

    final Configuration conf2 = new Configuration();
    conf2.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 1024);  // 1 KB
    final int sendBufferSize2 = getSendBufferSize(conf2);

    LOG.info("Large buf size is {}, small is {}",
        sendBufferSize1, sendBufferSize2);
    // We cannot assert exact values (the kernel may round them), but a much
    // larger requested buffer should yield a larger effective buffer.
    assertTrue("Larger specified send buffer should have effect",
        sendBufferSize1 > sendBufferSize2);
  }

  /**
   * Setting the buffer size to 0 delegates sizing to the kernel
   * (auto-tuning); the resulting buffer must still be a positive value.
   */
  @Test
  public void testAutoTuningSendBufferSize() throws IOException {
    final Configuration conf = new Configuration();
    conf.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 0);
    final int sendBufferSize = getSendBufferSize(conf);
    LOG.info("The auto tuned send buffer size is: {}", sendBufferSize);
    // Fixed message: the assertion requires a strictly positive value, not
    // merely a non-negative one.
    assertTrue("Send buffer size should be a positive value which is " +
        "determined by system (kernel).", sendBufferSize > 0);
  }

  /**
   * Starts a single-datanode {@link MiniDFSCluster} with the given
   * configuration, opens a pipeline socket to that datanode, and returns the
   * send buffer size the kernel actually applied to the socket.
   *
   * @param conf configuration carrying the requested send buffer size
   * @return the effective socket send buffer size in bytes
   * @throws IOException if the cluster or socket cannot be set up
   */
  private int getSendBufferSize(Configuration conf) throws IOException {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      LOG.info("MiniDFSCluster started.");
      try (Socket socket = DFSOutputStream.createSocketForPipeline(
          new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
          1, cluster.getFileSystem().getClient())) {
        return socket.getSendBufferSize();
      }
    } finally {
      // cluster is assigned before the try block and build() either returns a
      // non-null cluster or throws, so no null check is needed here.
      cluster.shutdown();
    }
  }
}