/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.hadoop.zebra.io;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.io.TableInserter;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.data.Tuple;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Test column group names on a basic table with simple column types.
 */
public class TestColumnGroupName2 {
  final static String STR_SCHEMA =
      "s1:bool, s2:int, s3:long, s4:double, s5:string, s6:bytes";
  final static String STR_STORAGE =
      "[s1, s2] as PI secure by uid:users perm:777; [s3, s4] as General; [s5, s6] as ULT";
  private static Configuration conf;
  private static Path path;
  private static FileSystem fs;

  @BeforeClass
  public static void setUpOnce() throws IOException {
    conf = new Configuration();
    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
    conf.setInt("table.input.split.minSize", 64 * 1024);
    conf.set("table.output.tfile.compression", "none");

    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
    fs = new LocalFileSystem(rawLFS);
    path = new Path(fs.getWorkingDirectory(), "TestColumnGroupName");
    fs = path.getFileSystem(conf);

    // drop any previous tables
    BasicTable.drop(path, conf);

    // create the table with the schema and the column group storage spec above
    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, conf);
    writer.finish();

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    // reopen the table for writing and insert two rows into a single part
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // insert data in row 1
    int row = 0;
    tuple.set(0, true); // bool
    tuple.set(1, 1); // int
    tuple.set(2, 1001L); // long
    tuple.set(3, 1.1); // double
    tuple.set(4, "hello world 1"); // string
    tuple.set(5, new DataByteArray("hello byte 1")); // bytes
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // insert data in row 2
    row++;
    tuple.set(0, false);
    tuple.set(1, 2); // int
    tuple.set(2, 1002L); // long
    tuple.set(3, 3.1); // double
    tuple.set(4, "hello world 2"); // string
    tuple.set(5, new DataByteArray("hello byte 2")); // bytes
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building the table, closing out the inserter, writer1 and writer
    inserter.close();
    writer1.finish();
    writer.close();
  }

  @AfterClass
  public static void tearDownOnce() throws IOException {
    // BasicTable.drop(path, conf);
  }

  // Dump the table info, which includes the column group names defined above.
  @Test
  public void test1() throws IOException, Exception {
    BasicTable.dumpInfo(path.toString(), System.out, conf);
  }
}