/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
/**
* HTableDescriptor contains the details about an HBase table such as the descriptors of
* all the column families, is the table a catalog table, <code> hbase:meta </code>,
* if the table is read only, the maximum size of the memstore,
* when the region split should occur, coprocessors associated with it etc...
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
* Use {@link TableDescriptorBuilder} to build a {@link TableDescriptor} instead.
*/
@Deprecated
@InterfaceAudience.Public
public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
  public static final String SPLIT_POLICY = TableDescriptorBuilder.SPLIT_POLICY;
  public static final String MAX_FILESIZE = TableDescriptorBuilder.MAX_FILESIZE;
  public static final String OWNER = TableDescriptorBuilder.OWNER;
  public static final Bytes OWNER_KEY = TableDescriptorBuilder.OWNER_KEY;
  public static final String READONLY = TableDescriptorBuilder.READONLY;
  public static final String COMPACTION_ENABLED = TableDescriptorBuilder.COMPACTION_ENABLED;
  public static final String MEMSTORE_FLUSHSIZE = TableDescriptorBuilder.MEMSTORE_FLUSHSIZE;
  public static final String FLUSH_POLICY = TableDescriptorBuilder.FLUSH_POLICY;
  public static final String IS_ROOT = "IS_ROOT";
  public static final String IS_META = TableDescriptorBuilder.IS_META;
  public static final String DURABILITY = TableDescriptorBuilder.DURABILITY;
  public static final String REGION_REPLICATION = TableDescriptorBuilder.REGION_REPLICATION;
  public static final String REGION_MEMSTORE_REPLICATION =
      TableDescriptorBuilder.REGION_MEMSTORE_REPLICATION;
  public static final String NORMALIZATION_ENABLED = TableDescriptorBuilder.NORMALIZATION_ENABLED;
  public static final String PRIORITY = TableDescriptorBuilder.PRIORITY;
  public static final boolean DEFAULT_READONLY = TableDescriptorBuilder.DEFAULT_READONLY;
  public static final boolean DEFAULT_COMPACTION_ENABLED =
      TableDescriptorBuilder.DEFAULT_COMPACTION_ENABLED;
  public static final boolean DEFAULT_NORMALIZATION_ENABLED =
      TableDescriptorBuilder.DEFAULT_NORMALIZATION_ENABLED;
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE =
      TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE;
  public static final int DEFAULT_REGION_REPLICATION =
      TableDescriptorBuilder.DEFAULT_REGION_REPLICATION;
  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION =
      TableDescriptorBuilder.DEFAULT_REGION_MEMSTORE_REPLICATION;

  /** All reads and writes go through this delegate; this class only adapts the legacy API. */
  private final ModifyableTableDescriptor delegatee;

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    // Use the typed empty-collection factories rather than the raw
    // Collections.EMPTY_LIST/EMPTY_MAP constants to avoid unchecked conversions.
    this(name, Collections.<HColumnDescriptor>emptyList(),
        Collections.<Bytes, Bytes>emptyMap(), Collections.<String, String>emptyMap());
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an ImmutableHTableDescriptor.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    this(desc.getTableName(), desc.getFamilies(), desc.getValues(), desc.getConfiguration());
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter
   * but using a different table name.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an ImmutableHTableDescriptor.
   * @param name Table name.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
    this(name, desc.getFamilies(), desc.getValues(), desc.getConfiguration());
  }

  /**
   * Construct a table descriptor by cloning any {@link TableDescriptor} implementation.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final TableDescriptor desc) {
    this(desc.getTableName(), desc.getFamilies(), desc.getValues(), desc.getConfiguration());
  }

  private HTableDescriptor(final TableName name, final Collection<HColumnDescriptor> families,
      Map<Bytes, Bytes> values, Map<String, String> configuration) {
    this(new ModifyableTableDescriptor(name, families, values, configuration));
  }

  protected HTableDescriptor(ModifyableTableDescriptor delegatee) {
    this.delegatee = delegatee;
  }

  /**
   * This is vestigial API. It will be removed in 3.0.
   *
   * @return always returns false
   */
  public boolean isRootRegion() {
    return false;
  }

  /**
   * Checks if this table is <code> hbase:meta </code>
   * region.
   *
   * @return true if this table is <code> hbase:meta </code>
   * region
   */
  @Override
  public boolean isMetaRegion() {
    return delegatee.isMetaRegion();
  }

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  @Override
  public boolean isMetaTable() {
    return delegatee.isMetaTable();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   */
  @Override
  public byte[] getValue(byte[] key) {
    return delegatee.getValue(key);
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   */
  public String getValue(String key) {
    return delegatee.getValue(key);
  }

  /**
   * @return Getter for fetching an unmodifiable map.
   */
  @Override
  public Map<Bytes, Bytes> getValues() {
    return delegatee.getValues();
  }

  /**
   * Setter for storing metadata as a (key, value) pair in map
   *
   * @param key The key.
   * @param value The value. If null, removes the setting.
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    delegatee.setValue(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in map
   *
   * @param key The key.
   * @param value The value. If null, removes the setting.
   */
  public HTableDescriptor setValue(final Bytes key, final Bytes value) {
    delegatee.setValue(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in map
   *
   * @param key The key.
   * @param value The value. If null, removes the setting.
   */
  public HTableDescriptor setValue(String key, String value) {
    delegatee.setValue(key, value);
    return this;
  }

  /**
   * Remove metadata represented by the key from the map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final String key) {
    delegatee.remove(key);
  }

  /**
   * Remove metadata represented by the key from the map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    delegatee.remove(key);
  }

  /**
   * Remove metadata represented by the key from the map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final byte [] key) {
    delegatee.remove(key);
  }

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only
   */
  @Override
  public boolean isReadOnly() {
    return delegatee.isReadOnly();
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    delegatee.setReadOnly(readOnly);
    return this;
  }

  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be performed.
   *
   * @return true if table compaction enabled
   */
  @Override
  public boolean isCompactionEnabled() {
    return delegatee.isCompactionEnabled();
  }

  /**
   * Setting the table compaction enable flag.
   *
   * @param isEnable True if enable compaction.
   */
  public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
    delegatee.setCompactionEnabled(isEnable);
    return this;
  }

  /**
   * Check if normalization enable flag of the table is true. If flag is
   * false then the region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  @Override
  public boolean isNormalizationEnabled() {
    return delegatee.isNormalizationEnabled();
  }

  /**
   * Setting the table normalization enable flag.
   *
   * @param isEnable True if enable normalization.
   */
  public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
    delegatee.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * @param durability enum value
   */
  public HTableDescriptor setDurability(Durability durability) {
    delegatee.setDurability(durability);
    return this;
  }

  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  @Override
  public Durability getDurability() {
    return delegatee.getDurability();
  }

  /**
   * Get the name of the table
   *
   * @return TableName
   */
  @Override
  public TableName getTableName() {
    return delegatee.getTableName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return delegatee.getTableName().getNameAsString();
  }

  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur. The class used by
   * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
   * @param clazz the class name
   */
  public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
    delegatee.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur. The class used by
   * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  @Override
  public String getRegionSplitPolicyClassName() {
    return delegatee.getRegionSplitPolicyClassName();
  }

  /**
   * Returns the maximum size up to which a region can grow to after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   *
   * @see #setMaxFileSize(long)
   */
  @Override
  public long getMaxFileSize() {
    return delegatee.getMaxFileSize();
  }

  /**
   * Sets the maximum size up to which a region can grow to after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. If the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. This defaults to a value of
   * 256 MB.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    delegatee.setMaxFileSize(maxFileSize);
    return this;
  }

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   *
   * @see #setMemStoreFlushSize(long)
   */
  @Override
  public long getMemStoreFlushSize() {
    return delegatee.getMemStoreFlushSize();
  }

  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to a size of 64 MB.
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    delegatee.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  /**
   * This sets the class associated with the flush policy which determines the stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * org.apache.hadoop.hbase.regionserver.FlushPolicy.
   * @param clazz the class name
   */
  public HTableDescriptor setFlushPolicyClassName(String clazz) {
    delegatee.setFlushPolicyClassName(clazz);
    return this;
  }

  /**
   * This gets the class associated with the flush policy which determines the stores need to be
   * flushed when flushing a region. The class used by default is defined in
   * org.apache.hadoop.hbase.regionserver.FlushPolicy.
   * @return the class name of the flush policy for this table. If this returns null, the default
   * flush policy is used.
   */
  @Override
  public String getFlushPolicyClassName() {
    return delegatee.getFlushPolicyClassName();
  }

  /**
   * Adds a column family.
   * For the updating purpose please use {@link #modifyFamily(HColumnDescriptor)} instead.
   * @param family HColumnDescriptor of family to add.
   */
  public HTableDescriptor addFamily(final HColumnDescriptor family) {
    delegatee.addFamily(family);
    return this;
  }

  /**
   * Modifies the existing column family.
   * @param family HColumnDescriptor of family to update
   * @return this (for chained invocation)
   */
  public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
    delegatee.modifyFamily(family);
    return this;
  }

  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  @Override
  public boolean hasFamily(final byte [] familyName) {
    return delegatee.hasFamily(familyName);
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    return delegatee.toString();
  }

  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    return delegatee.toStringCustomizedValues();
  }

  /**
   * @return map of all table attributes formatted into string.
   */
  public String toStringTableAttributes() {
    return delegatee.toStringTableAttributes();
  }

  /**
   * Compare the contents of the descriptor with another one passed as a parameter.
   * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
   * contents of the descriptors are compared.
   *
   * @return true if the contents of the two descriptors exactly match
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    // Equality is defined via compareTo so that equals and compareTo stay consistent.
    return compareTo((HTableDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    return delegatee.hashCode();
  }

  // Comparable

  /**
   * Compares the descriptor with another descriptor which is passed as a parameter.
   * This compares the content of the two descriptors and not the reference.
   *
   * @return 0 if the contents of the descriptors are exactly matching,
   * 1 if there is a mismatch in the contents
   */
  @Override
  public int compareTo(final HTableDescriptor other) {
    return delegatee.compareTo(other.delegatee);
  }

  /**
   * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
   * of all the column families of the table.
   *
   * @return Immutable collection of {@link HColumnDescriptor} of all the
   * column families.
   */
  @Override
  public Collection<HColumnDescriptor> getFamilies() {
    return delegatee.getFamilies();
  }

  /**
   * Return true if there is at least one cf whose replication scope is serial.
   */
  @Override
  public boolean hasSerialReplicationScope() {
    return delegatee.hasSerialReplicationScope();
  }

  /**
   * Returns the configured replicas per region
   */
  @Override
  public int getRegionReplication() {
    return delegatee.getRegionReplication();
  }

  /**
   * Sets the number of replicas per region.
   * @param regionReplication the replication factor per region
   */
  public HTableDescriptor setRegionReplication(int regionReplication) {
    delegatee.setRegionReplication(regionReplication);
    return this;
  }

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  @Override
  public boolean hasRegionMemstoreReplication() {
    return delegatee.hasRegionMemstoreReplication();
  }

  /**
   * Enable or Disable the memstore replication from the primary region to the replicas.
   * The replication will be used only for meta operations (e.g. flush, compaction, ...)
   *
   * @param memstoreReplication true if the new data written to the primary region
   * should be replicated.
   * false if the secondaries can tolerate to have new
   * data only when the primary flushes the memstore.
   */
  public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
    delegatee.setRegionMemstoreReplication(memstoreReplication);
    return this;
  }

  /**
   * Sets the relative priority of requests against this table.
   * @param priority the priority value
   */
  public HTableDescriptor setPriority(int priority) {
    delegatee.setPriority(priority);
    return this;
  }

  @Override
  public int getPriority() {
    return delegatee.getPriority();
  }

  /**
   * Returns all the column family names of the current table. The map of
   * HTableDescriptor contains mapping of family name to HColumnDescriptors.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  @Override
  public Set<byte[]> getFamiliesKeys() {
    return delegatee.getFamiliesKeys();
  }

  /**
   * Returns the count of the column families of the table.
   *
   * @return Count of column families of the table
   */
  @Override
  public int getColumnFamilyCount() {
    return delegatee.getColumnFamilyCount();
  }

  /**
   * Returns an array all the {@link HColumnDescriptor} of the column families
   * of the table.
   *
   * @return Array of all the HColumnDescriptors of the current table
   *
   * @see #getFamilies()
   */
  @Override
  public HColumnDescriptor[] getColumnFamilies() {
    return delegatee.getColumnFamilies();
  }

  /**
   * Returns the HColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param column Column family name
   * @return Column descriptor for the passed family name or the family on
   * passed in column.
   */
  @Override
  public HColumnDescriptor getFamily(final byte [] column) {
    return delegatee.getFamily(column);
  }

  /**
   * Removes the HColumnDescriptor with name specified by the parameter column
   * from the table descriptor
   *
   * @param column Name of the column family to be removed.
   * @return Column descriptor for the passed family name or the family on
   * passed in column.
   */
  public HColumnDescriptor removeFamily(final byte [] column) {
    return delegatee.removeFamily(column);
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className) throws IOException {
    delegatee.addCoprocessor(className);
    return this;
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from default classloader.
   * @param className Full class name.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
      int priority, final Map<String, String> kvs)
      throws IOException {
    delegatee.addCoprocessor(className, jarFilePath, priority, kvs);
    return this;
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param specStr The Coprocessor specification all in in one String formatted so matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @throws IOException
   */
  public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
    delegatee.addCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * Check if the table has an attached co-processor represented by the name className
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true of the table has a co-processor className
   */
  @Override
  public boolean hasCoprocessor(String classNameToMatch) {
    return delegatee.hasCoprocessor(classNameToMatch);
  }

  /**
   * Return the list of attached co-processor represented by their name className
   *
   * @return The list of co-processors classNames
   */
  @Override
  public List<String> getCoprocessors() {
    return delegatee.getCoprocessors();
  }

  /**
   * Remove a coprocessor from those set on the table
   * @param className Class name of the co-processor
   */
  public void removeCoprocessor(String className) {
    delegatee.removeCoprocessor(className);
  }

  public final static String NAMESPACE_FAMILY_INFO = TableDescriptorBuilder.NAMESPACE_FAMILY_INFO;
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES =
      TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES;
  public final static byte[] NAMESPACE_COL_DESC_BYTES =
      TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES;

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC
    = new HTableDescriptor(TableDescriptorBuilder.NAMESPACE_TABLEDESC);

  @Deprecated
  public HTableDescriptor setOwner(User owner) {
    delegatee.setOwner(owner);
    return this;
  }

  // used by admin.rb:alter(table_name,*args) to update owner.
  @Deprecated
  public HTableDescriptor setOwnerString(String ownerString) {
    delegatee.setOwnerString(ownerString);
    return this;
  }

  @Override
  @Deprecated
  public String getOwnerString() {
    return delegatee.getOwnerString();
  }

  /**
   * @return This instance serialized with pb with pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte[] toByteArray() {
    return delegatee.toByteArray();
  }

  /**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
   */
  public static HTableDescriptor parseFrom(final byte [] bytes)
      throws DeserializationException, IOException {
    return new HTableDescriptor(ModifyableTableDescriptor.parseFrom(bytes));
  }

  /**
   * Getter for accessing the configuration value by key
   */
  @Override
  public String getConfigurationValue(String key) {
    return delegatee.getConfigurationValue(key);
  }

  /**
   * Getter for fetching an unmodifiable map.
   */
  @Override
  public Map<String, String> getConfiguration() {
    return delegatee.getConfiguration();
  }

  /**
   * Setter for storing a configuration setting in map.
   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
   * @param value String value. If null, removes the setting.
   */
  public HTableDescriptor setConfiguration(String key, String value) {
    delegatee.setConfiguration(key, value);
    return this;
  }

  /**
   * Remove a config setting represented by the key from the map
   */
  public void removeConfiguration(final String key) {
    delegatee.removeConfiguration(key);
  }
}