// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: StorageClusterStatusMessage.proto
package org.apache.hadoop.hbase.rest.protobuf.generated;
public final class StorageClusterStatusMessage {
private StorageClusterStatusMessage() {}
// Empty because StorageClusterStatusMessage.proto declares no extensions;
// protoc emits this hook unconditionally for API uniformity.
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
 * Read-only accessor interface for the {@code StorageClusterStatus} message,
 * implemented by both the immutable message and its Builder (generated code).
 */
public interface StorageClusterStatusOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>
getLiveNodesList();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index);
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
int getLiveNodesCount();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
getLiveNodesOrBuilderList();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
int index);
// repeated string deadNodes = 2;
/**
* <code>repeated string deadNodes = 2;</code>
*/
java.util.List<java.lang.String>
getDeadNodesList();
/**
* <code>repeated string deadNodes = 2;</code>
*/
int getDeadNodesCount();
/**
* <code>repeated string deadNodes = 2;</code>
*/
java.lang.String getDeadNodes(int index);
/**
* <code>repeated string deadNodes = 2;</code>
*/
com.google.protobuf.ByteString
getDeadNodesBytes(int index);
// optional int32 regions = 3;
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
boolean hasRegions();
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
int getRegions();
// optional int64 requests = 4;
/**
* <code>optional int64 requests = 4;</code>
*/
boolean hasRequests();
/**
* <code>optional int64 requests = 4;</code>
*/
long getRequests();
// optional double averageLoad = 5;
/**
* <code>optional double averageLoad = 5;</code>
*/
boolean hasAverageLoad();
/**
* <code>optional double averageLoad = 5;</code>
*/
double getAverageLoad();
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus}
*/
public static final class StorageClusterStatus extends
com.google.protobuf.GeneratedMessage
implements StorageClusterStatusOrBuilder {
// Use StorageClusterStatus.newBuilder() to construct.
private StorageClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// noInit variant used for the singleton default instance; carries no unknown fields.
private StorageClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Singleton default instance; assigned in a static initializer outside this chunk
// (standard protoc layout) — TODO confirm against the rest of the file.
private static final StorageClusterStatus defaultInstance;
public static StorageClusterStatus getDefaultInstance() {
return defaultInstance;
}
public StorageClusterStatus getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that were on the wire but not in this message definition.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor invoked via PARSER. Reads fields in any
// order, preserves unrecognized fields in unknownFields, and on any parse
// failure attaches the partially-built message to the thrown exception.
// Case labels are protobuf tags: (fieldNumber << 3) | wireType.
private StorageClusterStatus(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: { // liveNodes = 1, wire type 2 (length-delimited submessage)
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
mutable_bitField0_ |= 0x00000001;
}
liveNodes_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.PARSER, extensionRegistry));
break;
}
case 18: { // deadNodes = 2, wire type 2 (length-delimited string)
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
deadNodes_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000002;
}
deadNodes_.add(input.readBytes());
break;
}
case 24: { // regions = 3, wire type 0 (varint)
bitField0_ |= 0x00000001;
regions_ = input.readInt32();
break;
}
case 32: { // requests = 4, wire type 0 (varint)
bitField0_ |= 0x00000002;
requests_ = input.readInt64();
break;
}
case 41: { // averageLoad = 5, wire type 1 (fixed64 double)
bitField0_ |= 0x00000004;
averageLoad_ = input.readDouble();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Repeated fields collected during parsing are frozen here so the
// finished message is immutable even if parsing aborted early.
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
}
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(deadNodes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor for this message type, resolved from the file-level descriptor
// held by the enclosing outer class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
}
// Stateless parser delegating to the wire-parsing constructor above.
public static com.google.protobuf.Parser<StorageClusterStatus> PARSER =
new com.google.protobuf.AbstractParser<StorageClusterStatus>() {
public StorageClusterStatus parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StorageClusterStatus(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StorageClusterStatus> getParserForType() {
return PARSER;
}
/**
 * Read-only accessor interface for the nested {@code StorageClusterStatus.Region}
 * message (per-region metrics); implemented by the message and its Builder.
 * Only {@code name} is required — all other fields are optional counters/sizes.
 */
public interface RegionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes name = 1;
/**
* <code>required bytes name = 1;</code>
*/
boolean hasName();
/**
* <code>required bytes name = 1;</code>
*/
com.google.protobuf.ByteString getName();
// optional int32 stores = 2;
/**
* <code>optional int32 stores = 2;</code>
*/
boolean hasStores();
/**
* <code>optional int32 stores = 2;</code>
*/
int getStores();
// optional int32 storefiles = 3;
/**
* <code>optional int32 storefiles = 3;</code>
*/
boolean hasStorefiles();
/**
* <code>optional int32 storefiles = 3;</code>
*/
int getStorefiles();
// optional int32 storefileSizeMB = 4;
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
boolean hasStorefileSizeMB();
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
int getStorefileSizeMB();
// optional int32 memstoreSizeMB = 5;
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
boolean hasMemstoreSizeMB();
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
int getMemstoreSizeMB();
// optional int32 storefileIndexSizeMB = 6;
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
boolean hasStorefileIndexSizeMB();
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
int getStorefileIndexSizeMB();
// optional int64 readRequestsCount = 7;
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
boolean hasReadRequestsCount();
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
long getReadRequestsCount();
// optional int64 writeRequestsCount = 8;
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
boolean hasWriteRequestsCount();
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
long getWriteRequestsCount();
// optional int32 rootIndexSizeKB = 9;
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
boolean hasRootIndexSizeKB();
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
int getRootIndexSizeKB();
// optional int32 totalStaticIndexSizeKB = 10;
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
boolean hasTotalStaticIndexSizeKB();
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
int getTotalStaticIndexSizeKB();
// optional int32 totalStaticBloomSizeKB = 11;
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
boolean hasTotalStaticBloomSizeKB();
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
int getTotalStaticBloomSizeKB();
// optional int64 totalCompactingKVs = 12;
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
boolean hasTotalCompactingKVs();
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
long getTotalCompactingKVs();
// optional int64 currentCompactedKVs = 13;
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
boolean hasCurrentCompactedKVs();
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
long getCurrentCompactedKVs();
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
*/
public static final class Region extends
com.google.protobuf.GeneratedMessage
implements RegionOrBuilder {
// Use Region.newBuilder() to construct.
private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// noInit variant used for the singleton default instance; carries no unknown fields.
private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
// Singleton default instance; assigned in a static initializer outside this chunk
// (standard protoc layout) — TODO confirm against the rest of the file.
private static final Region defaultInstance;
public static Region getDefaultInstance() {
return defaultInstance;
}
public Region getDefaultInstanceForType() {
return defaultInstance;
}
// Fields that were on the wire but not in this message definition.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor invoked via PARSER. Case labels are
// protobuf tags: (fieldNumber << 3) | wireType; all scalar fields here use
// wire type 0 (varint) except name (field 1, wire type 2, bytes).
private Region(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: { // name = 1 (bytes)
bitField0_ |= 0x00000001;
name_ = input.readBytes();
break;
}
case 16: { // stores = 2
bitField0_ |= 0x00000002;
stores_ = input.readInt32();
break;
}
case 24: { // storefiles = 3
bitField0_ |= 0x00000004;
storefiles_ = input.readInt32();
break;
}
case 32: { // storefileSizeMB = 4
bitField0_ |= 0x00000008;
storefileSizeMB_ = input.readInt32();
break;
}
case 40: { // memstoreSizeMB = 5
bitField0_ |= 0x00000010;
memstoreSizeMB_ = input.readInt32();
break;
}
case 48: { // storefileIndexSizeMB = 6
bitField0_ |= 0x00000020;
storefileIndexSizeMB_ = input.readInt32();
break;
}
case 56: { // readRequestsCount = 7
bitField0_ |= 0x00000040;
readRequestsCount_ = input.readInt64();
break;
}
case 64: { // writeRequestsCount = 8
bitField0_ |= 0x00000080;
writeRequestsCount_ = input.readInt64();
break;
}
case 72: { // rootIndexSizeKB = 9
bitField0_ |= 0x00000100;
rootIndexSizeKB_ = input.readInt32();
break;
}
case 80: { // totalStaticIndexSizeKB = 10
bitField0_ |= 0x00000200;
totalStaticIndexSizeKB_ = input.readInt32();
break;
}
case 88: { // totalStaticBloomSizeKB = 11
bitField0_ |= 0x00000400;
totalStaticBloomSizeKB_ = input.readInt32();
break;
}
case 96: { // totalCompactingKVs = 12
bitField0_ |= 0x00000800;
totalCompactingKVs_ = input.readInt64();
break;
}
case 104: { // currentCompactedKVs = 13
bitField0_ |= 0x00001000;
currentCompactedKVs_ = input.readInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor for the nested Region message type.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
}
// Stateless parser delegating to the wire-parsing constructor above.
public static com.google.protobuf.Parser<Region> PARSER =
new com.google.protobuf.AbstractParser<Region>() {
public Region parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Region(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Region> getParserForType() {
return PARSER;
}
// Presence bitmap: bit i set means optional/required field (i+1) was
// explicitly populated; queried by the has*() accessors below.
private int bitField0_;
// required bytes name = 1;
public static final int NAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString name_;
/**
* <code>required bytes name = 1;</code>
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes name = 1;</code>
*/
public com.google.protobuf.ByteString getName() {
return name_;
}
// optional int32 stores = 2;
public static final int STORES_FIELD_NUMBER = 2;
private int stores_;
/**
* <code>optional int32 stores = 2;</code>
*/
public boolean hasStores() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional int32 stores = 2;</code>
*/
public int getStores() {
return stores_;
}
// optional int32 storefiles = 3;
public static final int STOREFILES_FIELD_NUMBER = 3;
private int storefiles_;
/**
* <code>optional int32 storefiles = 3;</code>
*/
public boolean hasStorefiles() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int32 storefiles = 3;</code>
*/
public int getStorefiles() {
return storefiles_;
}
// optional int32 storefileSizeMB = 4;
public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
private int storefileSizeMB_;
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public boolean hasStorefileSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public int getStorefileSizeMB() {
return storefileSizeMB_;
}
// optional int32 memstoreSizeMB = 5;
public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
private int memstoreSizeMB_;
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public boolean hasMemstoreSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public int getMemstoreSizeMB() {
return memstoreSizeMB_;
}
// optional int32 storefileIndexSizeMB = 6;
public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
private int storefileIndexSizeMB_;
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public boolean hasStorefileIndexSizeMB() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB_;
}
// optional int64 readRequestsCount = 7;
public static final int READREQUESTSCOUNT_FIELD_NUMBER = 7;
private long readRequestsCount_;
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public boolean hasReadRequestsCount() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public long getReadRequestsCount() {
return readRequestsCount_;
}
// optional int64 writeRequestsCount = 8;
public static final int WRITEREQUESTSCOUNT_FIELD_NUMBER = 8;
private long writeRequestsCount_;
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public boolean hasWriteRequestsCount() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public long getWriteRequestsCount() {
return writeRequestsCount_;
}
// optional int32 rootIndexSizeKB = 9;
public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 9;
private int rootIndexSizeKB_;
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public boolean hasRootIndexSizeKB() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public int getRootIndexSizeKB() {
return rootIndexSizeKB_;
}
// optional int32 totalStaticIndexSizeKB = 10;
public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 10;
private int totalStaticIndexSizeKB_;
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public boolean hasTotalStaticIndexSizeKB() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB_;
}
// optional int32 totalStaticBloomSizeKB = 11;
public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 11;
private int totalStaticBloomSizeKB_;
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public boolean hasTotalStaticBloomSizeKB() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB_;
}
// optional int64 totalCompactingKVs = 12;
public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 12;
private long totalCompactingKVs_;
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public boolean hasTotalCompactingKVs() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public long getTotalCompactingKVs() {
return totalCompactingKVs_;
}
// optional int64 currentCompactedKVs = 13;
public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 13;
private long currentCompactedKVs_;
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public boolean hasCurrentCompactedKVs() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public long getCurrentCompactedKVs() {
return currentCompactedKVs_;
}
// Resets every field to its proto2 default (empty bytes / zero) before parsing.
private void initFields() {
name_ = com.google.protobuf.ByteString.EMPTY;
stores_ = 0;
storefiles_ = 0;
storefileSizeMB_ = 0;
memstoreSizeMB_ = 0;
storefileIndexSizeMB_ = 0;
readRequestsCount_ = 0L;
writeRequestsCount_ = 0L;
rootIndexSizeKB_ = 0;
totalStaticIndexSizeKB_ = 0;
totalStaticBloomSizeKB_ = 0;
totalCompactingKVs_ = 0L;
currentCompactedKVs_ = 0L;
}
// Memoized initialization check: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// True iff all required fields are set; only 'name' is required here.
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose presence bit is set, in field-number
// order, then appends any unknown fields preserved from parsing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, name_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt32(2, stores_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt32(3, storefiles_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeInt32(4, storefileSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeInt32(5, memstoreSizeMB_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeInt32(6, storefileIndexSizeMB_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeInt64(7, readRequestsCount_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeInt64(8, writeRequestsCount_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeInt32(9, rootIndexSizeKB_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeInt32(10, totalStaticIndexSizeKB_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeInt32(11, totalStaticBloomSizeKB_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeInt64(12, totalCompactingKVs_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeInt64(13, currentCompactedKVs_);
}
getUnknownFields().writeTo(output);
}
// Cached serialized size; -1 means not yet computed. Mirrors writeTo():
// each set field contributes its tag + payload size.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, name_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, stores_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, storefiles_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(4, storefileSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(5, memstoreSizeMB_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(6, storefileIndexSizeMB_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(7, readRequestsCount_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(8, writeRequestsCount_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(9, rootIndexSizeKB_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(10, totalStaticIndexSizeKB_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(11, totalStaticBloomSizeKB_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(12, totalCompactingKVs_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(13, currentCompactedKVs_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java-serialization hook; delegates to GeneratedMessage's replacement object.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Convenience parse entry points; all delegate to PARSER. The *Delimited*
// variants read a varint length prefix before the message payload.
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factory methods; toBuilder() seeds a new builder with this
// message's current field values.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder {
// Same descriptor/accessor-table plumbing as the message class, for the Builder.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
}
// Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Region has no sub-builder fields, so there is nothing to eagerly
// initialize even when nested builders are forced on.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its default value and clears its presence bit.
public Builder clear() {
super.clear();
name_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
stores_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
storefiles_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
storefileSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
memstoreSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
storefileIndexSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
readRequestsCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
writeRequestsCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
rootIndexSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00000100);
totalStaticIndexSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00000200);
totalStaticBloomSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00000400);
totalCompactingKVs_ = 0L;
bitField0_ = (bitField0_ & ~0x00000800);
currentCompactedKVs_ = 0L;
bitField0_ = (bitField0_ & ~0x00001000);
return this;
}
// Deep copy via build-partial-then-merge; does not share state with this builder.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
}
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
}
// Builds the message, throwing if the required 'name' field is unset;
// use buildPartial() to skip the required-field check.
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Copies every field value into a fresh message unconditionally and
// transfers only the set presence bits; never validates required fields.
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.stores_ = stores_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.storefiles_ = storefiles_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.storefileSizeMB_ = storefileSizeMB_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.memstoreSizeMB_ = memstoreSizeMB_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.readRequestsCount_ = readRequestsCount_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.writeRequestsCount_ = writeRequestsCount_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.rootIndexSizeKB_ = rootIndexSizeKB_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.totalCompactingKVs_ = totalCompactingKVs_;
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.currentCompactedKVs_ = currentCompactedKVs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Type-dispatching merge: if {@code other} is a Region, delegate to the
// strongly-typed field-by-field merge below; otherwise fall back to the
// superclass's reflective merge (which also handles unknown fields).
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges another Region into this builder: every field that is explicitly
// set on {@code other} overwrites this builder's value; unset fields are
// left untouched.  Merging the default instance is a no-op (early return).
// Unknown fields are merged last so round-tripped wire data is preserved.
public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
if (other.hasName()) {
setName(other.getName());
}
if (other.hasStores()) {
setStores(other.getStores());
}
if (other.hasStorefiles()) {
setStorefiles(other.getStorefiles());
}
if (other.hasStorefileSizeMB()) {
setStorefileSizeMB(other.getStorefileSizeMB());
}
if (other.hasMemstoreSizeMB()) {
setMemstoreSizeMB(other.getMemstoreSizeMB());
}
if (other.hasStorefileIndexSizeMB()) {
setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
}
if (other.hasReadRequestsCount()) {
setReadRequestsCount(other.getReadRequestsCount());
}
if (other.hasWriteRequestsCount()) {
setWriteRequestsCount(other.getWriteRequestsCount());
}
if (other.hasRootIndexSizeKB()) {
setRootIndexSizeKB(other.getRootIndexSizeKB());
}
if (other.hasTotalStaticIndexSizeKB()) {
setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
}
if (other.hasTotalStaticBloomSizeKB()) {
setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
}
if (other.hasTotalCompactingKVs()) {
setTotalCompactingKVs(other.getTotalCompactingKVs());
}
if (other.hasCurrentCompactedKVs()) {
setCurrentCompactedKVs(other.getCurrentCompactedKVs());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// A Region is initialized once its single required field ({@code name},
// field 1) is set; all other fields are optional.
public final boolean isInitialized() {
if (!hasName()) {
return false;
}
return true;
}
// Parses a Region from the stream and merges it into this builder.  On an
// InvalidProtocolBufferException the partially-parsed message is still
// merged in the finally block (so already-read fields are not lost) before
// the exception is rethrown to the caller.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// ---------------------------------------------------------------------------
// Builder field state.  bitField0_ tracks which of the 13 Region fields have
// been explicitly set (one bit per field, in declaration order: bit 0 = name,
// bit 1 = stores, ... bit 12 = currentCompactedKVs).  Each field contributes
// a has/get/set/clear quartet: setters flip the presence bit and call
// onChanged() to notify any parent builder; clearers reset both the bit and
// the field's default value.
// ---------------------------------------------------------------------------
private int bitField0_;
// required bytes name = 1;
private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
/**
* <code>required bytes name = 1;</code>
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes name = 1;</code>
*/
public com.google.protobuf.ByteString getName() {
return name_;
}
/**
* <code>required bytes name = 1;</code>
*/
public Builder setName(com.google.protobuf.ByteString value) {
// Generated messages are null-hostile: a required bytes field may not be
// cleared by passing null.
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
/**
* <code>required bytes name = 1;</code>
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000001);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
// optional int32 stores = 2;
private int stores_ ;
/**
* <code>optional int32 stores = 2;</code>
*/
public boolean hasStores() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional int32 stores = 2;</code>
*/
public int getStores() {
return stores_;
}
/**
* <code>optional int32 stores = 2;</code>
*/
public Builder setStores(int value) {
bitField0_ |= 0x00000002;
stores_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 stores = 2;</code>
*/
public Builder clearStores() {
bitField0_ = (bitField0_ & ~0x00000002);
stores_ = 0;
onChanged();
return this;
}
// optional int32 storefiles = 3;
private int storefiles_ ;
/**
* <code>optional int32 storefiles = 3;</code>
*/
public boolean hasStorefiles() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int32 storefiles = 3;</code>
*/
public int getStorefiles() {
return storefiles_;
}
/**
* <code>optional int32 storefiles = 3;</code>
*/
public Builder setStorefiles(int value) {
bitField0_ |= 0x00000004;
storefiles_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 storefiles = 3;</code>
*/
public Builder clearStorefiles() {
bitField0_ = (bitField0_ & ~0x00000004);
storefiles_ = 0;
onChanged();
return this;
}
// optional int32 storefileSizeMB = 4;
private int storefileSizeMB_ ;
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public boolean hasStorefileSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public int getStorefileSizeMB() {
return storefileSizeMB_;
}
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public Builder setStorefileSizeMB(int value) {
bitField0_ |= 0x00000008;
storefileSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 storefileSizeMB = 4;</code>
*/
public Builder clearStorefileSizeMB() {
bitField0_ = (bitField0_ & ~0x00000008);
storefileSizeMB_ = 0;
onChanged();
return this;
}
// optional int32 memstoreSizeMB = 5;
private int memstoreSizeMB_ ;
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public boolean hasMemstoreSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public int getMemstoreSizeMB() {
return memstoreSizeMB_;
}
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public Builder setMemstoreSizeMB(int value) {
bitField0_ |= 0x00000010;
memstoreSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 memstoreSizeMB = 5;</code>
*/
public Builder clearMemstoreSizeMB() {
bitField0_ = (bitField0_ & ~0x00000010);
memstoreSizeMB_ = 0;
onChanged();
return this;
}
// optional int32 storefileIndexSizeMB = 6;
private int storefileIndexSizeMB_ ;
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public boolean hasStorefileIndexSizeMB() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB_;
}
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public Builder setStorefileIndexSizeMB(int value) {
bitField0_ |= 0x00000020;
storefileIndexSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 storefileIndexSizeMB = 6;</code>
*/
public Builder clearStorefileIndexSizeMB() {
bitField0_ = (bitField0_ & ~0x00000020);
storefileIndexSizeMB_ = 0;
onChanged();
return this;
}
// optional int64 readRequestsCount = 7;
private long readRequestsCount_ ;
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public boolean hasReadRequestsCount() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public long getReadRequestsCount() {
return readRequestsCount_;
}
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public Builder setReadRequestsCount(long value) {
bitField0_ |= 0x00000040;
readRequestsCount_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 readRequestsCount = 7;</code>
*/
public Builder clearReadRequestsCount() {
bitField0_ = (bitField0_ & ~0x00000040);
readRequestsCount_ = 0L;
onChanged();
return this;
}
// optional int64 writeRequestsCount = 8;
private long writeRequestsCount_ ;
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public boolean hasWriteRequestsCount() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public long getWriteRequestsCount() {
return writeRequestsCount_;
}
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public Builder setWriteRequestsCount(long value) {
bitField0_ |= 0x00000080;
writeRequestsCount_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 writeRequestsCount = 8;</code>
*/
public Builder clearWriteRequestsCount() {
bitField0_ = (bitField0_ & ~0x00000080);
writeRequestsCount_ = 0L;
onChanged();
return this;
}
// optional int32 rootIndexSizeKB = 9;
private int rootIndexSizeKB_ ;
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public boolean hasRootIndexSizeKB() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public int getRootIndexSizeKB() {
return rootIndexSizeKB_;
}
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public Builder setRootIndexSizeKB(int value) {
bitField0_ |= 0x00000100;
rootIndexSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 rootIndexSizeKB = 9;</code>
*/
public Builder clearRootIndexSizeKB() {
bitField0_ = (bitField0_ & ~0x00000100);
rootIndexSizeKB_ = 0;
onChanged();
return this;
}
// optional int32 totalStaticIndexSizeKB = 10;
private int totalStaticIndexSizeKB_ ;
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public boolean hasTotalStaticIndexSizeKB() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB_;
}
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public Builder setTotalStaticIndexSizeKB(int value) {
bitField0_ |= 0x00000200;
totalStaticIndexSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 totalStaticIndexSizeKB = 10;</code>
*/
public Builder clearTotalStaticIndexSizeKB() {
bitField0_ = (bitField0_ & ~0x00000200);
totalStaticIndexSizeKB_ = 0;
onChanged();
return this;
}
// optional int32 totalStaticBloomSizeKB = 11;
private int totalStaticBloomSizeKB_ ;
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public boolean hasTotalStaticBloomSizeKB() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB_;
}
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public Builder setTotalStaticBloomSizeKB(int value) {
bitField0_ |= 0x00000400;
totalStaticBloomSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 totalStaticBloomSizeKB = 11;</code>
*/
public Builder clearTotalStaticBloomSizeKB() {
bitField0_ = (bitField0_ & ~0x00000400);
totalStaticBloomSizeKB_ = 0;
onChanged();
return this;
}
// optional int64 totalCompactingKVs = 12;
private long totalCompactingKVs_ ;
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public boolean hasTotalCompactingKVs() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public long getTotalCompactingKVs() {
return totalCompactingKVs_;
}
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public Builder setTotalCompactingKVs(long value) {
bitField0_ |= 0x00000800;
totalCompactingKVs_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 totalCompactingKVs = 12;</code>
*/
public Builder clearTotalCompactingKVs() {
bitField0_ = (bitField0_ & ~0x00000800);
totalCompactingKVs_ = 0L;
onChanged();
return this;
}
// optional int64 currentCompactedKVs = 13;
private long currentCompactedKVs_ ;
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public boolean hasCurrentCompactedKVs() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public long getCurrentCompactedKVs() {
return currentCompactedKVs_;
}
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public Builder setCurrentCompactedKVs(long value) {
bitField0_ |= 0x00001000;
currentCompactedKVs_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 currentCompactedKVs = 13;</code>
*/
public Builder clearCurrentCompactedKVs() {
bitField0_ = (bitField0_ & ~0x00001000);
currentCompactedKVs_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
}
// Eagerly creates the singleton default Region instance (noInit=true skips
// builder wiring) and populates its fields with proto defaults.
static {
defaultInstance = new Region(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
}
// Read-only accessor contract shared by the immutable Node message and its
// Builder: presence checks and getters for the five scalar fields plus the
// repeated {@code regions} list (field 6).
public interface NodeOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string name = 1;
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
boolean hasName();
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
java.lang.String getName();
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
com.google.protobuf.ByteString
getNameBytes();
// optional int64 startCode = 2;
/**
* <code>optional int64 startCode = 2;</code>
*/
boolean hasStartCode();
/**
* <code>optional int64 startCode = 2;</code>
*/
long getStartCode();
// optional int64 requests = 3;
/**
* <code>optional int64 requests = 3;</code>
*/
boolean hasRequests();
/**
* <code>optional int64 requests = 3;</code>
*/
long getRequests();
// optional int32 heapSizeMB = 4;
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
boolean hasHeapSizeMB();
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
int getHeapSizeMB();
// optional int32 maxHeapSizeMB = 5;
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
boolean hasMaxHeapSizeMB();
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
int getMaxHeapSizeMB();
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>
getRegionsList();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index);
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
int getRegionsCount();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
getRegionsOrBuilderList();
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
int index);
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
*/
public static final class Node extends
com.google.protobuf.GeneratedMessage
implements NodeOrBuilder {
// Use Node.newBuilder() to construct.
private Node(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// noInit constructor used only for the shared default instance; fields are
// populated afterwards via initFields() in the static initializer.
private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Node defaultInstance;
public static Node getDefaultInstance() {
return defaultInstance;
}
public Node getDefaultInstanceForType() {
return defaultInstance;
}
// Wire data for tags this generated class does not recognize, preserved for
// round-tripping.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: reads tag/value pairs until EOF (tag 0).  Tags map to
// proto fields: 10 = name (bytes), 16 = startCode, 24 = requests,
// 32 = heapSizeMB, 40 = maxHeapSizeMB, 50 = repeated regions (message).
// Unrecognized tags go to the unknown-field set.  The case order (default
// before the field cases) is a quirk of the generator; each case breaks, so
// behavior is unaffected.
private Node(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
name_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
startCode_ = input.readInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
requests_ = input.readInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
heapSizeMB_ = input.readInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
maxHeapSizeMB_ = input.readInt32();
break;
}
case 50: {
// Lazily allocate the regions list on the first occurrence of field 6.
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
mutable_bitField0_ |= 0x00000020;
}
regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always seal the message, even on error, so the unfinished message
// attached to the exception is immutable.
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
regions_ = java.util.Collections.unmodifiableList(regions_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor / reflection plumbing: ties this generated class to the proto
// descriptor and field-accessor table initialized elsewhere in the outer
// class, and exposes the stateless PARSER used by all parseFrom overloads.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
}
public static com.google.protobuf.Parser<Node> PARSER =
new com.google.protobuf.AbstractParser<Node>() {
public Node parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Node(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Node> getParserForType() {
return PARSER;
}
// ---------------------------------------------------------------------------
// Immutable message state.  bitField0_ records which scalar fields were
// present on the wire (bit 0 = name ... bit 4 = maxHeapSizeMB); the repeated
// regions list needs no presence bit.  name_ uses the standard String/
// ByteString dual representation: getName()/getNameBytes() each cache their
// preferred form back into name_ when the bytes are valid UTF-8.
// ---------------------------------------------------------------------------
private int bitField0_;
// required string name = 1;
public static final int NAME_FIELD_NUMBER = 1;
private java.lang.Object name_;
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes were valid UTF-8.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional int64 startCode = 2;
public static final int STARTCODE_FIELD_NUMBER = 2;
private long startCode_;
/**
* <code>optional int64 startCode = 2;</code>
*/
public boolean hasStartCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional int64 startCode = 2;</code>
*/
public long getStartCode() {
return startCode_;
}
// optional int64 requests = 3;
public static final int REQUESTS_FIELD_NUMBER = 3;
private long requests_;
/**
* <code>optional int64 requests = 3;</code>
*/
public boolean hasRequests() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int64 requests = 3;</code>
*/
public long getRequests() {
return requests_;
}
// optional int32 heapSizeMB = 4;
public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
private int heapSizeMB_;
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public boolean hasHeapSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public int getHeapSizeMB() {
return heapSizeMB_;
}
// optional int32 maxHeapSizeMB = 5;
public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
private int maxHeapSizeMB_;
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public boolean hasMaxHeapSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public int getMaxHeapSizeMB() {
return maxHeapSizeMB_;
}
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
public static final int REGIONS_FIELD_NUMBER = 6;
private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_;
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
return regions_;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
getRegionsOrBuilderList() {
return regions_;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public int getRegionsCount() {
return regions_.size();
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
return regions_.get(index);
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
int index) {
return regions_.get(index);
}
// Resets every field to its proto default; called by both constructors
// before parsing or default-instance setup.
private void initFields() {
name_ = "";
startCode_ = 0L;
requests_ = 0L;
heapSizeMB_ = 0;
maxHeapSizeMB_ = 0;
regions_ = java.util.Collections.emptyList();
}
// Memoized initialization check (-1 = unknown, 0 = false, 1 = true): the
// required {@code name} must be set and every nested Region must itself be
// initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasName()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getRegionsCount(); i++) {
if (!getRegions(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes set fields in field-number order, then the unknown fields.
// getSerializedSize() is invoked first for its side effect of populating
// memoized sizes used while writing nested messages.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(2, startCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt64(3, requests_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeInt32(4, heapSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeInt32(5, maxHeapSizeMB_);
}
for (int i = 0; i < regions_.size(); i++) {
output.writeMessage(6, regions_.get(i));
}
getUnknownFields().writeTo(output);
}
// Computes (and memoizes, -1 = not yet computed) the exact wire size of this
// message, mirroring writeTo() field for field.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getNameBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, startCode_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, requests_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(4, heapSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(5, maxHeapSizeMB_);
}
for (int i = 0; i < regions_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, regions_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
// Java-serialization hook inherited from GeneratedMessage; delegates so the
// message is serialized via its protobuf form rather than field reflection.
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Standard parseFrom surface: each overload simply delegates to PARSER.
// The ByteString/byte[] variants throw InvalidProtocolBufferException; the
// stream variants throw IOException.  parseDelimitedFrom expects a varint
// length prefix before the message bytes.
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: newBuilder() for a fresh builder, newBuilder(prototype)
// / toBuilder() for a builder pre-populated from an existing Node, and the
// parent-aware variant used internally for nested-builder change
// notification.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder {
// Builder descriptor plumbing and construction.  maybeForceBuilderInit
// eagerly creates the regions field-builder only when running with
// reflection-backed field builders enabled.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
}
// Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionsFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets every field to its proto default and clears all presence bits.
// The repeated regions field is cleared through its field builder when one
// exists, otherwise by swapping in an empty list.
public Builder clear() {
super.clear();
name_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
startCode_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
requests_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
heapSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
maxHeapSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
if (regionsBuilder_ == null) {
regions_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
} else {
regionsBuilder_.clear();
}
return this;
}
// clone() deep-copies via buildPartial + merge; build() additionally
// enforces that all required fields (recursively) are set, throwing
// UninitializedMessageException otherwise.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
}
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
}
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds the message without checking required fields. Copies each scalar value
// and translates the builder's presence bits (from_bitField0_) into the message's
// presence bits (to_bitField0_); here the bit positions happen to be identical.
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.name_ = name_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.startCode_ = startCode_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.requests_ = requests_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.heapSizeMB_ = heapSizeMB_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.maxHeapSizeMB_ = maxHeapSizeMB_;
// Repeated 'regions': freeze the builder-owned list (making it unmodifiable and
// dropping the mutability bit) or delegate to the field builder.
if (regionsBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
regions_ = java.util.Collections.unmodifiableList(regions_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.regions_ = regions_;
} else {
result.regions_ = regionsBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Dynamic-dispatch merge: routes to the typed overload when 'other' is a Node,
// otherwise falls back to the reflective merge in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge: set fields present in 'other' overwrite this builder's
// values; repeated 'regions' entries are appended.
public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
if (other.hasName()) {
bitField0_ |= 0x00000001;
name_ = other.name_;
onChanged();
}
if (other.hasStartCode()) {
setStartCode(other.getStartCode());
}
if (other.hasRequests()) {
setRequests(other.getRequests());
}
if (other.hasHeapSizeMB()) {
setHeapSizeMB(other.getHeapSizeMB());
}
if (other.hasMaxHeapSizeMB()) {
setMaxHeapSizeMB(other.getMaxHeapSizeMB());
}
if (regionsBuilder_ == null) {
if (!other.regions_.isEmpty()) {
if (regions_.isEmpty()) {
// Adopt the other message's (immutable) list directly; the cleared bit
// marks it as not-owned so it is copied before any mutation.
regions_ = other.regions_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureRegionsIsMutable();
regions_.addAll(other.regions_);
}
onChanged();
}
} else {
if (!other.regions_.isEmpty()) {
if (regionsBuilder_.isEmpty()) {
// Discard the empty field builder and adopt the other list wholesale;
// recreate the builder only if the runtime forces field builders.
regionsBuilder_.dispose();
regionsBuilder_ = null;
regions_ = other.regions_;
bitField0_ = (bitField0_ & ~0x00000020);
regionsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getRegionsFieldBuilder() : null;
} else {
regionsBuilder_.addAllMessages(other.regions_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// A Node is initialized when required 'name' is set and every nested Region
// is itself initialized.
public final boolean isInitialized() {
if (!hasName()) {
return false;
}
for (int i = 0; i < getRegionsCount(); i++) {
if (!getRegions(i).isInitialized()) {
return false;
}
}
return true;
}
// Parses a Node from the input stream and merges it into this builder.
// On a parse error, any partially parsed message is still merged (finally block)
// before the exception is rethrown.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence bits for this builder's fields (bit N set = field N+1 has been set).
private int bitField0_;
// required string name = 1;
// Stored as either a String or a ByteString; converted lazily and cached.
private java.lang.Object name_ = "";
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
// Decode the cached ByteString once and keep the String form.
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
name_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
// Encode the cached String once and keep the ByteString form.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public Builder setName(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000001);
name_ = getDefaultInstance().getName();
onChanged();
return this;
}
/**
* <code>required string name = 1;</code>
*
* <pre>
* name:port
* </pre>
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
name_ = value;
onChanged();
return this;
}
// --- Scalar field accessors: each has/get/set/clear quartet manages one
// --- presence bit in bitField0_ and calls onChanged() on mutation. ---
// optional int64 startCode = 2;
private long startCode_ ;
/**
* <code>optional int64 startCode = 2;</code>
*/
public boolean hasStartCode() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional int64 startCode = 2;</code>
*/
public long getStartCode() {
return startCode_;
}
/**
* <code>optional int64 startCode = 2;</code>
*/
public Builder setStartCode(long value) {
bitField0_ |= 0x00000002;
startCode_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 startCode = 2;</code>
*/
public Builder clearStartCode() {
bitField0_ = (bitField0_ & ~0x00000002);
startCode_ = 0L;
onChanged();
return this;
}
// optional int64 requests = 3;
private long requests_ ;
/**
* <code>optional int64 requests = 3;</code>
*/
public boolean hasRequests() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int64 requests = 3;</code>
*/
public long getRequests() {
return requests_;
}
/**
* <code>optional int64 requests = 3;</code>
*/
public Builder setRequests(long value) {
bitField0_ |= 0x00000004;
requests_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 requests = 3;</code>
*/
public Builder clearRequests() {
bitField0_ = (bitField0_ & ~0x00000004);
requests_ = 0L;
onChanged();
return this;
}
// optional int32 heapSizeMB = 4;
private int heapSizeMB_ ;
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public boolean hasHeapSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public int getHeapSizeMB() {
return heapSizeMB_;
}
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public Builder setHeapSizeMB(int value) {
bitField0_ |= 0x00000008;
heapSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 heapSizeMB = 4;</code>
*/
public Builder clearHeapSizeMB() {
bitField0_ = (bitField0_ & ~0x00000008);
heapSizeMB_ = 0;
onChanged();
return this;
}
// optional int32 maxHeapSizeMB = 5;
private int maxHeapSizeMB_ ;
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public boolean hasMaxHeapSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public int getMaxHeapSizeMB() {
return maxHeapSizeMB_;
}
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public Builder setMaxHeapSizeMB(int value) {
bitField0_ |= 0x00000010;
maxHeapSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 maxHeapSizeMB = 5;</code>
*/
public Builder clearMaxHeapSizeMB() {
bitField0_ = (bitField0_ & ~0x00000010);
maxHeapSizeMB_ = 0;
onChanged();
return this;
}
// --- Repeated 'regions' field. The list lives in one of two states:
// --- (a) regionsBuilder_ == null: a plain list in regions_, where bit 0x20 of
// ---     bitField0_ marks it as builder-owned/mutable;
// --- (b) regionsBuilder_ != null: all state is delegated to the field builder.
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_ =
java.util.Collections.emptyList();
// Copy-on-write: replace a shared/immutable list with a private ArrayList copy.
private void ensureRegionsIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>(regions_);
bitField0_ |= 0x00000020;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder> regionsBuilder_;
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
if (regionsBuilder_ == null) {
return java.util.Collections.unmodifiableList(regions_);
} else {
return regionsBuilder_.getMessageList();
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public int getRegionsCount() {
if (regionsBuilder_ == null) {
return regions_.size();
} else {
return regionsBuilder_.getCount();
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
if (regionsBuilder_ == null) {
return regions_.get(index);
} else {
return regionsBuilder_.getMessage(index);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder setRegions(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
if (regionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsIsMutable();
regions_.set(index, value);
onChanged();
} else {
regionsBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder setRegions(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
if (regionsBuilder_ == null) {
ensureRegionsIsMutable();
regions_.set(index, builderForValue.build());
onChanged();
} else {
regionsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
if (regionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsIsMutable();
regions_.add(value);
onChanged();
} else {
regionsBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder addRegions(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
if (regionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsIsMutable();
regions_.add(index, value);
onChanged();
} else {
regionsBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder addRegions(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
if (regionsBuilder_ == null) {
ensureRegionsIsMutable();
regions_.add(builderForValue.build());
onChanged();
} else {
regionsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder addRegions(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
if (regionsBuilder_ == null) {
ensureRegionsIsMutable();
regions_.add(index, builderForValue.build());
onChanged();
} else {
regionsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder addAllRegions(
java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> values) {
if (regionsBuilder_ == null) {
ensureRegionsIsMutable();
super.addAll(values, regions_);
onChanged();
} else {
regionsBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder clearRegions() {
if (regionsBuilder_ == null) {
regions_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
regionsBuilder_.clear();
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public Builder removeRegions(int index) {
if (regionsBuilder_ == null) {
ensureRegionsIsMutable();
regions_.remove(index);
onChanged();
} else {
regionsBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder getRegionsBuilder(
int index) {
return getRegionsFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
int index) {
if (regionsBuilder_ == null) {
return regions_.get(index); } else {
return regionsBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
getRegionsOrBuilderList() {
if (regionsBuilder_ != null) {
return regionsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(regions_);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder addRegionsBuilder() {
return getRegionsFieldBuilder().addBuilder(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance());
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder addRegionsBuilder(
int index) {
return getRegionsFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance());
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder>
getRegionsBuilderList() {
return getRegionsFieldBuilder().getBuilderList();
}
// Lazily creates the field builder and hands it the current list; from then on
// the builder is the single source of truth (regions_ is nulled out).
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
getRegionsFieldBuilder() {
if (regionsBuilder_ == null) {
regionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>(
regions_,
((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
regions_ = null;
}
return regionsBuilder_;
}
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node)
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node)
}
// Class initializer: creates the singleton default Node instance
// (the 'true' flag selects the no-op default-instance constructor).
static {
defaultInstance = new Node(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node)
}
// Presence bits for the optional scalar fields of StorageClusterStatus.
private int bitField0_;
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
public static final int LIVENODES_FIELD_NUMBER = 1;
// Immutable once built; repeated fields have no presence bit.
private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_;
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
return liveNodes_;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
getLiveNodesOrBuilderList() {
return liveNodes_;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public int getLiveNodesCount() {
return liveNodes_.size();
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
return liveNodes_.get(index);
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
int index) {
return liveNodes_.get(index);
}
// repeated string deadNodes = 2;
public static final int DEADNODES_FIELD_NUMBER = 2;
// LazyStringList keeps both String and ByteString forms to avoid re-encoding.
private com.google.protobuf.LazyStringList deadNodes_;
/**
* <code>repeated string deadNodes = 2;</code>
*/
public java.util.List<java.lang.String>
getDeadNodesList() {
return deadNodes_;
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public int getDeadNodesCount() {
return deadNodes_.size();
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public java.lang.String getDeadNodes(int index) {
return deadNodes_.get(index);
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public com.google.protobuf.ByteString
getDeadNodesBytes(int index) {
return deadNodes_.getByteString(index);
}
// --- Optional summary scalars; presence is tracked in bitField0_
// --- (bit 0x1 = regions, 0x2 = requests, 0x4 = averageLoad). ---
// optional int32 regions = 3;
public static final int REGIONS_FIELD_NUMBER = 3;
private int regions_;
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public boolean hasRegions() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public int getRegions() {
return regions_;
}
// optional int64 requests = 4;
public static final int REQUESTS_FIELD_NUMBER = 4;
private long requests_;
/**
* <code>optional int64 requests = 4;</code>
*/
public boolean hasRequests() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional int64 requests = 4;</code>
*/
public long getRequests() {
return requests_;
}
// optional double averageLoad = 5;
public static final int AVERAGELOAD_FIELD_NUMBER = 5;
private double averageLoad_;
/**
* <code>optional double averageLoad = 5;</code>
*/
public boolean hasAverageLoad() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional double averageLoad = 5;</code>
*/
public double getAverageLoad() {
return averageLoad_;
}
// Sets every field to its proto default; called on the default instance.
private void initFields() {
liveNodes_ = java.util.Collections.emptyList();
deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
regions_ = 0;
requests_ = 0L;
averageLoad_ = 0D;
}
// Memoized initialization check: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
// This message has no required fields of its own; it is initialized
// iff every nested live Node is initialized.
for (int i = 0; i < getLiveNodesCount(); i++) {
if (!getLiveNodes(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes the message in field-number order; only fields whose presence
// bit is set are written.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Forces the size to be memoized before writing (protoc convention).
getSerializedSize();
for (int i = 0; i < liveNodes_.size(); i++) {
output.writeMessage(1, liveNodes_.get(i));
}
for (int i = 0; i < deadNodes_.size(); i++) {
output.writeBytes(2, deadNodes_.getByteString(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt32(3, regions_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt64(4, requests_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeDouble(5, averageLoad_);
}
getUnknownFields().writeTo(output);
}
// Memoized wire size; -1 until first computed.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < liveNodes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, liveNodes_.get(i));
}
{
int dataSize = 0;
for (int i = 0; i < deadNodes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(deadNodes_.getByteString(i));
}
size += dataSize;
// One 1-byte tag per deadNodes entry (field 2, wire type 2).
size += 1 * getDeadNodesList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, regions_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(4, requests_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(5, averageLoad_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
// Java serialization hook; delegates to the protobuf superclass implementation.
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// --- Static parse entry points: thin wrappers over the shared PARSER instance,
// --- one per supported input source (ByteString, byte[], InputStream,
// --- delimited stream, CodedInputStream), each with an optional
// --- ExtensionRegistryLite overload. ---
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: fresh builder, builder pre-populated from a prototype,
// and round-trip from an existing instance.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
// Parent-aware builder used when this message is nested inside another builder.
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatusOrBuilder {
// Descriptor and reflection table for StorageClusterStatus (declared at file scope).
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
}
// Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parent-aware constructor used when this builder is nested inside another builder.
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates the repeated-field builder for 'liveNodes' when the protobuf
// runtime flag alwaysUseFieldBuilders is set.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getLiveNodesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets all fields to defaults and clears the builder's presence/mutability
// bits (0x1 = liveNodes mutable, 0x2 = deadNodes mutable, 0x4/0x8/0x10 =
// regions/requests/averageLoad presence).
public Builder clear() {
super.clear();
if (liveNodesBuilder_ == null) {
liveNodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
liveNodesBuilder_.clear();
}
deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
regions_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
requests_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
averageLoad_ = 0D;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
// Deep copy via buildPartial + merge into a fresh builder.
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
}
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus getDefaultInstanceForType() {
return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance();
}
// Builds the message, throwing if any nested live Node is uninitialized.
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus build() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds without the initialization check. Note the presence-bit remap: builder
// bits 0x4/0x8/0x10 (regions/requests/averageLoad) become message bits
// 0x1/0x2/0x4, because the two repeated fields take no bits in the message.
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildPartial() {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (liveNodesBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.liveNodes_ = liveNodes_;
} else {
result.liveNodes_ = liveNodesBuilder_.build();
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(
deadNodes_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.deadNodes_ = deadNodes_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000001;
}
result.regions_ = regions_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000002;
}
result.requests_ = requests_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000004;
}
result.averageLoad_ = averageLoad_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) {
return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges another StorageClusterStatus into this builder: repeated fields are
// concatenated (or aliased when this builder is empty, deferring the copy),
// and set optional scalars overwrite ours. A merge from the default instance
// is a no-op.
public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus other) {
if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance()) return this;
if (liveNodesBuilder_ == null) {
if (!other.liveNodes_.isEmpty()) {
if (liveNodes_.isEmpty()) {
// Alias the other message's (immutable) list; clearing bit 0 makes
// ensureLiveNodesIsMutable() copy before any local mutation.
liveNodes_ = other.liveNodes_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureLiveNodesIsMutable();
liveNodes_.addAll(other.liveNodes_);
}
onChanged();
}
} else {
if (!other.liveNodes_.isEmpty()) {
if (liveNodesBuilder_.isEmpty()) {
// The nested builder holds nothing: drop it, alias the incoming list,
// and (when alwaysUseFieldBuilders is set) immediately rebuild the
// field builder around that list.
liveNodesBuilder_.dispose();
liveNodesBuilder_ = null;
liveNodes_ = other.liveNodes_;
bitField0_ = (bitField0_ & ~0x00000001);
liveNodesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getLiveNodesFieldBuilder() : null;
} else {
liveNodesBuilder_.addAllMessages(other.liveNodes_);
}
}
}
// Repeated string field: same alias-when-empty / append-otherwise policy.
if (!other.deadNodes_.isEmpty()) {
if (deadNodes_.isEmpty()) {
deadNodes_ = other.deadNodes_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureDeadNodesIsMutable();
deadNodes_.addAll(other.deadNodes_);
}
onChanged();
}
// Optional scalars: only overwrite when present in the other message.
if (other.hasRegions()) {
setRegions(other.getRegions());
}
if (other.hasRequests()) {
setRequests(other.getRequests());
}
if (other.hasAverageLoad()) {
setAverageLoad(other.getAverageLoad());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
// This message has no required fields of its own; it is initialized iff
// every liveNodes element is initialized (Node declares a required field
// in the descriptor).
final int nodeCount = getLiveNodesCount();
for (int i = 0; i < nodeCount; i++) {
if (!getLiveNodes(i).isInitialized()) {
return false;
}
}
return true;
}
// Parses a StorageClusterStatus from the wire and merges it into this
// builder. On InvalidProtocolBufferException the partially-parsed message is
// recovered from the exception and still merged (in the finally block) before
// the exception propagates — this is the standard generated-code contract for
// preserving fields read before the failure.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence/ownership bits: bit 0 = builder owns a mutable liveNodes list,
// bit 1 = mutable deadNodes list, bits 2-4 = has regions/requests/averageLoad.
private int bitField0_;
// repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
// Backing list for liveNodes while no field builder exists; may alias an
// immutable list from a merged message until ensureLiveNodesIsMutable() runs.
private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_ =
java.util.Collections.emptyList();
// Copy-on-write guard: replace a shared/immutable list with a private
// ArrayList copy before the first local mutation.
private void ensureLiveNodesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>(liveNodes_);
bitField0_ |= 0x00000001;
}
}
// When non-null, the field builder owns liveNodes and liveNodes_ is unused.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder> liveNodesBuilder_;
// ---------------------------------------------------------------------------
// Accessors for the repeated liveNodes field. Every accessor operates in one
// of two modes: plain-list mode (liveNodesBuilder_ == null, backed by
// liveNodes_) or builder mode (delegating to the RepeatedFieldBuilder).
// Mutations in list mode call onChanged() to invalidate any cached message.
// ---------------------------------------------------------------------------
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
if (liveNodesBuilder_ == null) {
// Wrap so callers cannot mutate the builder's backing list.
return java.util.Collections.unmodifiableList(liveNodes_);
} else {
return liveNodesBuilder_.getMessageList();
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public int getLiveNodesCount() {
if (liveNodesBuilder_ == null) {
return liveNodes_.size();
} else {
return liveNodesBuilder_.getCount();
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
if (liveNodesBuilder_ == null) {
return liveNodes_.get(index);
} else {
return liveNodesBuilder_.getMessage(index);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder setLiveNodes(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
if (liveNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveNodesIsMutable();
liveNodes_.set(index, value);
onChanged();
} else {
liveNodesBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder setLiveNodes(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
if (liveNodesBuilder_ == null) {
ensureLiveNodesIsMutable();
liveNodes_.set(index, builderForValue.build());
onChanged();
} else {
liveNodesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder addLiveNodes(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
if (liveNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveNodesIsMutable();
liveNodes_.add(value);
onChanged();
} else {
liveNodesBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder addLiveNodes(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
if (liveNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveNodesIsMutable();
liveNodes_.add(index, value);
onChanged();
} else {
liveNodesBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder addLiveNodes(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
if (liveNodesBuilder_ == null) {
ensureLiveNodesIsMutable();
liveNodes_.add(builderForValue.build());
onChanged();
} else {
liveNodesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder addLiveNodes(
int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
if (liveNodesBuilder_ == null) {
ensureLiveNodesIsMutable();
liveNodes_.add(index, builderForValue.build());
onChanged();
} else {
liveNodesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder addAllLiveNodes(
java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> values) {
if (liveNodesBuilder_ == null) {
ensureLiveNodesIsMutable();
// GeneratedMessage.Builder.addAll null-checks each element as it copies.
super.addAll(values, liveNodes_);
onChanged();
} else {
liveNodesBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder clearLiveNodes() {
if (liveNodesBuilder_ == null) {
liveNodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
liveNodesBuilder_.clear();
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public Builder removeLiveNodes(int index) {
if (liveNodesBuilder_ == null) {
ensureLiveNodesIsMutable();
liveNodes_.remove(index);
onChanged();
} else {
liveNodesBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder getLiveNodesBuilder(
int index) {
// Forces builder mode: the returned Builder writes through to this field.
return getLiveNodesFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
int index) {
if (liveNodesBuilder_ == null) {
return liveNodes_.get(index); } else {
return liveNodesBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
getLiveNodesOrBuilderList() {
if (liveNodesBuilder_ != null) {
return liveNodesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(liveNodes_);
}
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder addLiveNodesBuilder() {
return getLiveNodesFieldBuilder().addBuilder(
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance());
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder addLiveNodesBuilder(
int index) {
return getLiveNodesFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance());
}
/**
* <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
*
* <pre>
* node status
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder>
getLiveNodesBuilderList() {
return getLiveNodesFieldBuilder().getBuilderList();
}
// Lazily creates the RepeatedFieldBuilder, migrating the current list into it
// (passing ownership: bit 0 tells the builder whether the list was mutable).
// After this runs, liveNodes_ is null and all access goes through the builder.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
getLiveNodesFieldBuilder() {
if (liveNodesBuilder_ == null) {
liveNodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>(
liveNodes_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
liveNodes_ = null;
}
return liveNodesBuilder_;
}
// repeated string deadNodes = 2;
// Backing storage for deadNodes; starts as the shared immutable EMPTY list
// and is copy-on-written by ensureDeadNodesIsMutable() (tracked via bit 1).
private com.google.protobuf.LazyStringList deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureDeadNodesIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
deadNodes_ = new com.google.protobuf.LazyStringArrayList(deadNodes_);
bitField0_ |= 0x00000002;
}
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public java.util.List<java.lang.String>
getDeadNodesList() {
// Read-only view; callers must use the add/set methods to mutate.
return java.util.Collections.unmodifiableList(deadNodes_);
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public int getDeadNodesCount() {
return deadNodes_.size();
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public java.lang.String getDeadNodes(int index) {
return deadNodes_.get(index);
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public com.google.protobuf.ByteString
getDeadNodesBytes(int index) {
// Raw (possibly lazily-decoded) bytes of the element at index.
return deadNodes_.getByteString(index);
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public Builder setDeadNodes(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadNodesIsMutable();
deadNodes_.set(index, value);
onChanged();
return this;
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public Builder addDeadNodes(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadNodesIsMutable();
deadNodes_.add(value);
onChanged();
return this;
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public Builder addAllDeadNodes(
java.lang.Iterable<java.lang.String> values) {
ensureDeadNodesIsMutable();
// GeneratedMessage.Builder.addAll null-checks each element as it copies.
super.addAll(values, deadNodes_);
onChanged();
return this;
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public Builder clearDeadNodes() {
// Revert to the shared empty list and drop the "mutable copy" bit.
deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* <code>repeated string deadNodes = 2;</code>
*/
public Builder addDeadNodesBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadNodesIsMutable();
deadNodes_.add(value);
onChanged();
return this;
}
// optional int32 regions = 3;
// Presence is tracked in bitField0_ bit 2; 0 is the proto2 default.
private int regions_ ;
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public boolean hasRegions() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public int getRegions() {
return regions_;
}
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public Builder setRegions(int value) {
bitField0_ |= 0x00000004;
regions_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 regions = 3;</code>
*
* <pre>
* summary statistics
* </pre>
*/
public Builder clearRegions() {
bitField0_ = (bitField0_ & ~0x00000004);
regions_ = 0;
onChanged();
return this;
}
// optional int64 requests = 4;
// Presence is tracked in bitField0_ bit 3; 0L is the proto2 default.
private long requests_ ;
/**
* <code>optional int64 requests = 4;</code>
*/
public boolean hasRequests() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional int64 requests = 4;</code>
*/
public long getRequests() {
return requests_;
}
/**
* <code>optional int64 requests = 4;</code>
*/
public Builder setRequests(long value) {
bitField0_ |= 0x00000008;
requests_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 requests = 4;</code>
*/
public Builder clearRequests() {
bitField0_ = (bitField0_ & ~0x00000008);
requests_ = 0L;
onChanged();
return this;
}
// optional double averageLoad = 5;
// Presence is tracked in bitField0_ bit 4; 0D is the proto2 default.
private double averageLoad_ ;
/**
* <code>optional double averageLoad = 5;</code>
*/
public boolean hasAverageLoad() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional double averageLoad = 5;</code>
*/
public double getAverageLoad() {
return averageLoad_;
}
/**
* <code>optional double averageLoad = 5;</code>
*/
public Builder setAverageLoad(double value) {
bitField0_ |= 0x00000010;
averageLoad_ = value;
onChanged();
return this;
}
/**
* <code>optional double averageLoad = 5;</code>
*/
public Builder clearAverageLoad() {
bitField0_ = (bitField0_ & ~0x00000010);
averageLoad_ = 0D;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus)
}
// Eagerly creates the singleton default instance for StorageClusterStatus;
// the (true) constructor skips field initialization, so initFields() is
// called explicitly afterwards.
static {
defaultInstance = new StorageClusterStatus(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus)
}
// Descriptor and reflective field-accessor tables for the three message
// types in this file (StorageClusterStatus and its nested Region and Node).
// All are populated once by the static initializer below.
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
/** Returns the FileDescriptor for StorageClusterStatusMessage.proto. */
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
// Builds the FileDescriptor from the serialized FileDescriptorProto embedded
// below (the escaped string is binary wire data — DO NOT edit it), then wires
// up the per-message descriptors and reflective field-accessor tables.
static {
java.lang.String[] descriptorData = {
"\n!StorageClusterStatusMessage.proto\022/org" +
".apache.hadoop.hbase.rest.protobuf.gener" +
"ated\"\333\005\n\024StorageClusterStatus\022]\n\tliveNod" +
"es\030\001 \003(\0132J.org.apache.hadoop.hbase.rest." +
"protobuf.generated.StorageClusterStatus." +
"Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regions\030\003 \001(\005" +
"\022\020\n\010requests\030\004 \001(\003\022\023\n\013averageLoad\030\005 \001(\001\032" +
"\322\002\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" +
"\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefileSizeMB\030" +
"\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n\024storefi",
"leIndexSizeMB\030\006 \001(\005\022\031\n\021readRequestsCount" +
"\030\007 \001(\003\022\032\n\022writeRequestsCount\030\010 \001(\003\022\027\n\017ro" +
"otIndexSizeKB\030\t \001(\005\022\036\n\026totalStaticIndexS" +
"izeKB\030\n \001(\005\022\036\n\026totalStaticBloomSizeKB\030\013 " +
"\001(\005\022\032\n\022totalCompactingKVs\030\014 \001(\003\022\033\n\023curre" +
"ntCompactedKVs\030\r \001(\003\032\303\001\n\004Node\022\014\n\004name\030\001 " +
"\002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\003" +
"\022\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 " +
"\001(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop" +
".hbase.rest.protobuf.generated.StorageCl",
"usterStatus.Region"
};
// Callback invoked once the FileDescriptor is built; field-name arrays must
// stay in declaration order so reflection can locate each field's accessors.
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor,
new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", });
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor =
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor,
new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "TotalCompactingKVs", "CurrentCompactedKVs", });
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor =
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(1);
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor,
new java.lang.String[] { "Name", "StartCode", "Requests", "HeapSizeMB", "MaxHeapSizeMB", "Regions", });
// No extensions in this file.
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}