// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: RegionServerStatus.proto

package org.apache.hadoop.hbase.protobuf.generated;

public final class RegionServerStatusProtos {
  private RegionServerStatusProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface RegionServerStartupRequestOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint32 port = 1;
    /**
     * <code>required uint32 port = 1;</code>
     *
     * <pre>
     ** Port number this regionserver is up on
     * </pre>
     */
    boolean hasPort();
    /**
     * <code>required uint32 port = 1;</code>
     *
     * <pre>
     ** Port number this regionserver is up on
     * </pre>
     */
    int getPort();

    // required uint64 server_start_code = 2;
    /**
     * <code>required uint64 server_start_code = 2;</code>
     *
     * <pre>
     ** This servers' startcode
     * </pre>
     */
    boolean hasServerStartCode();
    /**
     * <code>required uint64 server_start_code = 2;</code>
     *
     * <pre>
     ** This servers' startcode
     * </pre>
     */
    long getServerStartCode();

    // required uint64 server_current_time = 3;
    /**
     * <code>required uint64 server_current_time = 3;</code>
     *
     * <pre>
     ** Current time of the region server in ms
     * </pre>
     */
    boolean hasServerCurrentTime();
    /**
     * <code>required uint64 server_current_time = 3;</code>
     *
     * <pre>
     ** Current time of the region server in ms
     * </pre>
     */
    long getServerCurrentTime();
  }
  /**
   * Protobuf type {@code RegionServerStartupRequest}
   */
  public static final class RegionServerStartupRequest extends
      com.google.protobuf.GeneratedMessage
      implements RegionServerStartupRequestOrBuilder {
    // Use RegionServerStartupRequest.newBuilder() to construct.
    private RegionServerStartupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private RegionServerStartupRequest(boolean noInit) {
      this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance();
    }

    private static final RegionServerStartupRequest defaultInstance;
    public static RegionServerStartupRequest getDefaultInstance() {
      return defaultInstance;
    }

    public RegionServerStartupRequest getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private RegionServerStartupRequest(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              port_ = input.readUInt32();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              serverStartCode_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              serverCurrentTime_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final
com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder.class); } public static com.google.protobuf.Parser<RegionServerStartupRequest> PARSER = new com.google.protobuf.AbstractParser<RegionServerStartupRequest>() { public RegionServerStartupRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RegionServerStartupRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<RegionServerStartupRequest> getParserForType() { return PARSER; } private int bitField0_; // required uint32 port = 1; public static final int PORT_FIELD_NUMBER = 1; private int port_; /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public boolean hasPort() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public int getPort() { return port_; } // required uint64 server_start_code = 2; public static final int SERVER_START_CODE_FIELD_NUMBER = 2; private long serverStartCode_; /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public boolean hasServerStartCode() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public long getServerStartCode() { return serverStartCode_; } // required uint64 server_current_time = 3; public static final int SERVER_CURRENT_TIME_FIELD_NUMBER = 3; private long serverCurrentTime_; /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public boolean hasServerCurrentTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public long getServerCurrentTime() { return serverCurrentTime_; } private void initFields() { port_ = 0; serverStartCode_ = 0L; serverCurrentTime_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasPort()) { memoizedIsInitialized = 0; return false; } if (!hasServerStartCode()) { memoizedIsInitialized = 0; return false; } if (!hasServerCurrentTime()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt32(1, port_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, serverStartCode_); } if 
(((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, serverCurrentTime_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(1, port_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, serverStartCode_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, serverCurrentTime_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest) obj; boolean result = true; result = result && (hasPort() == other.hasPort()); if (hasPort()) { result = result && (getPort() == other.getPort()); } result = result && (hasServerStartCode() == other.hasServerStartCode()); if (hasServerStartCode()) { result = result && (getServerStartCode() == other.getServerStartCode()); } result = result && (hasServerCurrentTime() == other.hasServerCurrentTime()); if (hasServerCurrentTime()) { result = result && (getServerCurrentTime() == other.getServerCurrentTime()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPort()) { hash = (37 * hash) + PORT_FIELD_NUMBER; hash = (53 * hash) + getPort(); } if (hasServerStartCode()) { hash = (37 * hash) + SERVER_START_CODE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getServerStartCode()); } if (hasServerCurrentTime()) { hash = (37 * hash) + SERVER_CURRENT_TIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getServerCurrentTime()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code RegionServerStartupRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } 
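      // Illustrative builder usage, kept as a comment because this file is
      // generated and must stay untouched by hand edits. The literal values
      // below are placeholders, not defaults used anywhere in HBase:
      //
      //   RegionServerStartupRequest request = RegionServerStartupRequest.newBuilder()
      //       .setPort(16020)                                    // placeholder RPC port
      //       .setServerStartCode(System.currentTimeMillis())    // startcode: conventionally the process start time
      //       .setServerCurrentTime(System.currentTimeMillis())  // lets the master compare clocks
      //       .build();  // build() throws UninitializedMessageException if any required field is unset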
private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); port_ = 0; bitField0_ = (bitField0_ & ~0x00000001); serverStartCode_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); serverCurrentTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.port_ = port_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.serverStartCode_ = serverStartCode_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.serverCurrentTime_ = serverCurrentTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance()) return this; if (other.hasPort()) { setPort(other.getPort()); } if (other.hasServerStartCode()) { setServerStartCode(other.getServerStartCode()); } if (other.hasServerCurrentTime()) { setServerCurrentTime(other.getServerCurrentTime()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasPort()) { return false; } if (!hasServerStartCode()) { return false; } if (!hasServerCurrentTime()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint32 port = 1; private int port_ ; /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public boolean hasPort() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public int getPort() { return port_; } /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public Builder setPort(int value) { bitField0_ |= 0x00000001; port_ = value; onChanged(); return this; } /** * <code>required uint32 port = 1;</code> * * <pre> ** Port number this regionserver is up on * </pre> */ public Builder clearPort() { bitField0_ = (bitField0_ & ~0x00000001); port_ = 0; onChanged(); return this; } // required uint64 server_start_code = 2; private long serverStartCode_ ; /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public boolean hasServerStartCode() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public long getServerStartCode() { return serverStartCode_; } /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public Builder setServerStartCode(long value) { bitField0_ |= 0x00000002; serverStartCode_ = value; onChanged(); return this; } /** * <code>required uint64 server_start_code = 2;</code> * * <pre> ** This servers' startcode * </pre> */ public Builder clearServerStartCode() { bitField0_ = (bitField0_ & ~0x00000002); serverStartCode_ = 0L; onChanged(); return this; } // required uint64 server_current_time = 3; private long serverCurrentTime_ ; /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public boolean hasServerCurrentTime() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public long getServerCurrentTime() { return serverCurrentTime_; } /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public Builder setServerCurrentTime(long value) { bitField0_ |= 0x00000004; serverCurrentTime_ = value; onChanged(); return this; } /** * <code>required uint64 server_current_time = 3;</code> * * <pre> ** Current time of the region server in ms * </pre> */ public Builder clearServerCurrentTime() { bitField0_ = (bitField0_ & ~0x00000004); serverCurrentTime_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:RegionServerStartupRequest) } static { defaultInstance = new RegionServerStartupRequest(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:RegionServerStartupRequest) } public interface RegionServerStartupResponseOrBuilder extends 
com.google.protobuf.MessageOrBuilder { // repeated .NameStringPair map_entries = 1; /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getMapEntriesList(); /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index); /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ int getMapEntriesCount(); /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> getMapEntriesOrBuilderList(); /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder( int index); } /** * Protobuf type {@code RegionServerStartupResponse} */ public static final class RegionServerStartupResponse extends com.google.protobuf.GeneratedMessage implements RegionServerStartupResponseOrBuilder { // Use RegionServerStartupResponse.newBuilder() to construct. 
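    // Read-side sketch (an assumption about typical caller code, not output of
    // protoc): the repeated NameStringPair entries carry key/value configuration
    // from the master, which a regionserver can fold into its local
    // Configuration. "response" and "conf" below are hypothetical locals:
    //
    //   for (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair pair
    //       : response.getMapEntriesList()) {
    //     conf.set(pair.getName(), pair.getValue());  // e.g. hbase.rootdir
    //   }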
private RegionServerStartupResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RegionServerStartupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RegionServerStartupResponse defaultInstance; public static RegionServerStartupResponse getDefaultInstance() { return defaultInstance; } public RegionServerStartupResponse getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegionServerStartupResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { mapEntries_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>(); mutable_bitField0_ |= 0x00000001; } mapEntries_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { mapEntries_ = java.util.Collections.unmodifiableList(mapEntries_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder.class); } public static com.google.protobuf.Parser<RegionServerStartupResponse> PARSER = new com.google.protobuf.AbstractParser<RegionServerStartupResponse>() { public RegionServerStartupResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RegionServerStartupResponse(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<RegionServerStartupResponse> getParserForType() { return PARSER; } // repeated .NameStringPair map_entries = 1; public static final int MAP_ENTRIES_FIELD_NUMBER = 1; private 
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> mapEntries_; /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getMapEntriesList() { return mapEntries_; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> getMapEntriesOrBuilderList() { return mapEntries_; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public int getMapEntriesCount() { return mapEntries_.size(); } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index) { return mapEntries_.get(index); } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder( int index) { return mapEntries_.get(index); } private void initFields() { mapEntries_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; for (int i = 0; i < getMapEntriesCount(); i++) { if (!getMapEntries(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); for (int i = 0; i < mapEntries_.size(); i++) { output.writeMessage(1, mapEntries_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; for (int i = 0; i < mapEntries_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, mapEntries_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse other = 
(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) obj; boolean result = true; result = result && getMapEntriesList() .equals(other.getMapEntriesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (getMapEntriesCount() > 0) { hash = (37 * hash) + MAP_ENTRIES_FIELD_NUMBER; hash = (53 * hash) + getMapEntriesList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static 
Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code RegionServerStartupResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMapEntriesFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (mapEntriesBuilder_ == null) { mapEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { mapEntriesBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse(this); int from_bitField0_ = bitField0_; if (mapEntriesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { mapEntries_ = java.util.Collections.unmodifiableList(mapEntries_); bitField0_ = (bitField0_ & ~0x00000001); } 
result.mapEntries_ = mapEntries_; } else { result.mapEntries_ = mapEntriesBuilder_.build(); } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance()) return this; if (mapEntriesBuilder_ == null) { if (!other.mapEntries_.isEmpty()) { if (mapEntries_.isEmpty()) { mapEntries_ = other.mapEntries_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureMapEntriesIsMutable(); mapEntries_.addAll(other.mapEntries_); } onChanged(); } } else { if (!other.mapEntries_.isEmpty()) { if (mapEntriesBuilder_.isEmpty()) { mapEntriesBuilder_.dispose(); mapEntriesBuilder_ = null; mapEntries_ = other.mapEntries_; bitField0_ = (bitField0_ & ~0x00000001); mapEntriesBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getMapEntriesFieldBuilder() : null; } else { mapEntriesBuilder_.addAllMessages(other.mapEntries_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { for (int i = 0; i < getMapEntriesCount(); i++) { if (!getMapEntries(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // repeated .NameStringPair map_entries = 1; private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> mapEntries_ = java.util.Collections.emptyList(); private void ensureMapEntriesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { mapEntries_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>(mapEntries_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> mapEntriesBuilder_; /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. 
filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getMapEntriesList() { if (mapEntriesBuilder_ == null) { return java.util.Collections.unmodifiableList(mapEntries_); } else { return mapEntriesBuilder_.getMessageList(); } } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public int getMapEntriesCount() { if (mapEntriesBuilder_ == null) { return mapEntries_.size(); } else { return mapEntriesBuilder_.getCount(); } } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index) { if (mapEntriesBuilder_ == null) { return mapEntries_.get(index); } else { return mapEntriesBuilder_.getMessage(index); } } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder setMapEntries( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { if (mapEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureMapEntriesIsMutable(); mapEntries_.set(index, value); onChanged(); } else { mapEntriesBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder setMapEntries( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { if (mapEntriesBuilder_ == null) { ensureMapEntriesIsMutable(); mapEntries_.set(index, builderForValue.build()); onChanged(); } else { mapEntriesBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder addMapEntries(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { if (mapEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureMapEntriesIsMutable(); mapEntries_.add(value); onChanged(); } else { mapEntriesBuilder_.addMessage(value); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. 
filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder addMapEntries( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { if (mapEntriesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureMapEntriesIsMutable(); mapEntries_.add(index, value); onChanged(); } else { mapEntriesBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder addMapEntries( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { if (mapEntriesBuilder_ == null) { ensureMapEntriesIsMutable(); mapEntries_.add(builderForValue.build()); onChanged(); } else { mapEntriesBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder addMapEntries( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { if (mapEntriesBuilder_ == null) { ensureMapEntriesIsMutable(); mapEntries_.add(index, builderForValue.build()); onChanged(); } else { mapEntriesBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder addAllMapEntries( java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> values) { if (mapEntriesBuilder_ == null) { ensureMapEntriesIsMutable(); super.addAll(values, mapEntries_); onChanged(); } else { mapEntriesBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder clearMapEntries() { if (mapEntriesBuilder_ == null) { mapEntries_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { mapEntriesBuilder_.clear(); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public Builder removeMapEntries(int index) { if (mapEntriesBuilder_ == null) { ensureMapEntriesIsMutable(); mapEntries_.remove(index); onChanged(); } else { mapEntriesBuilder_.remove(index); } return this; } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. 
filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getMapEntriesBuilder( int index) { return getMapEntriesFieldBuilder().getBuilder(index); } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder( int index) { if (mapEntriesBuilder_ == null) { return mapEntries_.get(index); } else { return mapEntriesBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> getMapEntriesOrBuilderList() { if (mapEntriesBuilder_ != null) { return mapEntriesBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(mapEntries_); } } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addMapEntriesBuilder() { return getMapEntriesFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addMapEntriesBuilder( int index) { return getMapEntriesFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } /** * <code>repeated .NameStringPair map_entries = 1;</code> * * <pre> ** * Configuration for the regionserver to use: e.g. 
filesystem, * hbase rootdir, the hostname to use creating the RegionServer ServerName, * etc * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder> getMapEntriesBuilderList() { return getMapEntriesFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> getMapEntriesFieldBuilder() { if (mapEntriesBuilder_ == null) { mapEntriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( mapEntries_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); mapEntries_ = null; } return mapEntriesBuilder_; } // @@protoc_insertion_point(builder_scope:RegionServerStartupResponse) } static { defaultInstance = new RegionServerStartupResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:RegionServerStartupResponse) } public interface RegionServerReportRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .ServerName server = 1; /** * <code>required .ServerName server = 1;</code> */ boolean hasServer(); /** * <code>required .ServerName server = 1;</code> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); /** * <code>required .ServerName server = 1;</code> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); // optional .ServerLoad load = 2; /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ boolean hasLoad(); /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad(); /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder(); } /** * Protobuf type {@code RegionServerReportRequest} */ public static final class RegionServerReportRequest extends com.google.protobuf.GeneratedMessage implements RegionServerReportRequestOrBuilder { // Use RegionServerReportRequest.newBuilder() to construct. 
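    // Illustrative sketch of composing a report; "startcode" and "serverLoad"
    // are hypothetical locals, and the host/port values are placeholders:
    //
    //   RegionServerReportRequest report = RegionServerReportRequest.newBuilder()
    //       .setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
    //           .setHostName("rs1.example.com")
    //           .setPort(16020)
    //           .setStartCode(startcode)
    //           .build())
    //       .setLoad(serverLoad)  // optional ServerLoad snapshot; may be omitted
    //       .build();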
private RegionServerReportRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RegionServerReportRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RegionServerReportRequest defaultInstance; public static RegionServerReportRequest getDefaultInstance() { return defaultInstance; } public RegionServerReportRequest getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegionServerReportRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = server_.toBuilder(); } server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(server_); server_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = load_.toBuilder(); } load_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(load_); load_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder.class); } public static com.google.protobuf.Parser<RegionServerReportRequest> PARSER = new com.google.protobuf.AbstractParser<RegionServerReportRequest>() { public RegionServerReportRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return new RegionServerReportRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<RegionServerReportRequest> getParserForType() { return PARSER; } private int bitField0_; // required .ServerName server = 1; public static final int SERVER_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; /** * <code>required .ServerName server = 1;</code> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { return server_; } /** * <code>required .ServerName server = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { return server_; } // optional .ServerLoad load = 2; public static final int LOAD_FIELD_NUMBER = 2; private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_; /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public boolean hasLoad() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() { return load_; } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() { return load_; } private void initFields() { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasServer()) { memoizedIsInitialized = 0; return false; } if (!getServer().isInitialized()) { memoizedIsInitialized = 0; return false; } if (hasLoad()) { if (!getLoad().isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, server_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, load_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, server_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, load_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) obj; boolean result = true; result = result && (hasServer() == other.hasServer()); if (hasServer()) { result = result && getServer() .equals(other.getServer()); } result = result && (hasLoad() == other.hasLoad()); if (hasLoad()) { result = result && getLoad() .equals(other.getLoad()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasServer()) { hash = (37 * hash) + SERVER_FIELD_NUMBER; hash = (53 * hash) + getServer().hashCode(); } if (hasLoad()) { hash = (37 * hash) + LOAD_FIELD_NUMBER; hash = (53 * hash) + getLoad().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return 
PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code RegionServerReportRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getServerFieldBuilder(); getLoadFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (loadBuilder_ == null) { load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); } else { loadBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest build() { 
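// Editor's note (not protoc output): unlike buildPartial(), this build() enforces proto2
// required-field semantics, throwing an UninitializedMessageException via
// newUninitializedMessageException(result) if the required 'server' field is missing or
// incompletely initialized. A hedged usage sketch, with illustrative placeholder values
// (host, port, and the default ServerLoad are assumptions, not values from HBase itself):
//   RegionServerReportRequest report = RegionServerReportRequest.newBuilder()
//       .setServer(HBaseProtos.ServerName.newBuilder()
//           .setHostName("rs1.example.com")   // hypothetical region server host
//           .setPort(16020))                  // hypothetical RPC port
//       .setLoad(ClusterStatusProtos.ServerLoad.getDefaultInstance()) // 'load' is optional
//       .build();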
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (serverBuilder_ == null) { result.server_ = server_; } else { result.server_ = serverBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } if (loadBuilder_ == null) { result.load_ = load_; } else { result.load_ = loadBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance()) return this; if (other.hasServer()) { mergeServer(other.getServer()); } if (other.hasLoad()) { mergeLoad(other.getLoad()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasServer()) { return false; } if (!getServer().isInitialized()) { return false; } if (hasLoad()) { if (!getLoad().isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .ServerName server = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; /** * <code>required .ServerName server = 1;</code> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { if (serverBuilder_ == null) { return server_; } else { return 
serverBuilder_.getMessage(); } } /** * <code>required .ServerName server = 1;</code> */ public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (value == null) { throw new NullPointerException(); } server_ = value; onChanged(); } else { serverBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> */ public Builder setServer( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { if (serverBuilder_ == null) { server_ = builderForValue.build(); onChanged(); } else { serverBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> */ public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); } else { server_ = value; } onChanged(); } else { serverBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> */ public Builder clearServer() { if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * <code>required .ServerName server = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { bitField0_ |= 0x00000001; onChanged(); return getServerFieldBuilder().getBuilder(); } /** * <code>required .ServerName server = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { if (serverBuilder_ != null) { return serverBuilder_.getMessageOrBuilder(); } else { return server_; } } /** * <code>required .ServerName server = 1;</code> */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> getServerFieldBuilder() { if (serverBuilder_ == null) { serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( server_, getParentForChildren(), isClean()); server_ = null; } return serverBuilder_; } // optional .ServerLoad load = 2; private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> loadBuilder_; /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public boolean 
hasLoad() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() { if (loadBuilder_ == null) { return load_; } else { return loadBuilder_.getMessage(); } } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) { if (loadBuilder_ == null) { if (value == null) { throw new NullPointerException(); } load_ = value; onChanged(); } else { loadBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public Builder setLoad( org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) { if (loadBuilder_ == null) { load_ = builderForValue.build(); onChanged(); } else { loadBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) { if (loadBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && load_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) { load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial(); } else { load_ = value; } onChanged(); } else { loadBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public Builder clearLoad() { if (loadBuilder_ == null) { load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); onChanged(); } else { loadBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getLoadBuilder() { bitField0_ |= 0x00000002; onChanged(); return getLoadFieldBuilder().getBuilder(); } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() { if (loadBuilder_ != null) { return loadBuilder_.getMessageOrBuilder(); } else { return load_; } } /** * <code>optional .ServerLoad load = 2;</code> * * <pre> ** load the server is under * </pre> */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> getLoadFieldBuilder() { if (loadBuilder_ == null) { loadBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>( load_, getParentForChildren(), isClean()); load_ 
= null; } return loadBuilder_; } // @@protoc_insertion_point(builder_scope:RegionServerReportRequest) } static { defaultInstance = new RegionServerReportRequest(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:RegionServerReportRequest) } public interface RegionServerReportResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code RegionServerReportResponse} */ public static final class RegionServerReportResponse extends com.google.protobuf.GeneratedMessage implements RegionServerReportResponseOrBuilder { // Use RegionServerReportResponse.newBuilder() to construct. private RegionServerReportResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RegionServerReportResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RegionServerReportResponse defaultInstance; public static RegionServerReportResponse getDefaultInstance() { return defaultInstance; } public RegionServerReportResponse getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegionServerReportResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.Builder.class); } public static com.google.protobuf.Parser<RegionServerReportResponse> PARSER = new com.google.protobuf.AbstractParser<RegionServerReportResponse>() { public RegionServerReportResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RegionServerReportResponse(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<RegionServerReportResponse> getParserForType() { return PARSER; } private void initFields() { } private byte 
memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code RegionServerReportResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse build() { 
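// Editor's note (not protoc output): RegionServerReportResponse declares no fields, so
// isInitialized() is trivially true and this build() can never throw; the empty message
// serves as a typed acknowledgement for the report RPC.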
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:RegionServerReportResponse) } static { defaultInstance = new RegionServerReportResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:RegionServerReportResponse) } public interface ReportRSFatalErrorRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .ServerName server = 1; /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ boolean hasServer(); /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); // required string error_message = 2; /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ boolean hasErrorMessage(); /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ java.lang.String getErrorMessage(); /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ com.google.protobuf.ByteString getErrorMessageBytes(); } /** * Protobuf type {@code ReportRSFatalErrorRequest} */ public static final class 
ReportRSFatalErrorRequest extends com.google.protobuf.GeneratedMessage implements ReportRSFatalErrorRequestOrBuilder { // Use ReportRSFatalErrorRequest.newBuilder() to construct. private ReportRSFatalErrorRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportRSFatalErrorRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportRSFatalErrorRequest defaultInstance; public static ReportRSFatalErrorRequest getDefaultInstance() { return defaultInstance; } public ReportRSFatalErrorRequest getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportRSFatalErrorRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = server_.toBuilder(); } server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(server_); server_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { bitField0_ |= 0x00000002; errorMessage_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder.class); } public static com.google.protobuf.Parser<ReportRSFatalErrorRequest> PARSER = new com.google.protobuf.AbstractParser<ReportRSFatalErrorRequest>() { public ReportRSFatalErrorRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ReportRSFatalErrorRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<ReportRSFatalErrorRequest> getParserForType() { 
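// Editor's note (not protoc output): PARSER delegates to the stream-parsing constructor
// defined above; every static parseFrom/parseDelimitedFrom overload ultimately funnels
// through it, so unknown fields and partially built messages are handled uniformly.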
return PARSER; } private int bitField0_; // required .ServerName server = 1; public static final int SERVER_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { return server_; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { return server_; } // required string error_message = 2; public static final int ERROR_MESSAGE_FIELD_NUMBER = 2; private java.lang.Object errorMessage_; /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMessage_ = s; } return s; } } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public com.google.protobuf.ByteString getErrorMessageBytes() { java.lang.Object ref = errorMessage_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMessage_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); errorMessage_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasServer()) { memoizedIsInitialized = 0; return false; } if (!hasErrorMessage()) { memoizedIsInitialized = 0; return false; } if (!getServer().isInitialized()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, server_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, getErrorMessageBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, server_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, getErrorMessageBytes()); } size += getUnknownFields().getSerializedSize(); 
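// Editor's note (not protoc output): the total just computed is cached in
// memoizedSerializedSize so that writeTo() and repeated getSerializedSize() calls avoid
// recomputation; the cache is safe because generated messages are immutable once built.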
memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest) obj; boolean result = true; result = result && (hasServer() == other.hasServer()); if (hasServer()) { result = result && getServer() .equals(other.getServer()); } result = result && (hasErrorMessage() == other.hasErrorMessage()); if (hasErrorMessage()) { result = result && getErrorMessage() .equals(other.getErrorMessage()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasServer()) { hash = (37 * hash) + SERVER_FIELD_NUMBER; hash = (53 * hash) + getServer().hashCode(); } if (hasErrorMessage()) { hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; hash = (53 * hash) + getErrorMessage().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code ReportRSFatalErrorRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getServerFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); errorMessage_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest 
getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (serverBuilder_ == null) { result.server_ = server_; } else { result.server_ = serverBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.errorMessage_ = errorMessage_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance()) return this; if (other.hasServer()) { mergeServer(other.getServer()); } if (other.hasErrorMessage()) { bitField0_ |= 0x00000002; errorMessage_ = other.errorMessage_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasServer()) { return false; } if (!hasErrorMessage()) { return false; } if (!getServer().isInitialized()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .ServerName server = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * 
</pre> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { if (serverBuilder_ == null) { return server_; } else { return serverBuilder_.getMessage(); } } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (value == null) { throw new NullPointerException(); } server_ = value; onChanged(); } else { serverBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public Builder setServer( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { if (serverBuilder_ == null) { server_ = builderForValue.build(); onChanged(); } else { serverBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); } else { server_ = value; } onChanged(); } else { serverBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public Builder clearServer() { if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { bitField0_ |= 0x00000001; onChanged(); return getServerFieldBuilder().getBuilder(); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { if (serverBuilder_ != null) { return serverBuilder_.getMessageOrBuilder(); } else { return server_; } } /** * <code>required .ServerName server = 1;</code> * * <pre> ** name of the server experiencing the error * </pre> */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> getServerFieldBuilder() { if (serverBuilder_ == null) { serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( server_, getParentForChildren(), isClean()); server_ = null; } return serverBuilder_; } // required string error_message = 2; private java.lang.Object errorMessage_ = ""; /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); errorMessage_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public com.google.protobuf.ByteString getErrorMessageBytes() { java.lang.Object ref = errorMessage_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMessage_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public Builder setErrorMessage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; errorMessage_ = value; onChanged(); return this; } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public Builder clearErrorMessage() { bitField0_ = (bitField0_ & ~0x00000002); errorMessage_ = getDefaultInstance().getErrorMessage(); onChanged(); return this; } /** * <code>required string error_message = 2;</code> * * <pre> ** informative text to expose in the master logs and UI * </pre> */ public Builder setErrorMessageBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; errorMessage_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:ReportRSFatalErrorRequest) } static { defaultInstance = new ReportRSFatalErrorRequest(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:ReportRSFatalErrorRequest) } public interface ReportRSFatalErrorResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code ReportRSFatalErrorResponse} */ public static final class ReportRSFatalErrorResponse extends com.google.protobuf.GeneratedMessage implements ReportRSFatalErrorResponseOrBuilder { // Use ReportRSFatalErrorResponse.newBuilder() to construct. 
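// Editor's note (not protoc output): a hedged sketch of how a region server might build
// the ReportRSFatalErrorRequest defined above; the host, port, and error text are
// illustrative placeholders, not values taken from HBase itself:
//   ReportRSFatalErrorRequest error = ReportRSFatalErrorRequest.newBuilder()
//       .setServer(HBaseProtos.ServerName.newBuilder()
//           .setHostName("rs1.example.com")   // hypothetical host
//           .setPort(16020))                  // hypothetical port
//       .setErrorMessage("Aborting region server: WAL sync failed") // required field
//       .build();  // throws if 'server' or 'error_message' was never set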
private ReportRSFatalErrorResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportRSFatalErrorResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportRSFatalErrorResponse defaultInstance; public static ReportRSFatalErrorResponse getDefaultInstance() { return defaultInstance; } public ReportRSFatalErrorResponse getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportRSFatalErrorResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.Builder.class); } public static com.google.protobuf.Parser<ReportRSFatalErrorResponse> PARSER = new com.google.protobuf.AbstractParser<ReportRSFatalErrorResponse>() { public ReportRSFatalErrorResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ReportRSFatalErrorResponse(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<ReportRSFatalErrorResponse> getParserForType() { return PARSER; } private void initFields() { } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; 
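// Editor's note (not protoc output): serialVersionUID is pinned to 0L and writeReplace()
// defers to the superclass's serialization proxy; compatibility is governed by the
// protobuf encoding and the .proto schema, not by Java class evolution.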
@java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) obj; boolean result = true; result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code ReportRSFatalErrorResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse(this); onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()) return this; this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } // @@protoc_insertion_point(builder_scope:ReportRSFatalErrorResponse) } static { defaultInstance = new ReportRSFatalErrorResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:ReportRSFatalErrorResponse) } public interface GetLastFlushedSequenceIdRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required bytes region_name = 1; /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ boolean hasRegionName(); /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ com.google.protobuf.ByteString getRegionName(); } /** * Protobuf type {@code GetLastFlushedSequenceIdRequest} */ public static final class GetLastFlushedSequenceIdRequest extends com.google.protobuf.GeneratedMessage implements GetLastFlushedSequenceIdRequestOrBuilder { // Use GetLastFlushedSequenceIdRequest.newBuilder() to construct. 
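/*
 * Editor's sketch (not protoc output): GetLastFlushedSequenceIdRequest
 * carries a single required bytes field, region_name, wrapped in a protobuf
 * ByteString. The region name below is a placeholder; in HBase it would be
 * the raw bytes of the region's name.
 *
 *   import com.google.protobuf.ByteString;
 *
 *   GetLastFlushedSequenceIdRequest req =
 *       GetLastFlushedSequenceIdRequest.newBuilder()
 *           .setRegionName(ByteString.copyFromUtf8("exampleRegionName"))
 *           .build();
 *   assert req.hasRegionName();   // required field is set
 */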
private GetLastFlushedSequenceIdRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLastFlushedSequenceIdRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLastFlushedSequenceIdRequest defaultInstance; public static GetLastFlushedSequenceIdRequest getDefaultInstance() { return defaultInstance; } public GetLastFlushedSequenceIdRequest getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLastFlushedSequenceIdRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; regionName_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.Builder.class); } public static com.google.protobuf.Parser<GetLastFlushedSequenceIdRequest> PARSER = new com.google.protobuf.AbstractParser<GetLastFlushedSequenceIdRequest>() { public GetLastFlushedSequenceIdRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetLastFlushedSequenceIdRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<GetLastFlushedSequenceIdRequest> getParserForType() { return PARSER; } private int bitField0_; // required bytes region_name = 1; public static final int REGION_NAME_FIELD_NUMBER = 1; private com.google.protobuf.ByteString regionName_; /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public boolean hasRegionName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public com.google.protobuf.ByteString getRegionName() { return regionName_; } private 
void initFields() { regionName_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasRegionName()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, regionName_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, regionName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) obj; boolean result = true; result = result && (hasRegionName() == other.hasRegionName()); if (hasRegionName()) { result = result && getRegionName() .equals(other.getRegionName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionName()) { hash = (37 * hash) + REGION_NAME_FIELD_NUMBER; hash = (53 * hash) + getRegionName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code GetLastFlushedSequenceIdRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); regionName_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.regionName_ = regionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance()) return this; if (other.hasRegionName()) { setRegionName(other.getRegionName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasRegionName()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required bytes region_name = 1; private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY; /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public boolean hasRegionName() { 
return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public com.google.protobuf.ByteString getRegionName() { return regionName_; } /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public Builder setRegionName(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; regionName_ = value; onChanged(); return this; } /** * <code>required bytes region_name = 1;</code> * * <pre> ** region name * </pre> */ public Builder clearRegionName() { bitField0_ = (bitField0_ & ~0x00000001); regionName_ = getDefaultInstance().getRegionName(); onChanged(); return this; } // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdRequest) } static { defaultInstance = new GetLastFlushedSequenceIdRequest(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdRequest) } public interface GetLastFlushedSequenceIdResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { // required uint64 last_flushed_sequence_id = 1; /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ boolean hasLastFlushedSequenceId(); /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ long getLastFlushedSequenceId(); } /** * Protobuf type {@code GetLastFlushedSequenceIdResponse} */ public static final class GetLastFlushedSequenceIdResponse extends com.google.protobuf.GeneratedMessage implements GetLastFlushedSequenceIdResponseOrBuilder { // Use GetLastFlushedSequenceIdResponse.newBuilder() to construct. 
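/*
 * Editor's sketch (not protoc output): the response side of the
 * GetLastFlushedSequenceId exchange. last_flushed_sequence_id is a required
 * uint64, so the builder reports uninitialized until it is set; the
 * sequence id value below is a placeholder.
 *
 *   GetLastFlushedSequenceIdResponse.Builder b =
 *       GetLastFlushedSequenceIdResponse.newBuilder();
 *   assert !b.isInitialized();             // required field still unset
 *   GetLastFlushedSequenceIdResponse resp =
 *       b.setLastFlushedSequenceId(42L).build();
 *   assert resp.getLastFlushedSequenceId() == 42L;
 */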
private GetLastFlushedSequenceIdResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private GetLastFlushedSequenceIdResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final GetLastFlushedSequenceIdResponse defaultInstance; public static GetLastFlushedSequenceIdResponse getDefaultInstance() { return defaultInstance; } public GetLastFlushedSequenceIdResponse getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private GetLastFlushedSequenceIdResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; lastFlushedSequenceId_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.Builder.class); } public static com.google.protobuf.Parser<GetLastFlushedSequenceIdResponse> PARSER = new com.google.protobuf.AbstractParser<GetLastFlushedSequenceIdResponse>() { public GetLastFlushedSequenceIdResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new GetLastFlushedSequenceIdResponse(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<GetLastFlushedSequenceIdResponse> getParserForType() { return PARSER; } private int bitField0_; // required uint64 last_flushed_sequence_id = 1; public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1; private long lastFlushedSequenceId_; /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public boolean hasLastFlushedSequenceId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required uint64 last_flushed_sequence_id = 
1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public long getLastFlushedSequenceId() { return lastFlushedSequenceId_; } private void initFields() { lastFlushedSequenceId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasLastFlushedSequenceId()) { memoizedIsInitialized = 0; return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lastFlushedSequenceId_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lastFlushedSequenceId_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) obj; boolean result = true; result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); if (hasLastFlushedSequenceId()) { result = result && (getLastFlushedSequenceId() == other.getLastFlushedSequenceId()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasLastFlushedSequenceId()) { hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( 
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code GetLastFlushedSequenceIdResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); lastFlushedSequenceId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.lastFlushedSequenceId_ = lastFlushedSequenceId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()) return this; if (other.hasLastFlushedSequenceId()) { setLastFlushedSequenceId(other.getLastFlushedSequenceId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasLastFlushedSequenceId()) { return false; } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required uint64 
last_flushed_sequence_id = 1; private long lastFlushedSequenceId_ ; /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public boolean hasLastFlushedSequenceId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public long getLastFlushedSequenceId() { return lastFlushedSequenceId_; } /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public Builder setLastFlushedSequenceId(long value) { bitField0_ |= 0x00000001; lastFlushedSequenceId_ = value; onChanged(); return this; } /** * <code>required uint64 last_flushed_sequence_id = 1;</code> * * <pre> * the last WAL sequence id flushed from MemStore to HFile for the region * </pre> */ public Builder clearLastFlushedSequenceId() { bitField0_ = (bitField0_ & ~0x00000001); lastFlushedSequenceId_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdResponse) } static { defaultInstance = new GetLastFlushedSequenceIdResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdResponse) } public interface RegionStateTransitionOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .RegionStateTransition.TransitionCode transition_code = 1; /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ boolean hasTransitionCode(); /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode getTransitionCode(); // repeated .RegionInfo region_info = 2; /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList(); /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ int getRegionInfoCount(); /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ java.util.List<?
extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionInfoOrBuilderList(); /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( int index); // optional uint64 open_seq_num = 3; /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For newly opened region, the open seq num is needed * </pre> */ boolean hasOpenSeqNum(); /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For newly opened region, the open seq num is needed * </pre> */ long getOpenSeqNum(); } /** * Protobuf type {@code RegionStateTransition} */ public static final class RegionStateTransition extends com.google.protobuf.GeneratedMessage implements RegionStateTransitionOrBuilder { // Use RegionStateTransition.newBuilder() to construct. private RegionStateTransition(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private RegionStateTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final RegionStateTransition defaultInstance; public static RegionStateTransition getDefaultInstance() { return defaultInstance; } public RegionStateTransition getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RegionStateTransition( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { int rawValue = input.readEnum(); org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode value = org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; transitionCode_ = value; } break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(); mutable_bitField0_ |= 0x00000002; } regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); break; } case 24: { bitField0_ |= 0x00000002; openSeqNum_ = input.readUInt64(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final
com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionStateTransition_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionStateTransition_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder.class); } public static com.google.protobuf.Parser<RegionStateTransition> PARSER = new com.google.protobuf.AbstractParser<RegionStateTransition>() { public RegionStateTransition parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RegionStateTransition(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<RegionStateTransition> getParserForType() { return PARSER; } /** * Protobuf enum {@code RegionStateTransition.TransitionCode} */ public enum TransitionCode implements com.google.protobuf.ProtocolMessageEnum { /** * <code>OPENED = 0;</code> */ OPENED(0, 0), /** * <code>FAILED_OPEN = 1;</code> */ FAILED_OPEN(1, 1), /** * <code>CLOSED = 2;</code> * * <pre> ** No failed_close, in which case region server will abort * </pre> */ CLOSED(2, 2), /** * <code>READY_TO_SPLIT = 3;</code> * * <pre> ** Ask master for ok to split/merge region(s) * </pre> */ READY_TO_SPLIT(3, 3), /** * <code>READY_TO_MERGE = 4;</code> */ READY_TO_MERGE(4, 4), /** * <code>SPLIT_PONR = 5;</code> */ SPLIT_PONR(5, 5), /** * <code>MERGE_PONR = 6;</code> */ MERGE_PONR(6, 6), /** * <code>SPLIT = 7;</code> */ SPLIT(7, 7), /** * <code>MERGED = 8;</code> */ MERGED(8, 8), /** * <code>SPLIT_REVERTED = 9;</code> */ SPLIT_REVERTED(9, 9), /** * <code>MERGE_REVERTED = 10;</code> */ MERGE_REVERTED(10, 10), ; /** * <code>OPENED = 0;</code> */ public static final int OPENED_VALUE = 0; /** * <code>FAILED_OPEN = 1;</code> */ public static final int FAILED_OPEN_VALUE = 1; /** * <code>CLOSED = 2;</code> * * <pre> ** No failed_close, in which case region server will abort * </pre> */ public static final int CLOSED_VALUE = 2; /** * <code>READY_TO_SPLIT = 3;</code> * * <pre> ** Ask master for ok to split/merge region(s) * </pre> */ public static final int READY_TO_SPLIT_VALUE = 3; /** * <code>READY_TO_MERGE = 4;</code> */ public static final int READY_TO_MERGE_VALUE = 4; /** * <code>SPLIT_PONR = 5;</code> */ public static final int SPLIT_PONR_VALUE = 5; /** * <code>MERGE_PONR = 6;</code> */ public static final int MERGE_PONR_VALUE = 6; /** * <code>SPLIT = 7;</code> */ public static final int SPLIT_VALUE = 7; /** * <code>MERGED = 8;</code> */ public static final int MERGED_VALUE = 8; /** * <code>SPLIT_REVERTED = 9;</code> */ public static final int SPLIT_REVERTED_VALUE = 9; /** * <code>MERGE_REVERTED = 10;</code> */ public static final int MERGE_REVERTED_VALUE = 10; public final int getNumber() { return value; } public static TransitionCode valueOf(int value) { switch (value) { case 0: return OPENED; case 1: return FAILED_OPEN; case 2: return CLOSED; case 3: return READY_TO_SPLIT; case 4: return READY_TO_MERGE; case 5: return SPLIT_PONR; case 6: return MERGE_PONR; case 7: return SPLIT; case 8: return MERGED; case 9: return SPLIT_REVERTED; 
case 10: return MERGE_REVERTED; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap<TransitionCode> internalGetValueMap() { return internalValueMap; } private static com.google.protobuf.Internal.EnumLiteMap<TransitionCode> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<TransitionCode>() { public TransitionCode findValueByNumber(int number) { return TransitionCode.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.getDescriptor().getEnumTypes().get(0); } private static final TransitionCode[] VALUES = values(); public static TransitionCode valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "EnumValueDescriptor is not for this type."); } return VALUES[desc.getIndex()]; } private final int index; private final int value; private TransitionCode(int index, int value) { this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:RegionStateTransition.TransitionCode) } private int bitField0_; // required .RegionStateTransition.TransitionCode transition_code = 1; public static final int TRANSITION_CODE_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode transitionCode_; /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public boolean hasTransitionCode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode getTransitionCode() { return transitionCode_; } // repeated .RegionInfo region_info = 2; public static final int REGION_INFO_FIELD_NUMBER = 2; private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_; /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() { return regionInfo_; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public java.util.List<?
extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionInfoOrBuilderList() { return regionInfo_; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public int getRegionInfoCount() { return regionInfo_.size(); } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { return regionInfo_.get(index); } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( int index) { return regionInfo_.get(index); } // optional uint64 open_seq_num = 3; public static final int OPEN_SEQ_NUM_FIELD_NUMBER = 3; private long openSeqNum_; /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For newly opened region, the open seq num is needed * </pre> */ public boolean hasOpenSeqNum() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For newly opened region, the open seq num is needed * </pre> */ public long getOpenSeqNum() { return openSeqNum_; } private void initFields() { transitionCode_ = org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode.OPENED; regionInfo_ = java.util.Collections.emptyList(); openSeqNum_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasTransitionCode()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getRegionInfoCount(); i++) { if (!getRegionInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeEnum(1, transitionCode_.getNumber()); } for (int i = 0; i < regionInfo_.size(); i++) { output.writeMessage(2, regionInfo_.get(i)); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(3, openSeqNum_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, transitionCode_.getNumber()); } for (int i = 0; i < regionInfo_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, regionInfo_.get(i)); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, openSeqNum_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition) obj; boolean result = true; result = result && (hasTransitionCode() == other.hasTransitionCode()); if (hasTransitionCode()) { result = result && (getTransitionCode() == other.getTransitionCode()); } result = result && getRegionInfoList() .equals(other.getRegionInfoList()); result = result && (hasOpenSeqNum() == other.hasOpenSeqNum()); if (hasOpenSeqNum()) { result = result && (getOpenSeqNum() == other.getOpenSeqNum()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTransitionCode()) { hash = (37 * hash) + TRANSITION_CODE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getTransitionCode()); } if (getRegionInfoCount() > 0) { hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; hash = (53 * hash) + getRegionInfoList().hashCode(); } if (hasOpenSeqNum()) { hash = (37 * hash) + OPEN_SEQ_NUM_FIELD_NUMBER; hash = (53 * hash) + hashLong(getOpenSeqNum()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code RegionStateTransition} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionStateTransition_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionStateTransition_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionInfoFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); transitionCode_ = org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode.OPENED; bitField0_ = (bitField0_ & ~0x00000001); if (regionInfoBuilder_ == null) { regionInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { regionInfoBuilder_.clear(); } openSeqNum_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionStateTransition_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.getDefaultInstance(); } public 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.transitionCode_ = transitionCode_; if (regionInfoBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); bitField0_ = (bitField0_ & ~0x00000002); } result.regionInfo_ = regionInfo_; } else { result.regionInfo_ = regionInfoBuilder_.build(); } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } result.openSeqNum_ = openSeqNum_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.getDefaultInstance()) return this; if (other.hasTransitionCode()) { setTransitionCode(other.getTransitionCode()); } if (regionInfoBuilder_ == null) { if (!other.regionInfo_.isEmpty()) { if (regionInfo_.isEmpty()) { regionInfo_ = other.regionInfo_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureRegionInfoIsMutable(); regionInfo_.addAll(other.regionInfo_); } onChanged(); } } else { if (!other.regionInfo_.isEmpty()) { if (regionInfoBuilder_.isEmpty()) { regionInfoBuilder_.dispose(); regionInfoBuilder_ = null; regionInfo_ = other.regionInfo_; bitField0_ = (bitField0_ & ~0x00000002); regionInfoBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getRegionInfoFieldBuilder() : null; } else { regionInfoBuilder_.addAllMessages(other.regionInfo_); } } } if (other.hasOpenSeqNum()) { setOpenSeqNum(other.getOpenSeqNum()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasTransitionCode()) { return false; } for (int i = 0; i < getRegionInfoCount(); i++) { if (!getRegionInfo(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .RegionStateTransition.TransitionCode transition_code = 1; private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode transitionCode_ = org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode.OPENED; /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public boolean hasTransitionCode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode getTransitionCode() { return transitionCode_; } /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public Builder setTransitionCode(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; transitionCode_ = value; onChanged(); return this; } /** * <code>required .RegionStateTransition.TransitionCode transition_code = 1;</code> */ public Builder clearTransitionCode() { bitField0_ = (bitField0_ & ~0x00000001); transitionCode_ = org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode.OPENED; onChanged(); return this; } // repeated .RegionInfo region_info = 2; private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ = java.util.Collections.emptyList(); private void ensureRegionInfoIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_); bitField0_ |= 0x00000002; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() { if (regionInfoBuilder_ == null) { return 
java.util.Collections.unmodifiableList(regionInfo_); } else { return regionInfoBuilder_.getMessageList(); } } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public int getRegionInfoCount() { if (regionInfoBuilder_ == null) { return regionInfo_.size(); } else { return regionInfoBuilder_.getCount(); } } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { if (regionInfoBuilder_ == null) { return regionInfo_.get(index); } else { return regionInfoBuilder_.getMessage(index); } } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder setRegionInfo( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureRegionInfoIsMutable(); regionInfo_.set(index, value); onChanged(); } else { regionInfoBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder setRegionInfo( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionInfoBuilder_ == null) { ensureRegionInfoIsMutable(); regionInfo_.set(index, builderForValue.build()); onChanged(); } else { regionInfoBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureRegionInfoIsMutable(); regionInfo_.add(value); onChanged(); } else { regionInfoBuilder_.addMessage(value); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder addRegionInfo( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureRegionInfoIsMutable(); regionInfo_.add(index, value); onChanged(); } else { regionInfoBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder addRegionInfo( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionInfoBuilder_ == null) { ensureRegionInfoIsMutable(); regionInfo_.add(builderForValue.build()); onChanged(); } else { regionInfoBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder addRegionInfo( int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionInfoBuilder_ == null) { ensureRegionInfoIsMutable(); regionInfo_.add(index, builderForValue.build()); onChanged(); } else { 
regionInfoBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder addAllRegionInfo( java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) { if (regionInfoBuilder_ == null) { ensureRegionInfoIsMutable(); super.addAll(values, regionInfo_); onChanged(); } else { regionInfoBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder clearRegionInfo() { if (regionInfoBuilder_ == null) { regionInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { regionInfoBuilder_.clear(); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public Builder removeRegionInfo(int index) { if (regionInfoBuilder_ == null) { ensureRegionInfoIsMutable(); regionInfo_.remove(index); onChanged(); } else { regionInfoBuilder_.remove(index); } return this; } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( int index) { return getRegionInfoFieldBuilder().getBuilder(index); } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( int index) { if (regionInfoBuilder_ == null) { return regionInfo_.get(index); } else { return regionInfoBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public java.util.List<? 
extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionInfoOrBuilderList() { if (regionInfoBuilder_ != null) { return regionInfoBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(regionInfo_); } } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() { return getRegionInfoFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( int index) { return getRegionInfoFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); } /** * <code>repeated .RegionInfo region_info = 2;</code> * * <pre> ** Multiple regions are involved during merging/splitting * </pre> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder> getRegionInfoBuilderList() { return getRegionInfoFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionInfoFieldBuilder() { if (regionInfoBuilder_ == null) { regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( regionInfo_, ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); regionInfo_ = null; } return regionInfoBuilder_; } // optional uint64 open_seq_num = 3; private long openSeqNum_ ; /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For a newly opened region, the open seq num is needed * </pre> */ public boolean hasOpenSeqNum() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For a newly opened region, the open seq num is needed * </pre> */ public long getOpenSeqNum() { return openSeqNum_; } /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For a newly opened region, the open seq num is needed * </pre> */ public Builder setOpenSeqNum(long value) { bitField0_ |= 0x00000004; openSeqNum_ = value; onChanged(); return this; } /** * <code>optional uint64 open_seq_num = 3;</code> * * <pre> ** For a newly opened region, the open seq num is needed * </pre> */ public Builder clearOpenSeqNum() { bitField0_ = (bitField0_ & ~0x00000004); openSeqNum_ = 0L; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:RegionStateTransition) } static { defaultInstance = new RegionStateTransition(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:RegionStateTransition) } public interface ReportRegionStateTransitionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .ServerName server = 1; /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server 
name * </pre> */ boolean hasServer(); /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); // repeated .RegionStateTransition transition = 2; /** * <code>repeated .RegionStateTransition transition = 2;</code> */ java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> getTransitionList(); /** * <code>repeated .RegionStateTransition transition = 2;</code> */ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition getTransition(int index); /** * <code>repeated .RegionStateTransition transition = 2;</code> */ int getTransitionCount(); /** * <code>repeated .RegionStateTransition transition = 2;</code> */ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder> getTransitionOrBuilderList(); /** * <code>repeated .RegionStateTransition transition = 2;</code> */ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder getTransitionOrBuilder( int index); } /** * Protobuf type {@code ReportRegionStateTransitionRequest} */ public static final class ReportRegionStateTransitionRequest extends com.google.protobuf.GeneratedMessage implements ReportRegionStateTransitionRequestOrBuilder { // Use ReportRegionStateTransitionRequest.newBuilder() to construct. private ReportRegionStateTransitionRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportRegionStateTransitionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportRegionStateTransitionRequest defaultInstance; public static ReportRegionStateTransitionRequest getDefaultInstance() { return defaultInstance; } public ReportRegionStateTransitionRequest getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportRegionStateTransitionRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = server_.toBuilder(); } server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(server_); server_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; } case 18: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { 
transition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition>(); mutable_bitField0_ |= 0x00000002; } transition_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { transition_ = java.util.Collections.unmodifiableList(transition_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder.class); } public static com.google.protobuf.Parser<ReportRegionStateTransitionRequest> PARSER = new com.google.protobuf.AbstractParser<ReportRegionStateTransitionRequest>() { public ReportRegionStateTransitionRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ReportRegionStateTransitionRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<ReportRegionStateTransitionRequest> getParserForType() { return PARSER; } private int bitField0_; // required .ServerName server = 1; public static final int SERVER_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { return server_; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { return server_; } // repeated .RegionStateTransition transition = 2; public static final int TRANSITION_FIELD_NUMBER = 2; private java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> transition_; /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> getTransitionList() { return transition_; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public java.util.List<? 
extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder> getTransitionOrBuilderList() { return transition_; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public int getTransitionCount() { return transition_.size(); } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition getTransition(int index) { return transition_.get(index); } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder getTransitionOrBuilder( int index) { return transition_.get(index); } private void initFields() { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); transition_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; if (!hasServer()) { memoizedIsInitialized = 0; return false; } if (!getServer().isInitialized()) { memoizedIsInitialized = 0; return false; } for (int i = 0; i < getTransitionCount(); i++) { if (!getTransition(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, server_); } for (int i = 0; i < transition_.size(); i++) { output.writeMessage(2, transition_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, server_); } for (int i = 0; i < transition_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, transition_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest) obj; boolean result = true; result = result && (hasServer() == other.hasServer()); if (hasServer()) { result = result && getServer() .equals(other.getServer()); } result = result && getTransitionList() .equals(other.getTransitionList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasServer()) { hash = (37 * hash) + SERVER_FIELD_NUMBER; hash = (53 * hash) + 
getServer().hashCode(); } if (getTransitionCount() > 0) { hash = (37 * hash) + TRANSITION_FIELD_NUMBER; hash = (53 * hash) + getTransitionList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return 
builder; } /** * Protobuf type {@code ReportRegionStateTransitionRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getServerFieldBuilder(); getTransitionFieldBuilder(); } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (transitionBuilder_ == null) { transition_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { transitionBuilder_.clear(); } return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionRequest_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } if (serverBuilder_ == null) { result.server_ = server_; } else { result.server_ = serverBuilder_.build(); } if (transitionBuilder_ == null) { if 
(((bitField0_ & 0x00000002) == 0x00000002)) { transition_ = java.util.Collections.unmodifiableList(transition_); bitField0_ = (bitField0_ & ~0x00000002); } result.transition_ = transition_; } else { result.transition_ = transitionBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance()) return this; if (other.hasServer()) { mergeServer(other.getServer()); } if (transitionBuilder_ == null) { if (!other.transition_.isEmpty()) { if (transition_.isEmpty()) { transition_ = other.transition_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureTransitionIsMutable(); transition_.addAll(other.transition_); } onChanged(); } } else { if (!other.transition_.isEmpty()) { if (transitionBuilder_.isEmpty()) { transitionBuilder_.dispose(); transitionBuilder_ = null; transition_ = other.transition_; bitField0_ = (bitField0_ & ~0x00000002); transitionBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getTransitionFieldBuilder() : null; } else { transitionBuilder_.addAllMessages(other.transition_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { if (!hasServer()) { return false; } if (!getServer().isInitialized()) { return false; } for (int i = 0; i < getTransitionCount(); i++) { if (!getTransition(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // required .ServerName server = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public boolean hasServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { if (serverBuilder_ == null) { return server_; } else { return serverBuilder_.getMessage(); } } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (value == null) { throw new NullPointerException(); } server_ = value; onChanged(); } else { serverBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public Builder setServer( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { if (serverBuilder_ == null) { server_ = builderForValue.build(); onChanged(); } else { serverBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { if (serverBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); } else { server_ = value; } onChanged(); } else { serverBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public Builder clearServer() { if (serverBuilder_ == null) { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { bitField0_ |= 0x00000001; onChanged(); return getServerFieldBuilder().getBuilder(); } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { if (serverBuilder_ != null) { return serverBuilder_.getMessageOrBuilder(); } else { return server_; } } /** * <code>required .ServerName server = 1;</code> * * <pre> ** This region server's server name * </pre> */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> getServerFieldBuilder() { if (serverBuilder_ == null) { serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( server_, getParentForChildren(), isClean()); server_ = null; } return serverBuilder_; } // repeated .RegionStateTransition transition = 2; private 
java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> transition_ = java.util.Collections.emptyList(); private void ensureTransitionIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { transition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition>(transition_); bitField0_ |= 0x00000002; } } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder> transitionBuilder_; /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> getTransitionList() { if (transitionBuilder_ == null) { return java.util.Collections.unmodifiableList(transition_); } else { return transitionBuilder_.getMessageList(); } } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public int getTransitionCount() { if (transitionBuilder_ == null) { return transition_.size(); } else { return transitionBuilder_.getCount(); } } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition getTransition(int index) { if (transitionBuilder_ == null) { return transition_.get(index); } else { return transitionBuilder_.getMessage(index); } } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder setTransition( int index, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition value) { if (transitionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTransitionIsMutable(); transition_.set(index, value); onChanged(); } else { transitionBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder setTransition( int index, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder builderForValue) { if (transitionBuilder_ == null) { ensureTransitionIsMutable(); transition_.set(index, builderForValue.build()); onChanged(); } else { transitionBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder addTransition(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition value) { if (transitionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTransitionIsMutable(); transition_.add(value); onChanged(); } else { transitionBuilder_.addMessage(value); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder addTransition( int index, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition value) { if (transitionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureTransitionIsMutable(); transition_.add(index, value); onChanged(); } else { transitionBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder addTransition( 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder builderForValue) { if (transitionBuilder_ == null) { ensureTransitionIsMutable(); transition_.add(builderForValue.build()); onChanged(); } else { transitionBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder addTransition( int index, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder builderForValue) { if (transitionBuilder_ == null) { ensureTransitionIsMutable(); transition_.add(index, builderForValue.build()); onChanged(); } else { transitionBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder addAllTransition( java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition> values) { if (transitionBuilder_ == null) { ensureTransitionIsMutable(); super.addAll(values, transition_); onChanged(); } else { transitionBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder clearTransition() { if (transitionBuilder_ == null) { transition_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { transitionBuilder_.clear(); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public Builder removeTransition(int index) { if (transitionBuilder_ == null) { ensureTransitionIsMutable(); transition_.remove(index); onChanged(); } else { transitionBuilder_.remove(index); } return this; } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder getTransitionBuilder( int index) { return getTransitionFieldBuilder().getBuilder(index); } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder getTransitionOrBuilder( int index) { if (transitionBuilder_ == null) { return transition_.get(index); } else { return transitionBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public java.util.List<? 
extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder> getTransitionOrBuilderList() { if (transitionBuilder_ != null) { return transitionBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(transition_); } } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder addTransitionBuilder() { return getTransitionFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.getDefaultInstance()); } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder addTransitionBuilder( int index) { return getTransitionFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.getDefaultInstance()); } /** * <code>repeated .RegionStateTransition transition = 2;</code> */ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder> getTransitionBuilderList() { return getTransitionFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder> getTransitionFieldBuilder() { if (transitionBuilder_ == null) { transitionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder>( transition_, ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); transition_ = null; } return transitionBuilder_; } // @@protoc_insertion_point(builder_scope:ReportRegionStateTransitionRequest) } static { defaultInstance = new ReportRegionStateTransitionRequest(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:ReportRegionStateTransitionRequest) } public interface ReportRegionStateTransitionResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional string error_message = 1; /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ boolean hasErrorMessage(); /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ java.lang.String getErrorMessage(); /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ com.google.protobuf.ByteString getErrorMessageBytes(); } /** * Protobuf type {@code ReportRegionStateTransitionResponse} */ public static final class ReportRegionStateTransitionResponse extends com.google.protobuf.GeneratedMessage implements ReportRegionStateTransitionResponseOrBuilder { // Use ReportRegionStateTransitionResponse.newBuilder() to construct. 
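  // -------------------------------------------------------------------------
  // Illustrative usage sketch (hand-written comment, not compiler output):
  // how the messages above fit together on the region server side.
  // `regionInfo`, `openSeqNum`, and `serverName` are hypothetical placeholders,
  // and the RPC call that carries the request to the master is out of scope
  // here; only the builder/getter calls shown in this file are used.
  //
  //   RegionStateTransition transition = RegionStateTransition.newBuilder()
  //       .setTransitionCode(RegionStateTransition.TransitionCode.OPENED)
  //       .addRegionInfo(regionInfo)   // region(s) involved in the transition
  //       .setOpenSeqNum(openSeqNum)   // only meaningful for a newly opened region
  //       .build();
  //   ReportRegionStateTransitionRequest request =
  //       ReportRegionStateTransitionRequest.newBuilder()
  //           .setServer(serverName)   // this region server's ServerName
  //           .addTransition(transition)
  //           .build();
  //   ReportRegionStateTransitionResponse response = ...; // returned by the master
  //   if (response.hasErrorMessage()) {
  //     // error_message is only set when the master failed to update the state
  //     throw new java.io.IOException(response.getErrorMessage());
  //   }
  // -------------------------------------------------------------------------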
private ReportRegionStateTransitionResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private ReportRegionStateTransitionResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final ReportRegionStateTransitionResponse defaultInstance; public static ReportRegionStateTransitionResponse getDefaultInstance() { return defaultInstance; } public ReportRegionStateTransitionResponse getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ReportRegionStateTransitionResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { bitField0_ |= 0x00000001; errorMessage_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.Builder.class); } public static com.google.protobuf.Parser<ReportRegionStateTransitionResponse> PARSER = new com.google.protobuf.AbstractParser<ReportRegionStateTransitionResponse>() { public ReportRegionStateTransitionResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ReportRegionStateTransitionResponse(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<ReportRegionStateTransitionResponse> getParserForType() { return PARSER; } private int bitField0_; // optional string error_message = 1; public static final int ERROR_MESSAGE_FIELD_NUMBER = 1; private java.lang.Object errorMessage_; /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error 
message if failed to update the region state * </pre> */ public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { errorMessage_ = s; } return s; } } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public com.google.protobuf.ByteString getErrorMessageBytes() { java.lang.Object ref = errorMessage_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMessage_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private void initFields() { errorMessage_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getErrorMessageBytes()); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getErrorMessageBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse) obj; boolean result = true; result = result && (hasErrorMessage() == other.hasErrorMessage()); if (hasErrorMessage()) { result = result && getErrorMessage() .equals(other.getErrorMessage()); } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; } private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasErrorMessage()) { hash = (37 * hash) + ERROR_MESSAGE_FIELD_NUMBER; hash = (53 * hash) + getErrorMessage().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( com.google.protobuf.ByteString data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code ReportRegionStateTransitionResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); errorMessage_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRegionStateTransitionResponse_descriptor; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse build() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.errorMessage_ = errorMessage_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance()) return this; if (other.hasErrorMessage()) { bitField0_ |= 0x00000001; errorMessage_ = other.errorMessage_; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; // optional string error_message = 1; private java.lang.Object errorMessage_ = ""; /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); errorMessage_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public com.google.protobuf.ByteString getErrorMessageBytes() { java.lang.Object ref = errorMessage_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); errorMessage_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public Builder setErrorMessage( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; errorMessage_ = value; onChanged(); return this; } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public Builder clearErrorMessage() { bitField0_ = (bitField0_ & ~0x00000001); errorMessage_ = getDefaultInstance().getErrorMessage(); onChanged(); return this; } /** * <code>optional string error_message = 1;</code> * * <pre> ** Error message if failed to update the region state * </pre> */ public Builder setErrorMessageBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; errorMessage_ = value; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:ReportRegionStateTransitionResponse) } static { defaultInstance = new ReportRegionStateTransitionResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:ReportRegionStateTransitionResponse) } /** * Protobuf service {@code RegionServerStatusService} */ public static abstract class RegionServerStatusService implements com.google.protobuf.Service { protected RegionServerStatusService() {} public interface Interface { /** * <code>rpc RegionServerStartup(.RegionServerStartupRequest) returns (.RegionServerStartupResponse);</code> * * <pre> ** Called when a region server first starts. 
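     *
     * Illustrative only, not part of the generated contract: the region server
     * typically makes this the first RPC it sends to the master. A minimal
     * sketch, assuming port, startCode, channel, controller and done are all
     * supplied by the caller:
     *
     *   RegionServerStartupRequest req = RegionServerStartupRequest.newBuilder()
     *       .setPort(port)                                // required uint32
     *       .setServerStartCode(startCode)                // required uint64
     *       .setServerCurrentTime(System.currentTimeMillis()) // required uint64
     *       .build();
     *   RegionServerStatusService.newStub(channel)
     *       .regionServerStartup(controller, req, done);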
* </pre> */ public abstract void regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse> done); /** * <code>rpc RegionServerReport(.RegionServerReportRequest) returns (.RegionServerReportResponse);</code> * * <pre> ** Called to report the load the RegionServer is under. * </pre> */ public abstract void regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse> done); /** * <code>rpc ReportRSFatalError(.ReportRSFatalErrorRequest) returns (.ReportRSFatalErrorResponse);</code> * * <pre> ** * Called by a region server to report a fatal error that is causing it to * abort. * </pre> */ public abstract void reportRSFatalError( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done); /** * <code>rpc GetLastFlushedSequenceId(.GetLastFlushedSequenceIdRequest) returns (.GetLastFlushedSequenceIdResponse);</code> * * <pre> ** Called to get the sequence id of the last MemStore entry flushed to an * HFile for a specified region. Used by the region server to speed up * log splitting. * </pre> */ public abstract void getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done); /** * <code>rpc ReportRegionStateTransition(.ReportRegionStateTransitionRequest) returns (.ReportRegionStateTransitionResponse);</code> * * <pre> ** * Called by a region server to report the progress of a region * transition. If the request fails, the transition should * be aborted. 
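     *
     * The failure signal is carried in-band rather than as an exception. A
     * sketch of how a caller might interpret the response (accessor names are
     * the generated ones; LOG is an assumed application logger):
     *
     *   if (response.hasErrorMessage()) {
     *     // master rejected the transition; the region server should abort it
     *     LOG.warn("transition failed: " + response.getErrorMessage());
     *   }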
* </pre> */ public abstract void reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse> done); } public static com.google.protobuf.Service newReflectiveService( final Interface impl) { return new RegionServerStatusService() { @java.lang.Override public void regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse> done) { impl.regionServerStartup(controller, request, done); } @java.lang.Override public void regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse> done) { impl.regionServerReport(controller, request, done); } @java.lang.Override public void reportRSFatalError( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done) { impl.reportRSFatalError(controller, request, done); } @java.lang.Override public void getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) { impl.getLastFlushedSequenceId(controller, request, done); } @java.lang.Override public void reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse> done) { impl.reportRegionStateTransition(controller, request, done); } }; } public static com.google.protobuf.BlockingService newReflectiveBlockingService(final BlockingInterface impl) { return new com.google.protobuf.BlockingService() { public final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final com.google.protobuf.Message callBlockingMethod( com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.RpcController controller, com.google.protobuf.Message request) throws com.google.protobuf.ServiceException { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callBlockingMethod() given method descriptor for " + "wrong service type."); } switch(method.getIndex()) { case 0: return impl.regionServerStartup(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)request); case 1: return impl.regionServerReport(controller, 
(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request); case 2: return impl.reportRSFatalError(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request); case 3: return impl.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request); case 4: return impl.reportRegionStateTransition(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getRequestPrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); case 1: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getResponsePrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(); case 1: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } }; } /** * <code>rpc RegionServerStartup(.RegionServerStartupRequest) returns (.RegionServerStartupResponse);</code> * * <pre> ** Called when a region server first starts. * </pre> */ public abstract void regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse> done); /** * <code>rpc RegionServerReport(.RegionServerReportRequest) returns (.RegionServerReportResponse);</code> * * <pre> ** Called to report the load the RegionServer is under. 
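     *
     * A minimal request sketch, assuming serverName and serverLoad are
     * caller-built ServerName and ServerLoad messages:
     *
     *   RegionServerReportRequest req = RegionServerReportRequest.newBuilder()
     *       .setServer(serverName)   // required
     *       .setLoad(serverLoad)     // optional
     *       .build();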
* </pre> */ public abstract void regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse> done); /** * <code>rpc ReportRSFatalError(.ReportRSFatalErrorRequest) returns (.ReportRSFatalErrorResponse);</code> * * <pre> ** * Called by a region server to report a fatal error that is causing it to * abort. * </pre> */ public abstract void reportRSFatalError( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done); /** * <code>rpc GetLastFlushedSequenceId(.GetLastFlushedSequenceIdRequest) returns (.GetLastFlushedSequenceIdResponse);</code> * * <pre> ** Called to get the sequence id of the last MemStore entry flushed to an * HFile for a specified region. Used by the region server to speed up * log splitting. * </pre> */ public abstract void getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done); /** * <code>rpc ReportRegionStateTransition(.ReportRegionStateTransitionRequest) returns (.ReportRegionStateTransitionResponse);</code> * * <pre> ** * Called by a region server to report the progress of a region * transition. If the request fails, the transition should * be aborted. 
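     *
     * A request carries one or more transitions. An illustrative sketch,
     * assuming regionInfo, openSeqNum and serverName are supplied by the
     * caller:
     *
     *   RegionStateTransition t = RegionStateTransition.newBuilder()
     *       .setTransitionCode(RegionStateTransition.TransitionCode.OPENED)
     *       .addRegionInfo(regionInfo)   // repeated RegionInfo
     *       .setOpenSeqNum(openSeqNum)   // optional uint64
     *       .build();
     *   ReportRegionStateTransitionRequest req =
     *       ReportRegionStateTransitionRequest.newBuilder()
     *           .setServer(serverName)
     *           .addTransition(t)
     *           .build();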
* </pre> */ public abstract void reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse> done); public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.getDescriptor().getServices().get(0); } public final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final void callMethod( com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.RpcController controller, com.google.protobuf.Message request, com.google.protobuf.RpcCallback< com.google.protobuf.Message> done) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callMethod() given method descriptor for wrong " + "service type."); } switch(method.getIndex()) { case 0: this.regionServerStartup(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)request, com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse>specializeCallback( done)); return; case 1: this.regionServerReport(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request, com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse>specializeCallback( done)); return; case 2: this.reportRSFatalError(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request, com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse>specializeCallback( done)); return; case 3: this.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request, com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse>specializeCallback( done)); return; case 4: this.reportRegionStateTransition(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)request, com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse>specializeCallback( done)); return; default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getRequestPrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); case 1: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); case 3: return 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getResponsePrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(); case 1: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); case 3: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(); case 4: return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public static Stub newStub( com.google.protobuf.RpcChannel channel) { return new Stub(channel); } public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService implements Interface { private Stub(com.google.protobuf.RpcChannel channel) { this.channel = channel; } private final com.google.protobuf.RpcChannel channel; public com.google.protobuf.RpcChannel getChannel() { return channel; } public void regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse> done) { channel.callMethod( getDescriptor().getMethods().get(0), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance())); } public void regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse> done) { channel.callMethod( getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance())); } public void reportRSFatalError( com.google.protobuf.RpcController controller, 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done) { channel.callMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance())); } public void getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) { channel.callMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance())); } public void reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse> done) { channel.callMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance())); } } public static BlockingInterface newBlockingStub( com.google.protobuf.BlockingRpcChannel channel) { return new BlockingStub(channel); } public interface BlockingInterface { public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request) throws com.google.protobuf.ServiceException; public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request) throws com.google.protobuf.ServiceException; public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse reportRSFatalError( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request) throws com.google.protobuf.ServiceException; public 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request) throws com.google.protobuf.ServiceException; public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request) throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { this.channel = channel; } private final com.google.protobuf.BlockingRpcChannel channel; public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse regionServerStartup( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(0), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance()); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse regionServerReport( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(1), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance()); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse reportRSFatalError( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(2), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()); } public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(3), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()); } public 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse reportRegionStateTransition( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(4), controller, request, org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance()); } } // @@protoc_insertion_point(class_scope:RegionServerStatusService) } private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionServerStartupRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionServerStartupRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionServerStartupResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionServerStartupResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionServerReportRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionServerReportRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionServerReportResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionServerReportResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ReportRSFatalErrorRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ReportRSFatalErrorRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ReportRSFatalErrorResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ReportRSFatalErrorResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_GetLastFlushedSequenceIdRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_GetLastFlushedSequenceIdResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionStateTransition_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionStateTransition_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ReportRegionStateTransitionRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ReportRegionStateTransitionRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ReportRegionStateTransitionResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ReportRegionStateTransitionResponse_fieldAccessorTable; public static 
com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\030RegionServerStatus.proto\032\013HBase.proto\032" + "\023ClusterStatus.proto\"b\n\032RegionServerStar" + "tupRequest\022\014\n\004port\030\001 \002(\r\022\031\n\021server_start" + "_code\030\002 \002(\004\022\033\n\023server_current_time\030\003 \002(\004" + "\"C\n\033RegionServerStartupResponse\022$\n\013map_e" + "ntries\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionS" + "erverReportRequest\022\033\n\006server\030\001 \002(\0132\013.Ser" + "verName\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032Re" + "gionServerReportResponse\"O\n\031ReportRSFata" + "lErrorRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa", "me\022\025\n\rerror_message\030\002 \002(\t\"\034\n\032ReportRSFat" + "alErrorResponse\"6\n\037GetLastFlushedSequenc" + "eIdRequest\022\023\n\013region_name\030\001 \002(\014\"D\n GetLa" + "stFlushedSequenceIdResponse\022 \n\030last_flus" + "hed_sequence_id\030\001 \002(\004\"\322\002\n\025RegionStateTra" + "nsition\022>\n\017transition_code\030\001 \002(\0162%.Regio" + "nStateTransition.TransitionCode\022 \n\013regio" + "n_info\030\002 \003(\0132\013.RegionInfo\022\024\n\014open_seq_nu" + "m\030\003 \001(\004\"\300\001\n\016TransitionCode\022\n\n\006OPENED\020\000\022\017" + "\n\013FAILED_OPEN\020\001\022\n\n\006CLOSED\020\002\022\022\n\016READY_TO_", "SPLIT\020\003\022\022\n\016READY_TO_MERGE\020\004\022\016\n\nSPLIT_PON" + "R\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPLIT\020\007\022\n\n\006MERGED" + "\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016MERGE_REVERTED" + "\020\n\"m\n\"ReportRegionStateTransitionRequest" + "\022\033\n\006server\030\001 \002(\0132\013.ServerName\022*\n\ntransit" + "ion\030\002 \003(\0132\026.RegionStateTransition\"<\n#Rep" + "ortRegionStateTransitionResponse\022\025\n\rerro" + "r_message\030\001 \001(\t2\326\003\n\031RegionServerStatusSe" + "rvice\022P\n\023RegionServerStartup\022\033.RegionSer" + "verStartupRequest\032\034.RegionServerStartupR", "esponse\022M\n\022RegionServerReport\022\032.RegionSe" + "rverReportRequest\032\033.RegionServerReportRe" + "sponse\022M\n\022ReportRSFatalError\022\032.ReportRSF" + "atalErrorRequest\032\033.ReportRSFatalErrorRes" + "ponse\022_\n\030GetLastFlushedSequenceId\022 .GetL" + "astFlushedSequenceIdRequest\032!.GetLastFlu" + "shedSequenceIdResponse\022h\n\033ReportRegionSt" + "ateTransition\022#.ReportRegionStateTransit" + "ionRequest\032$.ReportRegionStateTransition" + "ResponseBN\n*org.apache.hadoop.hbase.prot", "obuf.generatedB\030RegionServerStatusProtos" + "H\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; internal_static_RegionServerStartupRequest_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_RegionServerStartupRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerStartupRequest_descriptor, new java.lang.String[] { "Port", "ServerStartCode", "ServerCurrentTime", }); internal_static_RegionServerStartupResponse_descriptor = getDescriptor().getMessageTypes().get(1); 
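 // Note: these getMessageTypes().get(N) indexes mirror the declaration order
 // of the messages inside the serialized descriptorData above, keeping the
 // field accessor wiring in sync with RegionServerStatus.proto.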
internal_static_RegionServerStartupResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerStartupResponse_descriptor, new java.lang.String[] { "MapEntries", }); internal_static_RegionServerReportRequest_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_RegionServerReportRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerReportRequest_descriptor, new java.lang.String[] { "Server", "Load", }); internal_static_RegionServerReportResponse_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_RegionServerReportResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerReportResponse_descriptor, new java.lang.String[] { }); internal_static_ReportRSFatalErrorRequest_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_ReportRSFatalErrorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReportRSFatalErrorRequest_descriptor, new java.lang.String[] { "Server", "ErrorMessage", }); internal_static_ReportRSFatalErrorResponse_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_ReportRSFatalErrorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReportRSFatalErrorResponse_descriptor, new java.lang.String[] { }); internal_static_GetLastFlushedSequenceIdRequest_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetLastFlushedSequenceIdRequest_descriptor, new java.lang.String[] { "RegionName", }); internal_static_GetLastFlushedSequenceIdResponse_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetLastFlushedSequenceIdResponse_descriptor, new java.lang.String[] { "LastFlushedSequenceId", }); internal_static_RegionStateTransition_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_RegionStateTransition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionStateTransition_descriptor, new java.lang.String[] { "TransitionCode", "RegionInfo", "OpenSeqNum", }); internal_static_ReportRegionStateTransitionRequest_descriptor = getDescriptor().getMessageTypes().get(9); internal_static_ReportRegionStateTransitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReportRegionStateTransitionRequest_descriptor, new java.lang.String[] { "Server", "Transition", }); internal_static_ReportRegionStateTransitionResponse_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_ReportRegionStateTransitionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReportRegionStateTransitionResponse_descriptor, new java.lang.String[] { "ErrorMessage", }); return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), }, assigner); 
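 // At class-load time the embedded descriptorData is parsed and cross-linked
 // against its two dependencies, HBase.proto and ClusterStatus.proto, through
 // the assigner callback above.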
} // @@protoc_insertion_point(outer_class_scope) }
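// Usage sketch (illustrative only; this file is generated and must not be
// edited by hand). The channel and controller below are assumed to come from
// the application's RPC layer, and regionNameBytes is an assumed ByteString
// holding the encoded region name:
//
//   RegionServerStatusProtos.RegionServerStatusService.BlockingInterface master =
//       RegionServerStatusProtos.RegionServerStatusService.newBlockingStub(channel);
//   RegionServerStatusProtos.GetLastFlushedSequenceIdResponse resp =
//       master.getLastFlushedSequenceId(controller,
//           RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.newBuilder()
//               .setRegionName(regionNameBytes)
//               .build());
//   long lastFlushed = resp.getLastFlushedSequenceId();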