// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/protobuf/compiler/plugin.proto

package org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler;

public final class PluginProtos {
  private PluginProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
  }
  public interface VersionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.Version)
      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {

    /**
     * <code>optional int32 major = 1;</code>
     */
    boolean hasMajor();
    /**
     * <code>optional int32 major = 1;</code>
     */
    int getMajor();

    /**
     * <code>optional int32 minor = 2;</code>
     */
    boolean hasMinor();
    /**
     * <code>optional int32 minor = 2;</code>
     */
    int getMinor();

    /**
     * <code>optional int32 patch = 3;</code>
     */
    boolean hasPatch();
    /**
     * <code>optional int32 patch = 3;</code>
     */
    int getPatch();

    /**
     * <pre>
     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
     * be empty for mainline stable releases.
     * </pre>
     *
     * <code>optional string suffix = 4;</code>
     */
    boolean hasSuffix();
    /**
     * <pre>
     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
     * be empty for mainline stable releases.
     * </pre>
     *
     * <code>optional string suffix = 4;</code>
     */
    java.lang.String getSuffix();
    /**
     * <pre>
     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
     * be empty for mainline stable releases.
     * </pre>
     *
     * <code>optional string suffix = 4;</code>
     */
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
        getSuffixBytes();
  }
  /**
   * <pre>
   * The version number of protocol compiler.
   * </pre>
   *
   * Protobuf type {@code google.protobuf.compiler.Version}
   */
  public static final class Version extends
      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:google.protobuf.compiler.Version)
      VersionOrBuilder {
    // Use Version.newBuilder() to construct.
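    // Editor's note (illustrative sketch, not part of the generated code): a plugin
    // that receives this message inside a CodeGeneratorRequest can read it field by
    // field; the `request` variable below is an assumed local, not defined here.
    //
    //   PluginProtos.Version v = request.getCompilerVersion();
    //   String rendered = v.getMajor() + "." + v.getMinor() + "." + v.getPatch()
    //       + (v.hasSuffix() ? "-" + v.getSuffix() : "");
    //
    // hasSuffix() distinguishes an unset suffix from one explicitly set to "".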
private Version(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private Version() { major_ = 0; minor_ = 0; patch_ = 0; suffix_ = ""; } @java.lang.Override public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Version( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; major_ = input.readInt32(); break; } case 16: { bitField0_ |= 0x00000002; minor_ = input.readInt32(); break; } case 24: { bitField0_ |= 0x00000004; patch_ = input.readInt32(); break; } case 34: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000008; suffix_ = bs; break; } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_Version_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_Version_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder.class); } private int bitField0_; public static final int MAJOR_FIELD_NUMBER = 1; private int major_; /** * <code>optional int32 major = 1;</code> */ public boolean hasMajor() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional int32 major = 1;</code> */ public int getMajor() { return major_; } public static final int MINOR_FIELD_NUMBER = 2; private int minor_; /** * <code>optional int32 minor = 2;</code> */ public boolean hasMinor() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional int32 minor = 2;</code> */ public int getMinor() { return minor_; } public static final int PATCH_FIELD_NUMBER = 3; private int patch_; /** * <code>optional int32 patch = 3;</code> */ public boolean hasPatch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <code>optional int32 patch = 3;</code> */ public int getPatch() { return patch_; } public static final int SUFFIX_FIELD_NUMBER = 4; private volatile java.lang.Object suffix_; /** * <pre> * A suffix for 
alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public boolean hasSuffix() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public java.lang.String getSuffix() { java.lang.Object ref = suffix_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { suffix_ = s; } return s; } } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSuffixBytes() { java.lang.Object ref = suffix_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); suffix_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt32(1, major_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, minor_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, patch_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 4, suffix_); } unknownFields.writeTo(output); } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeInt32Size(1, major_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeInt32Size(2, minor_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeInt32Size(3, patch_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(4, suffix_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version)) { return super.equals(obj); } org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version) obj; boolean result = true; result = result && (hasMajor() 
== other.hasMajor()); if (hasMajor()) { result = result && (getMajor() == other.getMajor()); } result = result && (hasMinor() == other.hasMinor()); if (hasMinor()) { result = result && (getMinor() == other.getMinor()); } result = result && (hasPatch() == other.hasPatch()); if (hasPatch()) { result = result && (getPatch() == other.getPatch()); } result = result && (hasSuffix() == other.hasSuffix()); if (hasSuffix()) { result = result && getSuffix() .equals(other.getSuffix()); } result = result && unknownFields.equals(other.unknownFields); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasMajor()) { hash = (37 * hash) + MAJOR_FIELD_NUMBER; hash = (53 * hash) + getMajor(); } if (hasMinor()) { hash = (37 * hash) + MINOR_FIELD_NUMBER; hash = (53 * hash) + getMinor(); } if (hasPatch()) { hash = (37 * hash) + PATCH_FIELD_NUMBER; hash = (53 * hash) + getPatch(); } if (hasSuffix()) { hash = (37 * hash) + SUFFIX_FIELD_NUMBER; hash = (53 * hash) + getSuffix().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * The version number of protocol compiler. 
   * </pre>
   *
   * Protobuf type {@code google.protobuf.compiler.Version}
   */
    public static final class Builder extends
        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:google.protobuf.compiler.Version)
        org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder {
      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_Version_descriptor;
      }

      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_Version_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
        }
      }
      public Builder clear() {
        super.clear();
        major_ = 0;
        bitField0_ = (bitField0_ & ~0x00000001);
        minor_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        patch_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        suffix_ = "";
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_Version_descriptor;
      }

      public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance();
      }

      public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version build() {
        org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version buildPartial() {
        org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version result = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.major_ = major_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.minor_ = minor_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.patch_ = patch_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.suffix_ = suffix_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder clone() {
        return
(Builder) super.clone(); } public Builder setField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version) { return mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version other) { if (other == org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance()) return this; if (other.hasMajor()) { setMajor(other.getMajor()); } if (other.hasMinor()) { setMinor(other.getMinor()); } if (other.hasPatch()) { setPatch(other.getPatch()); } if (other.hasSuffix()) { bitField0_ |= 0x00000008; suffix_ = other.suffix_; onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int major_ ; /** * <code>optional int32 major = 1;</code> */ public boolean hasMajor() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional int32 major = 1;</code> */ public int getMajor() { return major_; } /** * <code>optional int32 major = 1;</code> */ public Builder setMajor(int value) { bitField0_ |= 0x00000001; major_ = value; onChanged(); return this; } /** * <code>optional int32 major = 1;</code> */ public Builder clearMajor() { bitField0_ = (bitField0_ & ~0x00000001); major_ = 0; onChanged(); return this; } private int minor_ ; /** * <code>optional int32 minor = 2;</code> */ public boolean hasMinor() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional int32 minor = 2;</code> */ public int getMinor() { return minor_; } /** * <code>optional int32 minor = 2;</code> */ public Builder setMinor(int value) { bitField0_ |= 
0x00000002; minor_ = value; onChanged(); return this; } /** * <code>optional int32 minor = 2;</code> */ public Builder clearMinor() { bitField0_ = (bitField0_ & ~0x00000002); minor_ = 0; onChanged(); return this; } private int patch_ ; /** * <code>optional int32 patch = 3;</code> */ public boolean hasPatch() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <code>optional int32 patch = 3;</code> */ public int getPatch() { return patch_; } /** * <code>optional int32 patch = 3;</code> */ public Builder setPatch(int value) { bitField0_ |= 0x00000004; patch_ = value; onChanged(); return this; } /** * <code>optional int32 patch = 3;</code> */ public Builder clearPatch() { bitField0_ = (bitField0_ & ~0x00000004); patch_ = 0; onChanged(); return this; } private java.lang.Object suffix_ = ""; /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public boolean hasSuffix() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public java.lang.String getSuffix() { java.lang.Object ref = suffix_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { suffix_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSuffixBytes() { java.lang.Object ref = suffix_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); suffix_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public Builder setSuffix( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000008; suffix_ = value; onChanged(); return this; } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. * </pre> * * <code>optional string suffix = 4;</code> */ public Builder clearSuffix() { bitField0_ = (bitField0_ & ~0x00000008); suffix_ = getDefaultInstance().getSuffix(); onChanged(); return this; } /** * <pre> * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should * be empty for mainline stable releases. 
       * </pre>
       *
       * <code>optional string suffix = 4;</code>
       */
      public Builder setSuffixBytes(
          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000008;
        suffix_ = value;
        onChanged();
        return this;
      }

      public final Builder setUnknownFields(
          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      public final Builder mergeUnknownFields(
          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }

      // @@protoc_insertion_point(builder_scope:google.protobuf.compiler.Version)
    }

    // @@protoc_insertion_point(class_scope:google.protobuf.compiler.Version)
    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version();
    }

    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Version>
        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<Version>() {
      public Version parsePartialFrom(
          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
          return new Version(input, extensionRegistry);
      }
    };

    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Version> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Version> getParserForType() {
      return PARSER;
    }

    public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface CodeGeneratorRequestOrBuilder extends
      // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.CodeGeneratorRequest)
      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * The .proto files that were explicitly listed on the command-line. The
     * code generator should generate code only for these files. Each file's
     * descriptor will be included in proto_file, below.
     * </pre>
     *
     * <code>repeated string file_to_generate = 1;</code>
     */
    java.util.List<java.lang.String>
        getFileToGenerateList();
    /**
     * <pre>
     * The .proto files that were explicitly listed on the command-line. The
     * code generator should generate code only for these files. Each file's
     * descriptor will be included in proto_file, below.
     * </pre>
     *
     * <code>repeated string file_to_generate = 1;</code>
     */
    int getFileToGenerateCount();
    /**
     * <pre>
     * The .proto files that were explicitly listed on the command-line. The
     * code generator should generate code only for these files. Each file's
     * descriptor will be included in proto_file, below.
     * </pre>
     *
     * <code>repeated string file_to_generate = 1;</code>
     */
    java.lang.String getFileToGenerate(int index);
    /**
     * <pre>
     * The .proto files that were explicitly listed on the command-line. The
     * code generator should generate code only for these files. Each file's
     * descriptor will be included in proto_file, below.
* </pre> * * <code>repeated string file_to_generate = 1;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getFileToGenerateBytes(int index); /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ boolean hasParameter(); /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ java.lang.String getParameter(); /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getParameterBytes(); /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> getProtoFileList(); /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto getProtoFile(int index); /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ int getProtoFileCount(); /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. 
     * protoc guarantees that all proto_files will be written after
     * the fields above, even though this is not technically guaranteed by the
     * protobuf wire format. This theoretically could allow a plugin to stream
     * in the FileDescriptorProtos and handle them one by one rather than read
     * the entire set into memory at once. However, as of this writing, this
     * is not similarly optimized on protoc's end -- it will store all fields in
     * memory at once before sending them to the plugin.
     * </pre>
     *
     * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder>
        getProtoFileOrBuilderList();
    /**
     * <pre>
     * FileDescriptorProtos for all files in files_to_generate and everything
     * they import. The files will appear in topological order, so each file
     * appears before any file that imports it.
     * protoc guarantees that all proto_files will be written after
     * the fields above, even though this is not technically guaranteed by the
     * protobuf wire format. This theoretically could allow a plugin to stream
     * in the FileDescriptorProtos and handle them one by one rather than read
     * the entire set into memory at once. However, as of this writing, this
     * is not similarly optimized on protoc's end -- it will store all fields in
     * memory at once before sending them to the plugin.
     * </pre>
     *
     * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code>
     */
    org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder getProtoFileOrBuilder(
        int index);

    /**
     * <pre>
     * The version number of protocol compiler.
     * </pre>
     *
     * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code>
     */
    boolean hasCompilerVersion();
    /**
     * <pre>
     * The version number of protocol compiler.
     * </pre>
     *
     * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code>
     */
    org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getCompilerVersion();
    /**
     * <pre>
     * The version number of protocol compiler.
     * </pre>
     *
     * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code>
     */
    org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder getCompilerVersionOrBuilder();
  }
  /**
   * <pre>
   * An encoded CodeGeneratorRequest is written to the plugin's stdin.
   * </pre>
   *
   * Protobuf type {@code google.protobuf.compiler.CodeGeneratorRequest}
   */
  public static final class CodeGeneratorRequest extends
      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:google.protobuf.compiler.CodeGeneratorRequest)
      CodeGeneratorRequestOrBuilder {
    // Use CodeGeneratorRequest.newBuilder() to construct.
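    // Editor's note (illustrative sketch, not part of the generated code): as the
    // class comment above says, protoc writes an encoded CodeGeneratorRequest to the
    // plugin's stdin, so a plugin typically starts by parsing it from System.in:
    //
    //   PluginProtos.CodeGeneratorRequest request =
    //       PluginProtos.CodeGeneratorRequest.parseFrom(System.in);
    //   for (String file : request.getFileToGenerateList()) {
    //     // generate output only for the explicitly requested .proto files
    //   }
    //
    // The plugin then writes its encoded response to stdout.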
private CodeGeneratorRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private CodeGeneratorRequest() { fileToGenerate_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; parameter_ = ""; protoFile_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CodeGeneratorRequest( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { fileToGenerate_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000001; } fileToGenerate_.add(bs); break; } case 18: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; parameter_ = bs; break; } case 26: { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = compilerVersion_.toBuilder(); } compilerVersion_ = input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(compilerVersion_); compilerVersion_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000002; break; } case 122: { if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { protoFile_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>(); mutable_bitField0_ |= 0x00000004; } protoFile_.add( input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.PARSER, extensionRegistry)); break; } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { fileToGenerate_ = fileToGenerate_.getUnmodifiableView(); } if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { protoFile_ = java.util.Collections.unmodifiableList(protoFile_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor; } protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder.class); } private int bitField0_; public static final int FILE_TO_GENERATE_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringList fileToGenerate_; /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolStringList getFileToGenerateList() { return fileToGenerate_; } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public int getFileToGenerateCount() { return fileToGenerate_.size(); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public java.lang.String getFileToGenerate(int index) { return fileToGenerate_.get(index); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getFileToGenerateBytes(int index) { return fileToGenerate_.getByteString(index); } public static final int PARAMETER_FIELD_NUMBER = 2; private volatile java.lang.Object parameter_; /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public boolean hasParameter() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public java.lang.String getParameter() { java.lang.Object ref = parameter_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { parameter_ = s; } return s; } } /** * <pre> * The generator parameter passed on the command-line. 
* </pre> * * <code>optional string parameter = 2;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getParameterBytes() { java.lang.Object ref = parameter_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); parameter_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } public static final int PROTO_FILE_FIELD_NUMBER = 15; private java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> protoFile_; /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> getProtoFileList() { return protoFile_; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder> getProtoFileOrBuilderList() { return protoFile_; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public int getProtoFileCount() { return protoFile_.size(); } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. 
The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto getProtoFile(int index) { return protoFile_.get(index); } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder getProtoFileOrBuilder( int index) { return protoFile_.get(index); } public static final int COMPILER_VERSION_FIELD_NUMBER = 3; private org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version compilerVersion_; /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public boolean hasCompilerVersion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getCompilerVersion() { return compilerVersion_ == null ? org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance() : compilerVersion_; } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder getCompilerVersionOrBuilder() { return compilerVersion_ == null ? 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance() : compilerVersion_; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; for (int i = 0; i < getProtoFileCount(); i++) { if (!getProtoFile(i).isInitialized()) { memoizedIsInitialized = 0; return false; } } memoizedIsInitialized = 1; return true; } public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { for (int i = 0; i < fileToGenerate_.size(); i++) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, fileToGenerate_.getRaw(i)); } if (((bitField0_ & 0x00000001) == 0x00000001)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, parameter_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(3, getCompilerVersion()); } for (int i = 0; i < protoFile_.size(); i++) { output.writeMessage(15, protoFile_.get(i)); } unknownFields.writeTo(output); } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; { int dataSize = 0; for (int i = 0; i < fileToGenerate_.size(); i++) { dataSize += computeStringSizeNoTag(fileToGenerate_.getRaw(i)); } size += dataSize; size += 1 * getFileToGenerateList().size(); } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, parameter_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(3, getCompilerVersion()); } for (int i = 0; i < protoFile_.size(); i++) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(15, protoFile_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest)) { return super.equals(obj); } org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest) obj; boolean result = true; result = result && getFileToGenerateList() .equals(other.getFileToGenerateList()); result = result && (hasParameter() == other.hasParameter()); if (hasParameter()) { result = result && getParameter() .equals(other.getParameter()); } result = result && getProtoFileList() .equals(other.getProtoFileList()); result = result && (hasCompilerVersion() == other.hasCompilerVersion()); if (hasCompilerVersion()) { result = result && getCompilerVersion() .equals(other.getCompilerVersion()); } result = result && unknownFields.equals(other.unknownFields); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (getFileToGenerateCount() > 0) { hash = (37 * hash) + FILE_TO_GENERATE_FIELD_NUMBER; hash = (53 * hash) + getFileToGenerateList().hashCode(); } if (hasParameter()) { hash = (37 * hash) + PARAMETER_FIELD_NUMBER; hash = (53 * hash) + 
getParameter().hashCode(); } if (getProtoFileCount() > 0) { hash = (37 * hash) + PROTO_FILE_FIELD_NUMBER; hash = (53 * hash) + getProtoFileList().hashCode(); } if (hasCompilerVersion()) { hash = (37 * hash) + COMPILER_VERSION_FIELD_NUMBER; hash = (53 * hash) + getCompilerVersion().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parseFrom(
        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * An encoded CodeGeneratorRequest is written to the plugin's stdin.
     * </pre>
     *
     * Protobuf type {@code google.protobuf.compiler.CodeGeneratorRequest}
     */
    public static final class Builder extends
        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:google.protobuf.compiler.CodeGeneratorRequest)
        org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequestOrBuilder {
      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor;
      }

      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorRequest_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getProtoFileFieldBuilder();
          getCompilerVersionFieldBuilder();
        }
      }
      public Builder clear() {
        super.clear();
        fileToGenerate_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000001);
        parameter_ = "";
        bitField0_ = (bitField0_ & ~0x00000002);
        if (protoFileBuilder_ == null) {
          protoFile_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
        } else {
          protoFileBuilder_.clear();
        }
        if (compilerVersionBuilder_ == null) {
          compilerVersion_ = null;
        } else {
          compilerVersionBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
      public
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest getDefaultInstanceForType() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.getDefaultInstance(); } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest build() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest buildPartial() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest result = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { fileToGenerate_ = fileToGenerate_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000001); } result.fileToGenerate_ = fileToGenerate_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } result.parameter_ = parameter_; if (protoFileBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004)) { protoFile_ = java.util.Collections.unmodifiableList(protoFile_); bitField0_ = (bitField0_ & ~0x00000004); } result.protoFile_ = protoFile_; } else { result.protoFile_ = protoFileBuilder_.build(); } if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000002; } if (compilerVersionBuilder_ == null) { result.compilerVersion_ = compilerVersion_; } else { result.compilerVersion_ = compilerVersionBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest) { return mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest)other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest other) { if (other == org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.getDefaultInstance()) return this; if (!other.fileToGenerate_.isEmpty()) { if (fileToGenerate_.isEmpty()) { fileToGenerate_ = other.fileToGenerate_; bitField0_ = (bitField0_ & ~0x00000001); } else { ensureFileToGenerateIsMutable(); fileToGenerate_.addAll(other.fileToGenerate_); } onChanged(); } if (other.hasParameter()) { bitField0_ |= 0x00000002; parameter_ = other.parameter_; onChanged(); } if (protoFileBuilder_ == null) { if (!other.protoFile_.isEmpty()) { if (protoFile_.isEmpty()) { protoFile_ = other.protoFile_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureProtoFileIsMutable(); protoFile_.addAll(other.protoFile_); } onChanged(); } } else { if (!other.protoFile_.isEmpty()) { if (protoFileBuilder_.isEmpty()) { protoFileBuilder_.dispose(); protoFileBuilder_ = null; protoFile_ = other.protoFile_; bitField0_ = (bitField0_ & ~0x00000004); protoFileBuilder_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getProtoFileFieldBuilder() : null; } else { protoFileBuilder_.addAllMessages(other.protoFile_); } } } if (other.hasCompilerVersion()) { mergeCompilerVersion(other.getCompilerVersion()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } public final boolean isInitialized() { for (int i = 0; i < getProtoFileCount(); i++) { if (!getProtoFile(i).isInitialized()) { return false; } } return true; } public Builder mergeFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringList fileToGenerate_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureFileToGenerateIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { fileToGenerate_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList(fileToGenerate_); bitField0_ |= 0x00000001; } } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolStringList getFileToGenerateList() { return fileToGenerate_.getUnmodifiableView(); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. 
* </pre> * * <code>repeated string file_to_generate = 1;</code> */ public int getFileToGenerateCount() { return fileToGenerate_.size(); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public java.lang.String getFileToGenerate(int index) { return fileToGenerate_.get(index); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getFileToGenerateBytes(int index) { return fileToGenerate_.getByteString(index); } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public Builder setFileToGenerate( int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFileToGenerateIsMutable(); fileToGenerate_.set(index, value); onChanged(); return this; } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public Builder addFileToGenerate( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureFileToGenerateIsMutable(); fileToGenerate_.add(value); onChanged(); return this; } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public Builder addAllFileToGenerate( java.lang.Iterable<java.lang.String> values) { ensureFileToGenerateIsMutable(); org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( values, fileToGenerate_); onChanged(); return this; } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. * </pre> * * <code>repeated string file_to_generate = 1;</code> */ public Builder clearFileToGenerate() { fileToGenerate_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * <pre> * The .proto files that were explicitly listed on the command-line. The * code generator should generate code only for these files. Each file's * descriptor will be included in proto_file, below. 
* </pre> * * <code>repeated string file_to_generate = 1;</code> */ public Builder addFileToGenerateBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } ensureFileToGenerateIsMutable(); fileToGenerate_.add(value); onChanged(); return this; } private java.lang.Object parameter_ = ""; /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public boolean hasParameter() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public java.lang.String getParameter() { java.lang.Object ref = parameter_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { parameter_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getParameterBytes() { java.lang.Object ref = parameter_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); parameter_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public Builder setParameter( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; parameter_ = value; onChanged(); return this; } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public Builder clearParameter() { bitField0_ = (bitField0_ & ~0x00000002); parameter_ = getDefaultInstance().getParameter(); onChanged(); return this; } /** * <pre> * The generator parameter passed on the command-line. * </pre> * * <code>optional string parameter = 2;</code> */ public Builder setParameterBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; parameter_ = value; onChanged(); return this; } private java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> protoFile_ = java.util.Collections.emptyList(); private void ensureProtoFileIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { protoFile_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>(protoFile_); bitField0_ |= 0x00000004; } } private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder> protoFileBuilder_; /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. 
The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> getProtoFileList() { if (protoFileBuilder_ == null) { return java.util.Collections.unmodifiableList(protoFile_); } else { return protoFileBuilder_.getMessageList(); } } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public int getProtoFileCount() { if (protoFileBuilder_ == null) { return protoFile_.size(); } else { return protoFileBuilder_.getCount(); } } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto getProtoFile(int index) { if (protoFileBuilder_ == null) { return protoFile_.get(index); } else { return protoFileBuilder_.getMessage(index); } } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. 
However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder setProtoFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto value) { if (protoFileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureProtoFileIsMutable(); protoFile_.set(index, value); onChanged(); } else { protoFileBuilder_.setMessage(index, value); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder setProtoFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder builderForValue) { if (protoFileBuilder_ == null) { ensureProtoFileIsMutable(); protoFile_.set(index, builderForValue.build()); onChanged(); } else { protoFileBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder addProtoFile(org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto value) { if (protoFileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureProtoFileIsMutable(); protoFile_.add(value); onChanged(); } else { protoFileBuilder_.addMessage(value); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. 
However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder addProtoFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto value) { if (protoFileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureProtoFileIsMutable(); protoFile_.add(index, value); onChanged(); } else { protoFileBuilder_.addMessage(index, value); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder addProtoFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder builderForValue) { if (protoFileBuilder_ == null) { ensureProtoFileIsMutable(); protoFile_.add(builderForValue.build()); onChanged(); } else { protoFileBuilder_.addMessage(builderForValue.build()); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder addProtoFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder builderForValue) { if (protoFileBuilder_ == null) { ensureProtoFileIsMutable(); protoFile_.add(index, builderForValue.build()); onChanged(); } else { protoFileBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. 
However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder addAllProtoFile( java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto> values) { if (protoFileBuilder_ == null) { ensureProtoFileIsMutable(); org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( values, protoFile_); onChanged(); } else { protoFileBuilder_.addAllMessages(values); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder clearProtoFile() { if (protoFileBuilder_ == null) { protoFile_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { protoFileBuilder_.clear(); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public Builder removeProtoFile(int index) { if (protoFileBuilder_ == null) { ensureProtoFileIsMutable(); protoFile_.remove(index); onChanged(); } else { protoFileBuilder_.remove(index); } return this; } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. 
* </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder getProtoFileBuilder( int index) { return getProtoFileFieldBuilder().getBuilder(index); } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder getProtoFileOrBuilder( int index) { if (protoFileBuilder_ == null) { return protoFile_.get(index); } else { return protoFileBuilder_.getMessageOrBuilder(index); } } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder> getProtoFileOrBuilderList() { if (protoFileBuilder_ != null) { return protoFileBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(protoFile_); } } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. 
* </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder addProtoFileBuilder() { return getProtoFileFieldBuilder().addBuilder( org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.getDefaultInstance()); } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. * </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder addProtoFileBuilder( int index) { return getProtoFileFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.getDefaultInstance()); } /** * <pre> * FileDescriptorProtos for all files in files_to_generate and everything * they import. The files will appear in topological order, so each file * appears before any file that imports it. * protoc guarantees that all proto_files will be written after * the fields above, even though this is not technically guaranteed by the * protobuf wire format. This theoretically could allow a plugin to stream * in the FileDescriptorProtos and handle them one by one rather than read * the entire set into memory at once. However, as of this writing, this * is not similarly optimized on protoc's end -- it will store all fields in * memory at once before sending them to the plugin. 
* </pre> * * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder> getProtoFileBuilderList() { return getProtoFileFieldBuilder().getBuilderList(); } private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder> getProtoFileFieldBuilder() { if (protoFileBuilder_ == null) { protoFileBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder>( protoFile_, ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); protoFile_ = null; } return protoFileBuilder_; } private org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version compilerVersion_ = null; private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder> compilerVersionBuilder_; /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public boolean hasCompilerVersion() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version getCompilerVersion() { if (compilerVersionBuilder_ == null) { return compilerVersion_ == null ? org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance() : compilerVersion_; } else { return compilerVersionBuilder_.getMessage(); } } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public Builder setCompilerVersion(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version value) { if (compilerVersionBuilder_ == null) { if (value == null) { throw new NullPointerException(); } compilerVersion_ = value; onChanged(); } else { compilerVersionBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public Builder setCompilerVersion( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder builderForValue) { if (compilerVersionBuilder_ == null) { compilerVersion_ = builderForValue.build(); onChanged(); } else { compilerVersionBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } /** * <pre> * The version number of protocol compiler. 
* </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public Builder mergeCompilerVersion(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version value) { if (compilerVersionBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && compilerVersion_ != null && compilerVersion_ != org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance()) { compilerVersion_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.newBuilder(compilerVersion_).mergeFrom(value).buildPartial(); } else { compilerVersion_ = value; } onChanged(); } else { compilerVersionBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public Builder clearCompilerVersion() { if (compilerVersionBuilder_ == null) { compilerVersion_ = null; onChanged(); } else { compilerVersionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder getCompilerVersionBuilder() { bitField0_ |= 0x00000008; onChanged(); return getCompilerVersionFieldBuilder().getBuilder(); } /** * <pre> * The version number of protocol compiler. * </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder getCompilerVersionOrBuilder() { if (compilerVersionBuilder_ != null) { return compilerVersionBuilder_.getMessageOrBuilder(); } else { return compilerVersion_ == null ? org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.getDefaultInstance() : compilerVersion_; } } /** * <pre> * The version number of protocol compiler. 
* </pre> * * <code>optional .google.protobuf.compiler.Version compiler_version = 3;</code> */ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder> getCompilerVersionFieldBuilder() { if (compilerVersionBuilder_ == null) { compilerVersionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.Version.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.VersionOrBuilder>( getCompilerVersion(), getParentForChildren(), isClean()); compilerVersion_ = null; } return compilerVersionBuilder_; } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } public final Builder mergeUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.protobuf.compiler.CodeGeneratorRequest) } // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest) private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest(); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorRequest> PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<CodeGeneratorRequest>() { public CodeGeneratorRequest parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return new CodeGeneratorRequest(input, extensionRegistry); } }; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorRequest> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorRequest> getParserForType() { return PARSER; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface CodeGeneratorResponseOrBuilder extends // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.CodeGeneratorResponse) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. 
Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ boolean hasError(); /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ java.lang.String getError(); /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getErrorBytes(); /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> getFileList(); /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getFile(int index); /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ int getFileCount(); /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder> getFileOrBuilderList(); /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder getFileOrBuilder( int index); } /** * <pre> * The plugin writes an encoded CodeGeneratorResponse to stdout. * </pre> * * Protobuf type {@code google.protobuf.compiler.CodeGeneratorResponse} */ public static final class CodeGeneratorResponse extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.protobuf.compiler.CodeGeneratorResponse) CodeGeneratorResponseOrBuilder { // Use CodeGeneratorResponse.newBuilder() to construct. 
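  /*
   * Editor's note (not generated by protoc): the surrounding Javadoc describes the plugin
   * contract -- protoc writes an encoded CodeGeneratorRequest to the plugin's stdin and
   * reads an encoded CodeGeneratorResponse from its stdout, and .proto-level problems are
   * reported through the "error" field while the plugin still exits with status zero. The
   * sketch below is only illustrative: ExamplePlugin and the generateFor(...) helper are
   * assumed names, not part of this generated API, and the shaded PluginProtos class is
   * assumed to be imported.
   *
   *   public final class ExamplePlugin {
   *     public static void main(String[] args) throws java.io.IOException {
   *       PluginProtos.CodeGeneratorRequest request =
   *           PluginProtos.CodeGeneratorRequest.parseFrom(System.in);
   *       PluginProtos.CodeGeneratorResponse.Builder response =
   *           PluginProtos.CodeGeneratorResponse.newBuilder();
   *       try {
   *         for (String fileName : request.getFileToGenerateList()) {
   *           response.addFile(generateFor(fileName, request)); // hypothetical helper
   *         }
   *       } catch (RuntimeException e) {
   *         response.setError(e.getMessage()); // report the failure via error, still exit 0
   *       }
   *       response.build().writeTo(System.out);
   *       System.out.flush();
   *     }
   *   }
   */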
private CodeGeneratorResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private CodeGeneratorResponse() { error_ = ""; file_ = java.util.Collections.emptyList(); } @java.lang.Override public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private CodeGeneratorResponse( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; error_ = bs; break; } case 122: { if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { file_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File>(); mutable_bitField0_ |= 0x00000002; } file_.add( input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.PARSER, extensionRegistry)); break; } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { file_ = java.util.Collections.unmodifiableList(file_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder.class); } public interface FileOrBuilder extends // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.CodeGeneratorResponse.File) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. 
This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ boolean hasName(); /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ java.lang.String getName(); /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getNameBytes(); /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. 
* Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ boolean hasInsertionPoint(); /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ java.lang.String getInsertionPoint(); /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. 
NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getInsertionPointBytes(); /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ boolean hasContent(); /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ java.lang.String getContent(); /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getContentBytes(); } /** * <pre> * Represents a single generated file. * </pre> * * Protobuf type {@code google.protobuf.compiler.CodeGeneratorResponse.File} */ public static final class File extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.protobuf.compiler.CodeGeneratorResponse.File) FileOrBuilder { // Use File.newBuilder() to construct. 
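    /*
     * Editor's note (not generated by protoc): the FileOrBuilder Javadoc above describes the
     * three fields of a generated File -- a "name" relative to the output directory, an
     * optional "insertion_point", and the "content" to write or splice in. A hedged sketch of
     * the two common shapes follows; the file path, insertion point name, and contents are
     * illustrative assumptions only.
     *
     *   // A new output file, named relative to the output directory.
     *   PluginProtos.CodeGeneratorResponse.File freshFile =
     *       PluginProtos.CodeGeneratorResponse.File.newBuilder()
     *           .setName("com/example/Generated.java")
     *           .setContent("// generated source text\n")
     *           .build();
     *
     *   // Content spliced into a file produced by an earlier generator in the same protoc
     *   // run; when insertion_point is set, name must also be set and must refer to that
     *   // existing file.
     *   PluginProtos.CodeGeneratorResponse.File insertedFile =
     *       PluginProtos.CodeGeneratorResponse.File.newBuilder()
     *           .setName("com/example/Generated.java")
     *           .setInsertionPoint("class_scope:example.Generated")
     *           .setContent("  // extra members contributed by a second plugin\n")
     *           .build();
     */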
private File(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private File() { name_ = ""; insertionPoint_ = ""; content_ = ""; } @java.lang.Override public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private File( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; name_ = bs; break; } case 18: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; insertionPoint_ = bs; break; } case 122: { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; content_ = bs; break; } } } } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder.class); } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; private volatile java.lang.Object name_; /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. 
* </pre> * * <code>optional string name = 1;</code> */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } public static final int INSERTION_POINT_FIELD_NUMBER = 2; private volatile java.lang.Object insertionPoint_; /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. 
* For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public boolean hasInsertionPoint() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. 
* </pre> * * <code>optional string insertion_point = 2;</code> */ public java.lang.String getInsertionPoint() { java.lang.Object ref = insertionPoint_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { insertionPoint_ = s; } return s; } } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getInsertionPointBytes() { java.lang.Object ref = insertionPoint_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); insertionPoint_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } public static final int CONTENT_FIELD_NUMBER = 15; private volatile java.lang.Object content_; /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public boolean hasContent() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <pre> * The file contents. 
* </pre> * * <code>optional string content = 15;</code> */ public java.lang.String getContent() { java.lang.Object ref = content_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { content_ = s; } return s; } } /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getContentBytes() { java.lang.Object ref = content_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); content_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, insertionPoint_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 15, content_); } unknownFields.writeTo(output); } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, insertionPoint_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(15, content_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File)) { return super.equals(obj); } org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File) obj; boolean result = true; result = result && (hasName() == other.hasName()); if (hasName()) { result = result && getName() .equals(other.getName()); } result = result && (hasInsertionPoint() == other.hasInsertionPoint()); if (hasInsertionPoint()) { result = result && getInsertionPoint() .equals(other.getInsertionPoint()); } result = result && (hasContent() == other.hasContent()); if (hasContent()) { result = result && getContent() .equals(other.getContent()); } result = result && unknownFields.equals(other.unknownFields); 
return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasInsertionPoint()) { hash = (37 * hash) + INSERTION_POINT_FIELD_NUMBER; hash = (53 * hash) + getInsertionPoint().hashCode(); } if (hasContent()) { hash = (37 * hash) + CONTENT_FIELD_NUMBER; hash = (53 * hash) + getContent().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File 
parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * Represents a single generated file. * </pre> * * Protobuf type {@code google.protobuf.compiler.CodeGeneratorResponse.File} */ public static final class Builder extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.protobuf.compiler.CodeGeneratorResponse.File) org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder { public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder.class); } // Construct using org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { } } public Builder clear() { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); insertionPoint_ = ""; bitField0_ = (bitField0_ & ~0x00000002); content_ = ""; bitField0_ = (bitField0_ & ~0x00000004); return this; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getDefaultInstanceForType() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.getDefaultInstance(); } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File build() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File buildPartial() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File result = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.insertionPoint_ = insertionPoint_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.content_ = content_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File) { return mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File other) { if (other == org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.getDefaultInstance()) return this; if (other.hasName()) { bitField0_ |= 0x00000001; name_ = other.name_; onChanged(); } if (other.hasInsertionPoint()) { bitField0_ |= 0x00000002; insertionPoint_ = other.insertionPoint_; onChanged(); } if (other.hasContent()) { bitField0_ |= 0x00000004; content_ = other.content_; onChanged(); } this.mergeUnknownFields(other.unknownFields); 
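// onChanged() notifies the parent builder (if any) that this builder was modified, keeping nested builder views consistent.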
onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. 
* </pre> * * <code>optional string name = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * <pre> * The file name, relative to the output directory. The name must not * contain "." or ".." components and must be relative, not be absolute (so, * the file cannot lie outside the output directory). "/" must be used as * the path separator, not "\". * If the name is omitted, the content will be appended to the previous * file. This allows the generator to break large files into small chunks, * and allows the generated text to be streamed back to protoc so that large * files need not reside completely in memory at one time. Note that as of * this writing protoc does not optimize for this -- it will read the entire * CodeGeneratorResponse before writing files to disk. * </pre> * * <code>optional string name = 1;</code> */ public Builder setNameBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } private java.lang.Object insertionPoint_ = ""; /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. 
The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public boolean hasInsertionPoint() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. 
* Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public java.lang.String getInsertionPoint() { java.lang.Object ref = insertionPoint_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { insertionPoint_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. 
* </pre> * * <code>optional string insertion_point = 2;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getInsertionPointBytes() { java.lang.Object ref = insertionPoint_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); insertionPoint_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public Builder setInsertionPoint( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; insertionPoint_ = value; onChanged(); return this; } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. 
Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. * The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public Builder clearInsertionPoint() { bitField0_ = (bitField0_ & ~0x00000002); insertionPoint_ = getDefaultInstance().getInsertionPoint(); onChanged(); return this; } /** * <pre> * If non-empty, indicates that the named file should already exist, and the * content here is to be inserted into that file at a defined insertion * point. This feature allows a code generator to extend the output * produced by another code generator. The original generator may provide * insertion points by placing special annotations in the file that look * like: * @@protoc_insertion_point(NAME) * The annotation can have arbitrary text before and after it on the line, * which allows it to be placed in a comment. NAME should be replaced with * an identifier naming the point -- this is what other generators will use * as the insertion_point. Code inserted at this point will be placed * immediately above the line containing the insertion point (thus multiple * insertions to the same point will come out in the order they were added). * The double-@ is intended to make it unlikely that the generated code * could contain things that look like insertion points by accident. * For example, the C++ code generator places the following line in the * .pb.h files that it generates: * // @@protoc_insertion_point(namespace_scope) * This line appears within the scope of the file's package namespace, but * outside of any particular class. Another plugin can then specify the * insertion_point "namespace_scope" to generate additional classes or * other declarations that should be placed in this scope. * Note that if the line containing the insertion point begins with * whitespace, the same whitespace will be added to every line of the * inserted text. This is useful for languages like Python, where * indentation matters. In these languages, the insertion point comment * should be indented the same amount as any inserted code will need to be * in order to work correctly in that context. 
* The code generator that generates the initial file and the one which * inserts into it must both run as part of a single invocation of protoc. * Code generators are executed in the order in which they appear on the * command line. * If |insertion_point| is present, |name| must also be present. * </pre> * * <code>optional string insertion_point = 2;</code> */ public Builder setInsertionPointBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; insertionPoint_ = value; onChanged(); return this; } private java.lang.Object content_ = ""; /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public boolean hasContent() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public java.lang.String getContent() { java.lang.Object ref = content_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { content_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getContentBytes() { java.lang.Object ref = content_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); content_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public Builder setContent( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; content_ = value; onChanged(); return this; } /** * <pre> * The file contents. * </pre> * * <code>optional string content = 15;</code> */ public Builder clearContent() { bitField0_ = (bitField0_ & ~0x00000004); content_ = getDefaultInstance().getContent(); onChanged(); return this; } /** * <pre> * The file contents. 
* </pre> * * <code>optional string content = 15;</code> */ public Builder setContentBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; content_ = value; onChanged(); return this; } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } public final Builder mergeUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.protobuf.compiler.CodeGeneratorResponse.File) } // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File) private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File(); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<File> PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<File>() { public File parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return new File(input, extensionRegistry); } }; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<File> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<File> getParserForType() { return PARSER; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; public static final int ERROR_FIELD_NUMBER = 1; private volatile java.lang.Object error_; /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public boolean hasError() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. 
* </pre> * * <code>optional string error = 1;</code> */ public java.lang.String getError() { java.lang.Object ref = error_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { error_ = s; } return s; } } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getErrorBytes() { java.lang.Object ref = error_; if (ref instanceof java.lang.String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); error_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } public static final int FILE_FIELD_NUMBER = 15; private java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> file_; /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> getFileList() { return file_; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public java.util.List<? 
extends org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder> getFileOrBuilderList() { return file_; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public int getFileCount() { return file_.size(); } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getFile(int index) { return file_.get(index); } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder getFileOrBuilder( int index) { return file_.get(index); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, error_); } for (int i = 0; i < file_.size(); i++) { output.writeMessage(15, file_.get(i)); } unknownFields.writeTo(output); } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, error_); } for (int i = 0; i < file_.size(); i++) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeMessageSize(15, file_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse)) { return super.equals(obj); } org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse) obj; boolean result = true; result = result && (hasError() == other.hasError()); if (hasError()) { result = result && getError() .equals(other.getError()); } result = result && getFileList() .equals(other.getFileList()); result = result && unknownFields.equals(other.unknownFields); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); if (hasError()) { hash = (37 * hash) + ERROR_FIELD_NUMBER; hash = (53 * hash) + getError().hashCode(); } if (getFileCount() > 0) { hash = (37 * hash) + FILE_FIELD_NUMBER; hash = (53 * hash) + getFileList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom(byte[] data) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( byte[] data, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseDelimitedFrom( java.io.InputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parseFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse prototype) { return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * <pre> * The plugin writes an encoded CodeGeneratorResponse to stdout. * </pre> * * Protobuf type {@code google.protobuf.compiler.CodeGeneratorResponse} */ public static final class Builder extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.protobuf.compiler.CodeGeneratorResponse) org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponseOrBuilder { public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder.class); } // Construct using org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getFileFieldBuilder(); } } public Builder clear() { super.clear(); error_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (fileBuilder_ == null) { file_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); } else { fileBuilder_.clear(); } return this; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse getDefaultInstanceForType() { return org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.getDefaultInstance(); } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse build() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse buildPartial() { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse result = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.error_ = error_; if (fileBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { file_ = java.util.Collections.unmodifiableList(file_); bitField0_ = (bitField0_ & ~0x00000002); } result.file_ = file_; } else { result.file_ = fileBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse) { return mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse other) { if (other == org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.getDefaultInstance()) return this; if (other.hasError()) { bitField0_ |= 0x00000001; error_ = other.error_; onChanged(); } if (fileBuilder_ == null) { if (!other.file_.isEmpty()) { if (file_.isEmpty()) { file_ = other.file_; bitField0_ = (bitField0_ & ~0x00000002); } else { ensureFileIsMutable(); file_.addAll(other.file_); } onChanged(); } } else { if (!other.file_.isEmpty()) { if (fileBuilder_.isEmpty()) { fileBuilder_.dispose(); fileBuilder_ = null; file_ = other.file_; bitField0_ = (bitField0_ & ~0x00000002); fileBuilder_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getFileFieldBuilder() : null; } else { fileBuilder_.addAllMessages(other.file_); } } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object error_ = ""; /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public boolean hasError() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public java.lang.String getError() { java.lang.Object ref = error_; if (!(ref instanceof java.lang.String)) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { error_ = s; } return s; } else { return (java.lang.String) ref; } } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. 
* </pre> * * <code>optional string error = 1;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getErrorBytes() { java.lang.Object ref = error_; if (ref instanceof String) { org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); error_ = b; return b; } else { return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; } } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public Builder setError( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; error_ = value; onChanged(); return this; } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. * </pre> * * <code>optional string error = 1;</code> */ public Builder clearError() { bitField0_ = (bitField0_ & ~0x00000001); error_ = getDefaultInstance().getError(); onChanged(); return this; } /** * <pre> * Error message. If non-empty, code generation failed. The plugin process * should exit with status code zero even if it reports an error in this way. * This should be used to indicate errors in .proto files which prevent the * code generator from generating correct code. Errors which indicate a * problem in protoc itself -- such as the input CodeGeneratorRequest being * unparseable -- should be reported by writing a message to stderr and * exiting with a non-zero status code. 
* </pre> * * <code>optional string error = 1;</code> */ public Builder setErrorBytes( org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; error_ = value; onChanged(); return this; } private java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> file_ = java.util.Collections.emptyList(); private void ensureFileIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { file_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File>(file_); bitField0_ |= 0x00000002; } } private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder> fileBuilder_; /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> getFileList() { if (fileBuilder_ == null) { return java.util.Collections.unmodifiableList(file_); } else { return fileBuilder_.getMessageList(); } } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public int getFileCount() { if (fileBuilder_ == null) { return file_.size(); } else { return fileBuilder_.getCount(); } } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File getFile(int index) { if (fileBuilder_ == null) { return file_.get(index); } else { return fileBuilder_.getMessage(index); } } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder setFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File value) { if (fileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureFileIsMutable(); file_.set(index, value); onChanged(); } else { fileBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder setFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder builderForValue) { if (fileBuilder_ == null) { ensureFileIsMutable(); file_.set(index, builderForValue.build()); onChanged(); } else { fileBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder addFile(org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File value) { if (fileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureFileIsMutable(); file_.add(value); onChanged(); } else { fileBuilder_.addMessage(value); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder addFile( int index, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File value) { if (fileBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureFileIsMutable(); file_.add(index, value); onChanged(); } else { fileBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder addFile( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder builderForValue) { if (fileBuilder_ == null) { ensureFileIsMutable(); file_.add(builderForValue.build()); onChanged(); } else { fileBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder addFile( int index, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder builderForValue) { if (fileBuilder_ == null) { ensureFileIsMutable(); file_.add(index, builderForValue.build()); onChanged(); } else { fileBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder addAllFile( java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File> values) { if (fileBuilder_ == null) { ensureFileIsMutable(); org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( values, file_); onChanged(); } else { fileBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder clearFile() { if (fileBuilder_ == null) { file_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { fileBuilder_.clear(); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public Builder removeFile(int index) { if (fileBuilder_ == null) { ensureFileIsMutable(); file_.remove(index); onChanged(); } else { fileBuilder_.remove(index); } return this; } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder getFileBuilder( int index) { return getFileFieldBuilder().getBuilder(index); } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder getFileOrBuilder( int index) { if (fileBuilder_ == null) { return file_.get(index); } else { return fileBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public java.util.List<? 
extends org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder> getFileOrBuilderList() { if (fileBuilder_ != null) { return fileBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(file_); } } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder addFileBuilder() { return getFileFieldBuilder().addBuilder( org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.getDefaultInstance()); } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder addFileBuilder( int index) { return getFileFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.getDefaultInstance()); } /** * <code>repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;</code> */ public java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder> getFileBuilderList() { return getFileFieldBuilder().getBuilderList(); } private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder> getFileFieldBuilder() { if (fileBuilder_ == null) { fileBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File.Builder, org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.FileOrBuilder>( file_, ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); file_ = null; } return fileBuilder_; } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } public final Builder mergeUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.protobuf.compiler.CodeGeneratorResponse) } // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse) private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse(); } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse getDefaultInstance() { return DEFAULT_INSTANCE; } @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorResponse> PARSER = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<CodeGeneratorResponse>() { public CodeGeneratorResponse parsePartialFrom( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { return new CodeGeneratorResponse(input, extensionRegistry); } }; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorResponse> parser() { return PARSER; } @java.lang.Override public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<CodeGeneratorResponse> getParserForType() { return PARSER; } public org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_google_protobuf_compiler_Version_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_protobuf_compiler_Version_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_protobuf_compiler_CodeGeneratorRequest_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_protobuf_compiler_CodeGeneratorResponse_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n%google/protobuf/compiler/plugin.proto\022" + "\030google.protobuf.compiler\032 google/protob" + "uf/descriptor.proto\"F\n\007Version\022\r\n\005major\030" + "\001 \001(\005\022\r\n\005minor\030\002 \001(\005\022\r\n\005patch\030\003 \001(\005\022\016\n\006s" + "uffix\030\004 \001(\t\"\272\001\n\024CodeGeneratorRequest\022\030\n\020" + "file_to_generate\030\001 \003(\t\022\021\n\tparameter\030\002 \001(" + "\t\0228\n\nproto_file\030\017 \003(\0132$.google.protobuf." 
+ "FileDescriptorProto\022;\n\020compiler_version\030" + "\003 \001(\0132!.google.protobuf.compiler.Version" + "\"\252\001\n\025CodeGeneratorResponse\022\r\n\005error\030\001 \001(", "\t\022B\n\004file\030\017 \003(\01324.google.protobuf.compil" + "er.CodeGeneratorResponse.File\032>\n\004File\022\014\n" + "\004name\030\001 \001(\t\022\027\n\017insertion_point\030\002 \001(\t\022\017\n\007" + "content\030\017 \001(\tB7\n\034com.google.protobuf.com" + "pilerB\014PluginProtosZ\tplugin_go" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.getDescriptor(), }, assigner); internal_static_google_protobuf_compiler_Version_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_google_protobuf_compiler_Version_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_protobuf_compiler_Version_descriptor, new java.lang.String[] { "Major", "Minor", "Patch", "Suffix", }); internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_google_protobuf_compiler_CodeGeneratorRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_protobuf_compiler_CodeGeneratorRequest_descriptor, new java.lang.String[] { "FileToGenerate", "Parameter", "ProtoFile", "CompilerVersion", }); internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_google_protobuf_compiler_CodeGeneratorResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor, new java.lang.String[] { "Error", "File", }); internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor = internal_static_google_protobuf_compiler_CodeGeneratorResponse_descriptor.getNestedTypes().get(0); internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_protobuf_compiler_CodeGeneratorResponse_File_descriptor, new java.lang.String[] { "Name", "InsertionPoint", "Content", }); org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) }