Java Examples for org.elasticsearch.common.xcontent.XContentParser

The following Java examples will help you understand the usage of org.elasticsearch.common.xcontent.XContentParser. These source code samples are taken from different open-source projects.

Example 1
Project: elasticsearch-http-master  File: HitTest.java View source code
@Test
public void should_parse_hit() throws IOException {
    // Parses a sample hit JSON from the classpath and verifies every extracted field.
    String json = readFromClasspath("json/entity/hit.json");
    String source = readFromClasspath("json/entity/source.json");
    // Use the byte array's own length: json.length() counts characters, which is
    // smaller than the encoded byte count for non-ASCII content and would have
    // truncated the parser's input. Encode explicitly as UTF-8 rather than the
    // platform default charset.
    byte[] jsonBytes = json.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    XContentParser parser = XContentHelper.createParser(jsonBytes, 0, jsonBytes.length);
    parser.nextToken();
    Hit hit = new Hit().parse(parser);
    assertThat(hit.getId()).isEqualTo("1");
    assertThat(hit.getType()).isEqualTo("tweet");
    assertThat(hit.getIndex()).isEqualTo("twitter");
    assertThat(hit.getScore()).isEqualTo(1.7f, Offset.offset(0.01f));
    assertThatJson(new String(hit.getSource())).isEqualTo(source);
}
Example 2
Project: elasticsearch-gatherer-master  File: GathererState.java View source code
/**
 * Deserializes a JSON array setting value into an immutable list of job events.
 *
 * @param value JSON string expected to contain an array of job-event objects
 * @return immutable list of the parsed {@code JobEvent}s
 * @throws IOException if the JSON cannot be parsed
 */
private List<JobEvent> parseSetting(String value) throws IOException {
    XContentParser parser = xContent(JSON).createParser(value);
    Builder<JobEvent> events = ImmutableList.builder();
    // Skip the initial START_ARRAY token, then read one event per element
    // until the closing END_ARRAY.
    parser.nextToken();
    while (parser.nextToken() != END_ARRAY) {
        events.add(new JobEvent().fromXContent(parser));
    }
    return events.build();
}
Example 3
Project: elasticsearch-inout-plugin-master  File: Importer.java View source code
/**
 * Parses a single JSON line of an import file into an {@link IndexRequest}.
 * <p>
 * Recognized top-level control fields ({@code _id}, {@code _index}, {@code _type},
 * {@code _routing}, {@code _timestamp}, {@code _ttl}, {@code _version}) are copied
 * onto the request; the {@code _source} object becomes the request body.
 *
 * @param line one JSON object, serialized on a single line
 * @return the populated request, or {@code null} when the document's TTL has
 *         already expired (the object must not be imported)
 * @throws ObjectImportException on malformed JSON or I/O failure
 */
private IndexRequest parseObject(String line) throws ObjectImportException {
    XContentParser parser = null;
    try {
        IndexRequest indexRequest = new IndexRequest();
        // Encode once with an explicit charset; the previous bare getBytes()
        // (called twice) used the platform default charset, which corrupts
        // non-ASCII input on some JVMs.
        byte[] lineBytes = line.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        parser = XContentFactory.xContent(lineBytes).createParser(lineBytes);
        Token token;
        XContentBuilder sourceBuilder = XContentFactory.contentBuilder(XContentType.JSON);
        long ttl = 0;
        while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String fieldName = parser.currentName();
                token = parser.nextToken();
                if (fieldName.equals(IdFieldMapper.NAME) && token == Token.VALUE_STRING) {
                    indexRequest.id(parser.text());
                } else if (fieldName.equals(IndexFieldMapper.NAME) && token == Token.VALUE_STRING) {
                    indexRequest.index(parser.text());
                } else if (fieldName.equals(TypeFieldMapper.NAME) && token == Token.VALUE_STRING) {
                    indexRequest.type(parser.text());
                } else if (fieldName.equals(RoutingFieldMapper.NAME) && token == Token.VALUE_STRING) {
                    indexRequest.routing(parser.text());
                } else if (fieldName.equals(TimestampFieldMapper.NAME) && token == Token.VALUE_NUMBER) {
                    indexRequest.timestamp(String.valueOf(parser.longValue()));
                } else if (fieldName.equals(TTLFieldMapper.NAME) && token == Token.VALUE_NUMBER) {
                    ttl = parser.longValue();
                } else if (fieldName.equals("_version") && token == Token.VALUE_NUMBER) {
                    // external versioning so the imported version number is preserved
                    indexRequest.version(parser.longValue());
                    indexRequest.versionType(VersionType.EXTERNAL);
                } else if (fieldName.equals(SourceFieldMapper.NAME) && token == Token.START_OBJECT) {
                    sourceBuilder.copyCurrentStructure(parser);
                }
            } else if (token == null) {
                // premature end of input: keep whatever was parsed so far
                break;
            }
        }
        if (ttl > 0) {
            // "_ttl" holds an absolute expiry time (the subtraction below only
            // makes sense that way); convert it to a duration relative to the
            // document timestamp, or to "now" when no timestamp was given.
            String ts = indexRequest.timestamp();
            long start;
            if (ts != null) {
                start = Long.valueOf(ts);
            } else {
                start = new Date().getTime();
            }
            ttl = ttl - start;
            if (ttl > 0) {
                indexRequest.ttl(ttl);
            } else {
                // object is invalid (already expired), do not import
                return null;
            }
        }
        indexRequest.source(sourceBuilder);
        return indexRequest;
    } catch (ElasticSearchParseException e) {
        throw new ObjectImportException(e);
    } catch (IOException e) {
        throw new ObjectImportException(e);
    } finally {
        // Fix resource leak: the parser was never closed on any path before.
        if (parser != null) {
            parser.close();
        }
    }
}
Example 4
Project: elassandra-master  File: SimpleIndexQueryParserTests.java View source code
// Regression test for https://github.com/elasticsearch/elasticsearch/issues/7240
@Test
public void testEmptyBooleanQueryInsideFQuery() throws Exception {
    IndexQueryParserService queryParser = queryParser();
    String json = copyToStringFromClasspath("/org/elasticsearch/index/query/fquery-with-empty-bool-query.json");
    XContentParser xContentParser = XContentHelper.createParser(new BytesArray(json));
    ParsedQuery result = queryParser.parseInnerFilter(xContentParser);
    // The empty bool query inside fquery must reduce to the term filter alone.
    Query expected = new ConstantScoreQuery(
            Queries.filtered(new TermQuery(new Term("text", "apache")),
                             new TermQuery(new Term("text", "apache"))));
    assertEquals(expected, result.query());
}
Example 5
Project: elasticsearch-knapsack-master  File: KnapsackService.java View source code
/**
 * Loads the list of knapsack states stored under the given name.
 *
 * @param name the state document id
 * @return the parsed states (empty when the document does not exist), or
 *         {@code null} when loading fails (legacy error contract, logged)
 * @throws IOException declared for compatibility; I/O errors are caught below
 */
private List<KnapsackState> get(String name) throws IOException {
    ImmutableList.Builder<KnapsackState> builder = ImmutableList.builder();
    try {
        logger.debug("get knapsack states: {}", name);
        final Client client = injector.getInstance(Client.class);
        createIndexIfNotExist(client);
        GetResponse getResponse = client.prepareGet(INDEX_NAME, MAPPING_NAME, name).execute().actionGet();
        if (!getResponse.isExists()) {
            return builder.build();
        }
        XContentParser parser = xContent(JSON).createParser(getResponse.getSourceAsBytes());
        try {
            XContentParser.Token token;
            // Forward to the array start. Bail out on end-of-input: previously
            // a document without an array spun forever, because nextToken()
            // keeps returning null (!= START_ARRAY) once the input is exhausted.
            while ((token = parser.nextToken()) != START_ARRAY) {
                if (token == null) {
                    return builder.build();
                }
            }
            while ((token = parser.nextToken()) != END_ARRAY) {
                if (token == null) {
                    // truncated input: return what was parsed so far
                    break;
                }
                KnapsackState state = new KnapsackState();
                builder.add(state.fromXContent(parser));
            }
        } finally {
            // fix resource leak: the parser was never closed
            parser.close();
        }
        return builder.build();
    } catch (Throwable t) {
        logger.error("get settings failed", t);
        return null;
    }
}
Example 6
Project: elasticsearch-computed-fields-master  File: CompletionFieldHelper.java View source code
/**
 * Parses a computed completion-field value and adds the resulting suggestion
 * field(s) to the document.
 * <p>
 * Accepts either a plain string (indexed as a single input) or a JSON object
 * using the standard completion keys (input / output / payload / weight).
 *
 * @param value  raw field value: plain text or a JSON object string
 * @param mapper completion mapper providing payload/weight encoding
 * @param doc    Lucene document receiving the completion field(s)
 * @throws IOException if the value cannot be parsed
 */
public static void parse(String value, final CompletionFieldMapper mapper, final Document doc) throws IOException {
    XContentParser parser;
    if (!value.startsWith("{") && !value.endsWith("}")) {
        // Plain string: wrap it as a one-element JSON array so the parser has
        // well-formed input, then advance onto the array start.
        value = "[\"" + value + "\"]";
        parser = JsonXContent.jsonXContent.createParser(value);
        parser.nextToken();
    } else {
        parser = JsonXContent.jsonXContent.createParser(value);
    }
    try {
        parser.nextToken();
        XContentParser.Token token = parser.currentToken();
        String surfaceForm = null;
        BytesRef payload = null;
        long weight = -1;
        List<String> inputs = Lists.newArrayListWithExpectedSize(4);
        if (token == XContentParser.Token.VALUE_STRING) {
            // simple form: the whole value is a single input string
            inputs.add(parser.text());
        } else {
            String currentFieldName = null;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                    if (!CompletionFieldMapper.ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) {
                        throw new ElasticsearchIllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + CompletionFieldMapper.ALLOWED_CONTENT_FIELD_NAMES);
                    }
                } else if (Fields.CONTENT_FIELD_NAME_PAYLOAD.equals(currentFieldName)) {
                    if (!mapper.isStoringPayloads()) {
                        throw new MapperException("Payloads disabled in mapping");
                    }
                    if (token == XContentParser.Token.START_OBJECT) {
                        // structured payload: copy the sub-object verbatim as bytes
                        XContentBuilder payloadBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
                        payload = payloadBuilder.bytes().toBytesRef();
                        payloadBuilder.close();
                    } else if (token.isValue()) {
                        payload = parser.bytesOrNull();
                    } else {
                        throw new MapperException("payload doesn't support type " + token);
                    }
                } else if (token == XContentParser.Token.VALUE_STRING) {
                    if (Fields.CONTENT_FIELD_NAME_OUTPUT.equals(currentFieldName)) {
                        surfaceForm = parser.text();
                    }
                    if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
                        inputs.add(parser.text());
                    }
                } else if (token == XContentParser.Token.VALUE_NUMBER) {
                    if (Fields.CONTENT_FIELD_NAME_WEIGHT.equals(currentFieldName)) {
                        NumberType numberType = parser.numberType();
                        if (NumberType.LONG != numberType && NumberType.INT != numberType) {
                            throw new ElasticsearchIllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]");
                        }
                        // always parse a long to make sure we don't get the overflow value
                        weight = parser.longValue();
                        if (weight < 0 || weight > Integer.MAX_VALUE) {
                            throw new ElasticsearchIllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]");
                        }
                    }
                } else if (token == XContentParser.Token.START_ARRAY) {
                    if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            inputs.add(parser.text());
                        }
                    }
                }
            }
        }
        payload = payload == null ? EMPTY : payload;
        if (surfaceForm == null) {
            // no surface form given: each input acts as its own surface form
            for (String input : inputs) {
                BytesRef suggestPayload = mapper.buildPayload(new BytesRef(input), weight, payload);
                doc.add(mapper.getCompletionField(input, suggestPayload));
            }
        } else {
            BytesRef suggestPayload = mapper.buildPayload(new BytesRef(surfaceForm), weight, payload);
            for (String input : inputs) {
                doc.add(mapper.getCompletionField(input, suggestPayload));
            }
        }
    } finally {
        // Fix resource leak: the parser was previously closed only on the
        // happy path, so every thrown exception above leaked it.
        parser.close();
    }
}
Example 7
Project: elasticsearch-master  File: BlobStoreRepository.java View source code
@Override
public RepositoryData getRepositoryData() {
    // Reads the repository's snapshot index blob (and, when present, the
    // incompatible-snapshots blob) and deserializes it into RepositoryData.
    try {
        final long indexGen = latestIndexBlobId();
        final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen);
        RepositoryData repositoryData;
        try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) {
            // Buffer the whole blob in memory before parsing.
            BytesStreamOutput out = new BytesStreamOutput();
            Streams.copy(blob, out);
            // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
                repositoryData = RepositoryData.snapshotsFromXContent(parser, indexGen);
            } catch (NotXContentException e) {
                // corrupt/garbage index blob: log its size and rethrow
                logger.warn("[{}] index blob is not valid x-content [{} bytes]", snapshotsIndexBlobName, out.bytes().length());
                throw e;
            }
        }
        // now load the incompatible snapshot ids, if they exist
        try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
            BytesStreamOutput out = new BytesStreamOutput();
            Streams.copy(blob, out);
            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
                repositoryData = repositoryData.incompatibleSnapshotsFromXContent(parser);
            }
        } catch (NoSuchFileException e) {
            // A missing incompatible-snapshots blob is normal; seed it unless read-only.
            if (isReadOnly()) {
                logger.debug("[{}] Incompatible snapshots blob [{}] does not exist, the likely " + "reason is that there are no incompatible snapshots in the repository", metadata.name(), INCOMPATIBLE_SNAPSHOTS_BLOB);
            } else {
                writeIncompatibleSnapshots(RepositoryData.EMPTY);
            }
        }
        return repositoryData;
    } catch (NoSuchFileException ex) {
        // No index blob at all: a brand-new (empty) repository.
        return RepositoryData.EMPTY;
    } catch (IOException ioe) {
        throw new RepositoryException(metadata.name(), "could not read repository data from index blob", ioe);
    }
}
Example 8
Project: crate-master  File: SQLTransportExecutor.java View source code
/**
 * Parses a JSON string into Java objects: a top-level array is read as a list
 * and passed through {@code recursiveListToArray} (presumably converting nested
 * lists to arrays — see that helper); anything else is read as an ordered map.
 * A {@code null} input yields {@code null}.
 *
 * @param json JSON text, or {@code null}
 * @return the parsed structure, or {@code null} for {@code null} input
 */
private Object jsonToObject(String json) {
    if (json == null) {
        return null;
    }
    try {
        byte[] bytes = json.getBytes(StandardCharsets.UTF_8);
        XContentParser parser = JsonXContent.jsonXContent.createParser(bytes);
        try {
            // Peek at the first byte to decide between array and object form.
            // NOTE(review): leading whitespace before '[' falls through to
            // mapOrdered() — confirm inputs are never padded.
            if (bytes.length >= 1 && bytes[0] == '[') {
                parser.nextToken();
                return recursiveListToArray(parser.list());
            } else {
                return parser.mapOrdered();
            }
        } finally {
            // fix resource leak: the parser was never closed
            parser.close();
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
Example 9
Project: elasticsearch-langdetect-master  File: LangdetectMapper.java View source code
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    // Detects the language(s) of the incoming field value and adds one
    // language-code field per detected language to the document.
    if (context.externalValueSet()) {
        return;
    }
    XContentParser parser = context.parser();
    if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
        return;
    }
    // Start from the mapping's null_value (may itself be null).
    String value = fieldType().nullValueAsString();
    if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
        // Object form: read the text from a "value" or "_value" member.
        XContentParser.Token token;
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else {
                if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
                    value = parser.textOrNull();
                }
            }
        }
    } else {
        value = parser.textOrNull();
    }
    if (langdetectService.getSettings().getAsBoolean("binary", false)) {
        // "binary" mode: decode the current token's binary value to UTF-8 text.
        // NOTE(review): in the object form above the parser has already advanced
        // to END_OBJECT, so binaryValue() here presumably only works for plain
        // string values — confirm.
        try {
            byte[] b = parser.binaryValue();
            if (b != null && b.length > 0) {
                value = new String(b, StandardCharsets.UTF_8);
            }
        } catch (JsonParseException e) {
            // value was not decodable as binary: keep the text value
            logger.trace(e.getMessage(), e);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
    try {
        List<Language> langs = langdetectService.detectAll(value);
        for (Language lang : langs) {
            Field field = new Field(fieldType().name(), lang.getLanguage(), fieldType());
            fields.add(field);
            // optionally copy the value into per-language target fields
            if (languageTo.languageToFields().containsKey(lang.getLanguage())) {
                parseLanguageToFields(context, languageTo.languageToFields().get(lang.getLanguage()));
            }
        }
    } catch (LanguageDetectionException e) {
        // detection failed: record an "unknown" external value instead
        logger.trace(e.getMessage(), e);
        context.createExternalValueContext("unknown");
    }
}
Example 10
Project: elasticsearch-plugin-bundle-master  File: LangdetectMapper.java View source code
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    // Detects the language(s) of the incoming field value and adds one
    // language-code field per detected language to the document.
    if (context.externalValueSet()) {
        return;
    }
    XContentParser parser = context.parser();
    if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
        return;
    }
    // Start from the mapping's null_value (may itself be null).
    String value = fieldType().nullValueAsString();
    if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
        // Object form: read the text from a "value" or "_value" member.
        XContentParser.Token token;
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else {
                if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
                    value = parser.textOrNull();
                }
            }
        }
    } else {
        value = parser.textOrNull();
    }
    if (langdetectService.getSettings().getAsBoolean("binary", false)) {
        // "binary" mode: decode the current token's binary value to UTF-8 text.
        // NOTE(review): in the object form above the parser has already advanced
        // to END_OBJECT, so binaryValue() here presumably only works for plain
        // string values — confirm.
        try {
            byte[] b = parser.binaryValue();
            if (b != null && b.length > 0) {
                value = new String(b, StandardCharsets.UTF_8);
            }
        } catch (JsonParseException e) {
            // value was not decodable as binary: keep the text value
            logger.trace(e.getMessage(), e);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
    try {
        List<Language> langs = langdetectService.detectAll(value);
        for (Language lang : langs) {
            Field field = new Field(fieldType().name(), lang.getLanguage(), fieldType());
            fields.add(field);
            // optionally copy the value into per-language target fields
            if (languageTo.languageToFields().containsKey(lang.getLanguage())) {
                parseLanguageToFields(context, languageTo.languageToFields().get(lang.getLanguage()));
            }
        }
    } catch (LanguageDetectionException e) {
        // detection failed: record an "unknown" external value instead
        logger.trace(e.getMessage(), e);
        context.createExternalValueContext("unknown");
    }
}
Example 11
Project: elasticsearch-server-master  File: ObjectMapper.java View source code
/**
 * Parses the object this mapper is responsible for from the document source.
 * Handles null objects, concrete values in place of objects, and nested
 * mappings (which get their own Lucene document), dispatching each member
 * to the matching serialize* routine.
 *
 * @param context parse context holding the parser and the current document
 * @throws IOException if the underlying parser fails
 */
public void parse(ParseContext context) throws IOException {
    if (!enabled) {
        // mapping disabled: consume the object without indexing anything
        context.parser().skipChildren();
        return;
    }
    XContentParser parser = context.parser();
    String currentFieldName = parser.currentName();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_NULL) {
        // the object is null ("obj1" : null), simply bail
        return;
    }
    if (token.isValue() && !allowValue()) {
        // is a field name with the same name as the type
        throw new MapperParsingException("object mapping for [" + name + "] tried to parse as object, but found a concrete value");
    }
    Document restoreDoc = null;
    if (nested.isNested()) {
        // Nested object: its fields are indexed into a dedicated document.
        Document nestedDoc = new Document();
        // pre add the uid field if possible (id was already provided)
        Fieldable uidField = context.doc().getFieldable(UidFieldMapper.NAME);
        if (uidField != null) {
            // this is a deeply nested field
            if (uidField.stringValue() != null) {
                nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), Field.Store.NO, Field.Index.NOT_ANALYZED));
            } else {
                nestedDoc.add(new Field(UidFieldMapper.NAME, ((UidField) uidField).uid(), Field.Store.NO, Field.Index.NOT_ANALYZED));
            }
        }
        // the type of the nested doc starts with __, so we can identify that its a nested one in filters
        // note, we don't prefix it with the type of the doc since it allows us to execute a nested query
        // across types (for example, with similar nested objects)
        nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePath, Field.Store.NO, Field.Index.NOT_ANALYZED));
        restoreDoc = context.switchDoc(nestedDoc);
        context.addDoc(nestedDoc);
    }
    ContentPath.Type origPathType = context.path().pathType();
    context.path().pathType(pathType);
    // if we are at the end of the previous object, advance
    if (token == XContentParser.Token.END_OBJECT) {
        token = parser.nextToken();
    }
    if (token == XContentParser.Token.START_OBJECT) {
        // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first
        token = parser.nextToken();
    }
    // Walk this object's members until its END_OBJECT, dispatching by token type.
    while (token != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.START_OBJECT) {
            serializeObject(context, currentFieldName);
        } else if (token == XContentParser.Token.START_ARRAY) {
            serializeArray(context, currentFieldName);
        } else if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_NULL) {
            serializeNullValue(context, currentFieldName);
        } else if (token == null) {
            // EOF inside the object: the source was truncated or malformed
            throw new MapperParsingException("object mapping for [" + name + "] tried to parse as object, but got EOF, has a concrete value been provided to it?");
        } else if (token.isValue()) {
            serializeValue(context, currentFieldName, token);
        }
        token = parser.nextToken();
    }
    // restore the enable path flag
    context.path().pathType(origPathType);
    if (nested.isNested()) {
        // Switch back to the parent document and optionally copy the nested
        // fields up into the parent and/or root document.
        Document nestedDoc = context.switchDoc(restoreDoc);
        if (nested.isIncludeInParent()) {
            for (Fieldable field : nestedDoc.getFields()) {
                if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) {
                    continue;
                } else {
                    context.doc().add(field);
                }
            }
        }
        if (nested.isIncludeInRoot()) {
            // don't add it twice, if its included in parent, and we are handling the master doc...
            if (!(nested.isIncludeInParent() && context.doc() == context.rootDoc())) {
                for (Fieldable field : nestedDoc.getFields()) {
                    if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) {
                        continue;
                    } else {
                        context.rootDoc().add(field);
                    }
                }
            }
        }
    }
}
Example 12
Project: elasticsearch-skywalker-master  File: Skywalker.java View source code
/**
 * Loads the latest persisted {@code IndexMetaData} for the given index by
 * scanning all node data locations for "state-&lt;version&gt;" files and
 * keeping the one with the highest version number.
 *
 * @param files   output list; every candidate state file found is added to it
 * @param index   name of the index whose metadata should be loaded
 * @param nodeEnv node environment providing the index data locations
 * @return the newest parseable index metadata, or {@code null} if none found
 */
@Nullable
private static IndexMetaData loadIndex(List<File> files, String index, NodeEnvironment nodeEnv) {
    long highestVersion = -1;
    IndexMetaData indexMetaData = null;
    for (File indexLocation : nodeEnv.indexLocations(new Index(index))) {
        File stateDir = new File(indexLocation, "_state");
        if (!stateDir.exists() || !stateDir.isDirectory()) {
            continue;
        }
        // now, iterate over the current versions, and find latest one
        File[] stateFiles = stateDir.listFiles();
        if (stateFiles == null) {
            continue;
        }
        for (File stateFile : stateFiles) {
            if (!stateFile.getName().startsWith("state-")) {
                continue;
            }
            files.add(stateFile);
            try {
                // the version is encoded in the file name: "state-<version>"
                long version = Long.parseLong(stateFile.getName().substring("state-".length()));
                if (version > highestVersion) {
                    // NOTE(review): assumes Streams.copyToByteArray closes the
                    // FileInputStream — verify, otherwise this leaks a descriptor.
                    byte[] data = Streams.copyToByteArray(new FileInputStream(stateFile));
                    if (data.length == 0) {
                        // empty state file: skip it, another one may be valid
                        continue;
                    }
                    XContentParser parser = null;
                    try {
                        parser = XContentHelper.createParser(data, 0, data.length);
                        // move to START_OBJECT
                        parser.nextToken();
                        indexMetaData = IndexMetaData.Builder.fromXContent(parser);
                        highestVersion = version;
                    } finally {
                        if (parser != null) {
                            parser.close();
                        }
                    }
                }
            } catch (Exception e) {
                // best-effort scan: ignore unreadable/corrupt state files
                continue;
            }
        }
    }
    return indexMetaData;
}
Example 13
Project: elasticsearch-suggest-plugin-master  File: RestSuggestActionTest.java View source code
/**
 * Asserts that the JSON response body contains a "_shards" section that has
 * no "failures" entry.
 *
 * @param r the HTTP response to inspect
 * @throws IOException if the body cannot be parsed as JSON
 */
private void assertThatResponseHasNoShardFailures(Response r) throws IOException {
    // mapAndClose() releases the parser once the whole body is consumed.
    Map<String, Object> body = JsonXContent.jsonXContent.createParser(r.getResponseBody()).mapAndClose();
    assertThat(body, hasKey("_shards"));
    @SuppressWarnings("unchecked")
    Map<String, Object> shards = (Map<String, Object>) body.get("_shards");
    assertThat(shards, not(hasKey("failures")));
}
Example 14
Project: es-token-plugin-master  File: PrepareSpecTests.java View source code
/**
 * Builds a minimal mapping ("type" with a single string property "text") and
 * round-trips it through a parser into a {@code MappingMetaData}.
 *
 * @return mapping metadata for the test type
 * @throws IOException if building or parsing the mapping fails
 */
private MappingMetaData getMappingMetaData() throws IOException {
    XContentBuilder mapping = jsonBuilder()
            .startObject()
                .startObject("type")
                    .startObject("properties")
                        .startObject("text")
                            .field("type", "string")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();
    XContentParser parser = XContentFactory.xContent(mapping.bytes()).createParser(mapping.bytes());
    return new MappingMetaData("type", parser.mapOrdered());
}
Example 15
Project: structured-content-tools-master  File: TestUtils.java View source code
/**
 * Read JSON file from classpath into Map of Map structure.
 *
 * @param filePath path in classpath pointing to JSON file to read
 * @return parsed JSON file
 * @throws SettingsException if the file is missing or cannot be parsed
 */
public static Map<String, Object> loadJSONFromClasspathFile(String filePath) throws SettingsException {
    // getResourceAsStream returns null for a missing resource, which previously
    // surfaced as an opaque NullPointerException inside the parser factory.
    // Fail with the declared exception type and a useful message instead.
    java.io.InputStream stream = TestUtils.class.getResourceAsStream(filePath);
    if (stream == null) {
        throw new SettingsException("JSON file not found on classpath: " + filePath);
    }
    XContentParser parser = null;
    try {
        parser = XContentFactory.xContent(XContentType.JSON).createParser(stream);
        // mapAndClose() already closes the parser; the finally block below is a
        // harmless safety net for the failure path.
        return parser.mapAndClose();
    } catch (IOException e) {
        throw new SettingsException(e.getMessage(), e);
    } finally {
        if (parser != null)
            parser.close();
    }
}
Example 16
Project: crawl2-master  File: RegexQueryParser.java View source code
/**
 * Parses a regex query in either short form ({@code {"field": "pattern"}}) or
 * long form ({@code {"field": {"value": "pattern", "boost": 2.0}}}; "term" is
 * accepted as an alias for "value").
 *
 * @param parseContext context supplying the positioned parser
 * @return a case-insensitive, dot-matches-all {@link RegexQuery}
 * @throws IOException           on parser failure
 * @throws QueryParsingException when no pattern value is given
 */
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();
    XContentParser.Token token = parser.nextToken();
    assert token == XContentParser.Token.FIELD_NAME;
    String fieldName = parser.currentName();
    String value = null;
    float boost = 1.0f;
    token = parser.nextToken();
    if (token == XContentParser.Token.START_OBJECT) {
        // long form: read "term"/"value" and optional "boost"
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if ("term".equals(currentFieldName) || "value".equals(currentFieldName)) {
                value = parser.text();
            } else if ("boost".equals(currentFieldName)) {
                boost = parser.floatValue();
            }
        }
        parser.nextToken();
    } else {
        value = parser.text();
        // move to the next token
        parser.nextToken();
    }
    if (value == null) {
        throw new QueryParsingException(index, "No value specified for regex query");
    }
    RegexQuery query = new RegexQuery(new Term(fieldName, value));
    //JakartaRegexpCapabilities capabilities = new JakartaRegexpCapabilities();
    // Combine the bit flags with '|' (the previous '+' only worked because the
    // flags happen to be disjoint bits).
    JavaUtilRegexCapabilities capabilities = new JavaUtilRegexCapabilities(
            JavaUtilRegexCapabilities.FLAG_CASE_INSENSITIVE | JavaUtilRegexCapabilities.FLAG_DOTALL);
    query.setRegexImplementation(capabilities);
    query.setBoost(boost);
    // Leftover debug output was previously emitted at INFO on every query
    // (including "???" / "___" markers); keep it available at TRACE only.
    if (logger.isTraceEnabled()) {
        logger.trace("regex query [{}:{}] impl [{}] rewrite [{}]",
                fieldName, value, query.getRegexImplementation().getClass(), query.getRewriteMethod());
    }
    return query;
}
Example 17
Project: elasticsearch-aggregation-geoclustering-master  File: GeoHashClusteringParser.java View source code
/**
 * Reads the geohash-clustering aggregation definition (common values-source
 * options plus the numeric "zoom" and "distance" parameters) and builds the
 * aggregator factory.
 *
 * @param aggregationName name of the aggregation being parsed
 * @param parser          parser positioned inside the aggregation object
 * @param context         current search context
 * @return the configured {@code GeoGridFactory}
 * @throws IOException on parser failure
 */
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
    ValuesSourceParser vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoHashClustering.TYPE, context).build();
    int zoom = DEFAULT_ZOOM;
    int distance = DEFAULT_DISTANCE;
    String fieldName = null;
    for (XContentParser.Token token = parser.nextToken();
            token != XContentParser.Token.END_OBJECT;
            token = parser.nextToken()) {
        if (token == XContentParser.Token.FIELD_NAME) {
            fieldName = parser.currentName();
        } else if (vsParser.token(fieldName, token, parser)) {
            // consumed by the shared values-source parser
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
            if ("zoom".equals(fieldName)) {
                zoom = parser.intValue();
            } else if ("distance".equals(fieldName)) {
                distance = parser.intValue();
            }
        }
    }
    return new GeoGridFactory(aggregationName, vsParser.config(), zoom, distance);
}
Example 18
Project: elasticsearch-approx-plugin-master  File: DateFacetParser.java View source code
@Override
public FacetExecutor parse(final String facetName, final XContentParser parser, final SearchContext context) throws IOException {
    String keyField = null;
    String distinctField = null;
    String valueField = null;
    String sliceField = null;
    //        final String valueScript = null;
    //        String scriptLang = null;
    //        Map<String, Object> params = null;
    String interval = null;
    DateTimeZone preZone = DateTimeZone.UTC;
    DateTimeZone postZone = DateTimeZone.UTC;
    boolean preZoneAdjustLargeInterval = false;
    long preOffset = 0;
    long postOffset = 0;
    float factor = 1.0f;
    final Chronology chronology = ISOChronology.getInstanceUTC();
    XContentParser.Token token;
    String fieldName = null;
    int exactThreshold = 1000;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            fieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
        //                if("params".equals(fieldName)) {
        //                    params = parser.map();
        //                }
        } else if (token.isValue()) {
            if ("field".equals(fieldName)) {
                keyField = parser.text();
            } else if ("key_field".equals(fieldName) || "keyField".equals(fieldName)) {
                keyField = parser.text();
            } else if ("value_field".equals(fieldName) || "valueField".equals(fieldName)) {
                valueField = parser.text();
            } else if ("distinct_field".equals(fieldName) || "distinctField".equals(fieldName)) {
                distinctField = parser.text();
            } else if ("slice_field".equals(fieldName) || "sliceField".equals(fieldName)) {
                sliceField = parser.text();
            } else if ("interval".equals(fieldName)) {
                interval = parser.text();
            } else if ("time_zone".equals(fieldName) || "timeZone".equals(fieldName)) {
                preZone = parseZone(parser, token);
            } else if ("pre_zone".equals(fieldName) || "preZone".equals(fieldName)) {
                preZone = parseZone(parser, token);
            } else if ("pre_zone_adjust_large_interval".equals(fieldName) || "preZoneAdjustLargeInterval".equals(fieldName)) {
                preZoneAdjustLargeInterval = parser.booleanValue();
            } else if ("post_zone".equals(fieldName) || "postZone".equals(fieldName)) {
                postZone = parseZone(parser, token);
            } else if ("pre_offset".equals(fieldName) || "preOffset".equals(fieldName)) {
                preOffset = parseOffset(parser.text());
            } else if ("post_offset".equals(fieldName) || "postOffset".equals(fieldName)) {
                postOffset = parseOffset(parser.text());
            } else if ("factor".equals(fieldName)) {
                factor = parser.floatValue();
            /*
                    } else if("value_script".equals(fieldName) || "valueScript".equals(fieldName)) {
                    valueScript = parser.text();
                    */
            //                } else if("lang".equals(fieldName)) {
            //                    scriptLang = parser.text();
            } else if ("exact_threshold".equals(fieldName) || "exactThreshold".equals(fieldName)) {
                exactThreshold = parser.intValue();
            }
        }
    }
    if (valueField != null && distinctField != null)
        throw new FacetPhaseExecutionException(facetName, "[value_field] and [distinct_field] may not be used together");
    if (interval == null) {
        throw new FacetPhaseExecutionException(facetName, "[interval] is required to be set for histogram facet");
    }
    TimeZoneRounding.Builder tzRoundingBuilder;
    final DateFieldParser fieldParser = dateFieldParsers.get(interval);
    if (fieldParser != null) {
        tzRoundingBuilder = TimeZoneRounding.builder(fieldParser.parse(chronology));
    } else {
        // the interval is a time value?
        tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.parseTimeValue(interval, null));
    }
    final TimeZoneRounding tzRounding = tzRoundingBuilder.preZone(preZone).postZone(postZone).preZoneAdjustLargeInterval(preZoneAdjustLargeInterval).preOffset(preOffset).postOffset(postOffset).factor(factor).build();
    final TypedFieldData keyFieldData = getKeyFieldData(keyField, context);
    if (keyFieldData == null)
        throw new FacetPhaseExecutionException(facetName, "[key_field] is required to be set for distinct date histogram facet");
    final TypedFieldData valueFieldData = getFieldData(valueField, context);
    final TypedFieldData distinctFieldData = getFieldData(distinctField, context);
    final TypedFieldData sliceFieldData = getFieldData(sliceField, context);
    return new DateFacetExecutor(keyFieldData, valueFieldData, distinctFieldData, sliceFieldData, tzRounding, exactThreshold);
// TODO implement scripts
/*
        if (valueScript != null) {
            SearchScript script = context.scriptService().search(context.lookup(), scriptLang, valueScript, params);
            return new ValueScriptDateHistogramFacetExecutor(keyIndexFieldData, script, tzRounding, comparatorType);
        } else if (valueField != null) {
            FieldMapper valueMapper = context.smartNameFieldMapper(valueField);
            if (valueMapper == null) {
                throw new FacetPhaseExecutionException(facetName, "(value) field [" + valueField + "] not found");
            }
            IndexNumericFieldData valueIndexFieldData = context.fieldData().getForField(valueMapper);
            return new ValueDateHistogramFacetExecutor(keyIndexFieldData, valueIndexFieldData, tzRounding, comparatorType);
        } else {
            return new CountDateHistogramFacetExecutor(keyIndexFieldData, tzRounding, comparatorType);
        }
        */
//            if(distinctFieldMapper.fieldDataType().getType().equals("string")) {
//                final PagedBytesIndexFieldData distinctFieldData = context.fieldData().getForField(distinctFieldMapper);
//                final LongArrayIndexFieldData keyIndexFieldData = context.fieldData().getForField(keyMapper);
//                return new StringDistinctDateHistogramFacetExecutor(
//                        keyIndexFieldData, distinctFieldData, tzRounding, comparatorType, maxExactPerShard);
//            } else if(distinctFieldMapper.fieldDataType().getType().equals("long")
//                    || distinctFieldMapper.fieldDataType().getType().equals("int")
//                    || distinctFieldMapper.fieldDataType().getType().equals("short")
//                    || distinctFieldMapper.fieldDataType().getType().equals("byte")) {
//                final IndexNumericFieldData distinctFieldData = context.fieldData().getForField(distinctFieldMapper);
//                final LongArrayIndexFieldData keyIndexFieldData = context.fieldData().getForField(keyMapper);
//                return new LongDistinctDateHistogramFacetExecutor(
//                        keyIndexFieldData, distinctFieldData, tzRounding, comparatorType, maxExactPerShard);
//            } else {
//                throw new FacetPhaseExecutionException(facetName, "distinct field [" + distinctField + "] is not of type string or long");
//            }
}
Example 19
Project: elasticsearch-functionscore-conditionalboost-master  File: CondBoostFactorFunctionParser.java View source code
/**
 * Parses a conditional-boost function definition from the current object.
 * Recognized scalar entries are the default boost, "factor" and "modifier";
 * any array entry is handed to {@code parseCondArray} to build the condition
 * list. Unknown scalar fields are rejected.
 */
@Override
public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
    List<CondBoostEntry> entries = new LinkedList<>();
    float fallbackBoost = 1.0f;
    float factor = 1.0f;
    CondBoostFactorFunction.Modifier mod = CondBoostFactorFunction.Modifier.NONE;
    String fieldName = null;
    for (XContentParser.Token tok = parser.nextToken(); tok != XContentParser.Token.END_OBJECT; tok = parser.nextToken()) {
        if (tok == XContentParser.Token.FIELD_NAME) {
            fieldName = parser.currentName();
        } else if (tok == XContentParser.Token.START_ARRAY) {
            // The array holds the per-condition boost entries.
            entries = parseCondArray(parseContext, parser, fieldName);
        } else if (tok.isValue() && fieldName != null) {
            if (CondBoostEntry.BOOST.equals(fieldName)) {
                fallbackBoost = parser.floatValue();
            } else if ("factor".equals(fieldName)) {
                factor = parser.floatValue();
            } else if ("modifier".equals(fieldName)) {
                // Modifier names are matched case-insensitively via ROOT locale.
                mod = CondBoostFactorFunction.Modifier.valueOf(parser.text().toUpperCase(Locale.ROOT));
            } else {
                throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + fieldName + "]");
            }
        }
    }
    return new CondBoostFactorFunction(parseContext, entries, fallbackBoost, factor, mod);
}
Example 20
Project: flume-ng-elasticsearch-serializer-num-master  File: ContentBuilderUtil.java View source code
/**
 * Adds {@code data} to {@code builder} under {@code fieldName}. The bytes are
 * round-tripped through an XContent parser into a temporary JSON builder so
 * that valid structured content is embedded as a real object value; if the
 * bytes are not parseable, they are added via {@code addSimpleField} instead.
 *
 * @param builder     target builder the field is appended to
 * @param fieldName   name of the field to add
 * @param contentType content type used to select the parser for {@code data}
 * @param data        raw field value bytes
 * @throws IOException if writing to the builder fails
 */
public static void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    XContentParser parser = null;
    try {
        XContentBuilder tmp = jsonBuilder();
        parser = XContentFactory.xContent(contentType).createParser(data);
        // Position the parser on the first token so there is a current
        // structure for copyCurrentStructure to consume.
        parser.nextToken();
        tmp.copyCurrentStructure(parser);
        builder.field(fieldName, tmp);
    } catch (JsonParseException ex) {
        // Not valid structured content — fall back to storing the raw bytes.
        addSimpleField(builder, fieldName, data);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 21
Project: mt-flume-master  File: ContentBuilderUtil.java View source code
/**
 * Appends the given raw bytes to {@code builder} as field {@code fieldName}.
 * Parseable content is copied over as a structured value; anything the JSON
 * parser rejects is written through {@code addSimpleField} as plain data.
 */
public static void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    XContentParser parser = null;
    try {
        XContentBuilder scratch = jsonBuilder();
        parser = XContentFactory.xContent(contentType).createParser(data);
        // Advance onto the first token so the copy has a current structure.
        parser.nextToken();
        scratch.copyCurrentStructure(parser);
        builder.field(fieldName, scratch);
    } catch (JsonParseException invalidContent) {
        // The payload is not well-formed for this content type; store it verbatim.
        addSimpleField(builder, fieldName, data);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 22
Project: significance-showcase-master  File: SignificanceShowcaseRestHandler.java View source code
/**
 * Handles a significance-showcase REST request. Reads target indices/types
 * from request parameters, parses the optional request body for "query"
 * (either an embedded object or a string), "size" and "field" overrides, then
 * delegates to the significant-terms provider.
 *
 * @throws IOException        if the request body cannot be parsed
 * @throws ExecutionException propagated from the significant-terms provider
 */
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) throws ExecutionException, IOException {
    String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
    String[] types = Strings.splitStringByCommaToArray(request.param("type"));
    BytesReference data = request.content();
    XContent xContent = XContentFactory.xContent(data);
    // default values
    String query = "{\"match_all\":{}}";
    String field = "_all";
    int size = 20;
    String currentFieldName = null;
    // FIX: the parser was previously never closed; release it deterministically.
    XContentParser parser = xContent.createParser(data);
    try {
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if ("query".equals(currentFieldName)) {
                if (token == XContentParser.Token.START_OBJECT && !parser.hasTextCharacters()) {
                    // Re-serialize the embedded query object back to a string.
                    XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent());
                    builder.copyCurrentStructure(parser);
                    query = builder.string();
                } else {
                    query = parser.text();
                }
            } else if ("size".equals(currentFieldName)) {
                size = parser.intValue();
            } else if ("field".equals(currentFieldName)) {
                field = parser.text();
            }
        }
    } finally {
        parser.close();
    }
    this.significantTermsProvider.writeSignificantTerms(channel, indices, types, field, size, query);
}
Example 23
Project: elasticsearch-analysis-hashsplitter-master  File: HashSplitterWildcardQueryParser.java View source code
/**
 * Parses a [NAME] wildcard query. Two syntaxes are accepted: the object form
 * {@code {field: {"wildcard"|"value": v, "boost": b, "rewrite": r}}} and the
 * shorthand {@code {field: v}}. If the target field is mapped with a
 * CustomWildcardSearchFieldMapper, that mapper builds the query; otherwise
 * this falls back to a plain Lucene WildcardQuery, mirroring
 * org.elasticsearch.index.query.WildcardQueryParser.
 */
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();
    XContentParser.Token token = parser.nextToken();
    if (token != XContentParser.Token.FIELD_NAME) {
        throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query malformed, no field");
    }
    String fieldName = parser.currentName();
    String rewriteMethod = null;
    String value = null;
    float boost = 1.0f;
    token = parser.nextToken();
    if (token == XContentParser.Token.START_OBJECT) {
        // Long form: walk the inner object's key/value pairs.
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else {
                // "wildcard" and "value" are synonyms for the pattern.
                if ("wildcard".equals(currentFieldName)) {
                    value = parser.text();
                } else if ("value".equals(currentFieldName)) {
                    value = parser.text();
                } else if ("boost".equals(currentFieldName)) {
                    boost = parser.floatValue();
                } else if ("rewrite".equals(currentFieldName)) {
                    rewriteMethod = parser.textOrNull();
                } else {
                    throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support [" + currentFieldName + "]");
                }
            }
        }
        // Step past the END_OBJECT so the outer parse loop stays aligned.
        parser.nextToken();
    } else {
        // Short form: the field's value is the wildcard pattern itself.
        value = parser.text();
        parser.nextToken();
    }
    if (value == null) {
        throw new QueryParsingException(parseContext.index(), "No value specified for " + NAME + " query");
    }
    Query query = null;
    MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
    if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
        FieldMapper mapper = smartNameFieldMappers.mapper();
        if (mapper != null && mapper instanceof CustomWildcardSearchFieldMapper) {
            CustomWildcardSearchFieldMapper hashsplitterMapper = (CustomWildcardSearchFieldMapper) mapper;
            query = hashsplitterMapper.wildcardQuery(value, QueryParsers.parseRewriteMethod(rewriteMethod), parseContext);
        }
        if (query == null) {
            // not associated with a HashSplitterFieldMapper OR wildcardQuery() returned null
            // Fallback on the same code as org.elasticsearch.index.query.WildcardQueryParser
            fieldName = smartNameFieldMappers.mapper().names().indexName();
            value = smartNameFieldMappers.mapper().indexedValue(value);
        }
    }
    if (query == null) {
        WildcardQuery q = new WildcardQuery(new Term(fieldName, value));
        q.setRewriteMethod(QueryParsers.parseRewriteMethod(rewriteMethod));
        query = q;
    }
    query.setBoost(boost);
    return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
}
Example 24
Project: elasticsearch-facet-script-master  File: ScriptFacetParser.java View source code
/**
 * Parses a script facet definition. Accepts the scripts ("init_script",
 * "map_script", "combine_script", "reduce_script" — camelCase variants too),
 * the script "lang", and the object-valued "params"/"reduce_params" maps.
 * "map_script" is mandatory.
 *
 * @throws FacetPhaseExecutionException if no map script was supplied
 */
@Override
public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
    String initScript = null;
    String mapScript = null;
    String combineScript = null;
    String reduceScript = null;
    String scriptLang = null;
    Map<String, Object> params = null;
    Map<String, Object> reduceParams = null;
    String currentName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentName = parser.currentName();
            continue;
        }
        if (token == XContentParser.Token.START_OBJECT) {
            // Object-valued entries: the script parameter maps.
            if ("params".equals(currentName)) {
                params = parser.map();
            } else if ("reduce_params".equals(currentName)) {
                reduceParams = parser.map();
            }
        } else if (token.isValue()) {
            // Scalar entries: the scripts themselves plus the language.
            if ("init_script".equals(currentName) || "initScript".equals(currentName)) {
                initScript = parser.text();
            } else if ("map_script".equals(currentName) || "mapScript".equals(currentName)) {
                mapScript = parser.text();
            } else if ("combine_script".equals(currentName) || "combineScript".equals(currentName)) {
                combineScript = parser.text();
            } else if ("reduce_script".equals(currentName) || "reduceScript".equals(currentName)) {
                reduceScript = parser.text();
            } else if ("lang".equals(currentName)) {
                scriptLang = parser.text();
            }
        }
    }
    if (mapScript == null) {
        throw new FacetPhaseExecutionException(facetName, "map_script field is required");
    }
    return new ScriptFacetCollector(scriptLang, initScript, mapScript, combineScript, reduceScript, params, reduceParams, context, client);
}
Example 25
Project: elasticshell-master  File: GetIndexTemplateRequestBuilder.java View source code
/**
 * Renders every index template in the cluster state as XContent: for each
 * template its pattern, order, flattened settings, and mappings. Mapping
 * sources are decompressed and re-parsed; a mapping whose single root key is
 * the type name is unwrapped so the type appears only once in the output.
 *
 * @return the builder passed in, for chaining
 * @throws IOException if building or parsing fails
 */
@Override
@SuppressWarnings("unchecked")
protected XContentBuilder toXContent(ClusterStateRequest request, ClusterStateResponse response, XContentBuilder builder) throws IOException {
    MetaData metaData = response.getState().metaData();
    builder.startObject();
    for (IndexTemplateMetaData indexMetaData : metaData.templates().values()) {
        builder.startObject(indexMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
        builder.field("template", indexMetaData.template());
        builder.field("order", indexMetaData.order());
        builder.startObject("settings");
        for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
            builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
        builder.startObject("mappings");
        for (Map.Entry<String, CompressedString> entry : indexMetaData.mappings().entrySet()) {
            byte[] mappingSource = entry.getValue().uncompressed();
            // FIX: close each parser — previously one parser leaked per mapping.
            Map<String, Object> mapping;
            XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
            try {
                mapping = parser.map();
            } finally {
                parser.close();
            }
            if (mapping.size() == 1 && mapping.containsKey(entry.getKey())) {
                // the type name is the root value, reduce it
                mapping = (Map<String, Object>) mapping.get(entry.getKey());
            }
            builder.field(entry.getKey());
            builder.map(mapping);
        }
        builder.endObject();
        builder.endObject();
    }
    builder.endObject();
    return builder;
}
Example 26
Project: flume-master  File: ContentBuilderUtil.java View source code
/**
 * Adds {@code data} to {@code builder} under {@code fieldName}. Elasticsearch
 * will accept JSON directly, but the incoming event must be validated first.
 * The elasticsearch JSON parser is a stream parser, so the event is parsed
 * once to validate it and the parser is then re-created to hand the content
 * to the builder. If validation fails the event is submitted as plain text
 * via {@code addSimpleField}.
 *
 * @param builder     target builder the field is appended to
 * @param fieldName   name of the field to add
 * @param contentType content type used to select the parser for {@code data}
 * @param data        raw field value bytes
 * @throws IOException if writing to the builder fails
 */
public static void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    XContentParser parser = null;
    try {
        XContentParser validator = XContentFactory.xContent(contentType).createParser(data);
        try {
            // Draining the token stream is the validation.
            while (validator.nextToken() != null) {
            }
        } finally {
            // FIX: this parser was previously abandoned unclosed when the
            // variable was reassigned for the second pass.
            validator.close();
        }
        // The JSON is valid, so include it.
        parser = XContentFactory.xContent(contentType).createParser(data);
        // Add the field name, but not the value.
        builder.field(fieldName);
        // This adds the whole parsed content as the value of the field.
        builder.copyCurrentStructure(parser);
    } catch (JsonParseException ex) {
        addSimpleField(builder, fieldName, data);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 27
Project: flume-ng-elasticsearch-ser-ex-master  File: ContentBuilderUtilEx.java View source code
/**
 * Adds {@code data} to the builder under {@code fieldName}: parseable content
 * is materialized as a map and embedded as an object value; content the JSON
 * parser rejects falls back to {@code addSimpleField} as plain data.
 */
public static void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    XContentParser parser = null;
    try {
        parser = XContentFactory.xContent(contentType).createParser(data);
        // Read the whole payload into a map and embed it as the field value.
        builder.field(fieldName, parser.map());
    } catch (JsonParseException notJson) {
        // Payload is not valid for this content type; store the raw bytes as-is.
        addSimpleField(builder, fieldName, data);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 28
Project: Ingestion-master  File: ContentBuilderUtil.java View source code
/**
 * Adds {@code data} to {@code builder} under {@code fieldName}. Elasticsearch
 * will accept JSON directly, but the incoming event must be validated first.
 * The elasticsearch JSON parser is a stream parser, so the event is parsed
 * once to validate it and the parser is then re-created to hand the content
 * to the builder. If validation fails the event is submitted as plain text
 * via {@code addSimpleField}.
 *
 * @param builder     target builder the field is appended to
 * @param fieldName   name of the field to add
 * @param contentType content type used to select the parser for {@code data}
 * @param data        raw field value bytes
 * @throws IOException if writing to the builder fails
 */
public static void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    XContentParser parser = null;
    try {
        XContentParser validator = XContentFactory.xContent(contentType).createParser(data);
        try {
            // Draining the token stream is the validation.
            while (validator.nextToken() != null) {
            }
        } finally {
            // FIX: this parser was previously abandoned unclosed when the
            // variable was reassigned for the second pass.
            validator.close();
        }
        // The JSON is valid, so include it.
        parser = XContentFactory.xContent(contentType).createParser(data);
        // Add the field name, but not the value.
        builder.field(fieldName);
        // This adds the whole parsed content as the value of the field.
        builder.copyCurrentStructure(parser);
    } catch (JsonParseException ex) {
        addSimpleField(builder, fieldName, data);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 29
Project: elasticsearch-image-master  File: ImageMapper.java View source code
/**
 * Parses an image field: decodes the binary content, scales it down if either
 * dimension exceeds MAX_IMAGE_DIMENSION, extracts every configured LIRE
 * feature (optionally in parallel on the generic thread pool), indexes each
 * feature's byte representation plus any configured hashes, and finally maps
 * any requested image metadata tags.
 *
 * @param context parse context whose parser must be positioned on a string
 *                (base64 binary) value
 * @throws MapperParsingException if no content is provided
 * @throws IOException            if reading the image fails
 */
@Override
public void parse(ParseContext context) throws IOException {
    byte[] content = null;
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
        content = parser.binaryValue();
    }
    if (content == null) {
        throw new MapperParsingException("No content is provided.");
    }
    final Boolean useThreadPool = settings.getAsBoolean("index.image.use_thread_pool", true);
    final Boolean ignoreMetadataError = settings.getAsBoolean("index.image.ignore_metadata_error", true);
    BufferedImage img = ImageIO.read(new BytesStreamInput(content, false));
    // Downscale oversized images before feature extraction.
    if (Math.max(img.getHeight(), img.getWidth()) > MAX_IMAGE_DIMENSION) {
        img = ImageUtils.scaleImage(img, MAX_IMAGE_DIMENSION);
    }
    final BufferedImage finalImg = img;
    // Concurrent map collecting features extracted by the worker threads.
    final Map<FeatureEnum, LireFeature> featureExtractMap = new MapMaker().makeMap();
    // have multiple features, use ThreadPool to process each feature
    if (useThreadPool && features.size() > 1) {
        final CountDownLatch latch = new CountDownLatch(features.size());
        Executor executor = threadPool.generic();
        for (ObjectObjectCursor<FeatureEnum, Map<String, Object>> cursor : features) {
            final FeatureEnum featureEnum = cursor.key;
            executor.execute(new Runnable() {

                @Override
                public void run() {
                    try {
                        LireFeature lireFeature = featureEnum.getFeatureClass().newInstance();
                        lireFeature.extract(finalImg);
                        featureExtractMap.put(featureEnum, lireFeature);
                    } catch (Throwable e) {
                        // Extraction failure is logged; the serial loop below
                        // retries any feature missing from the map.
                        logger.error("Failed to extract feature from image", e);
                    } finally {
                        latch.countDown();
                    }
                }
            });
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            logger.debug("Interrupted extract feature from image", e);
            Thread.currentThread().interrupt();
        }
    }
    // Index every feature — reusing the parallel results where available.
    for (ObjectObjectCursor<FeatureEnum, Map<String, Object>> cursor : features) {
        FeatureEnum featureEnum = cursor.key;
        Map<String, Object> featureMap = cursor.value;
        try {
            LireFeature lireFeature;
            if (featureExtractMap.containsKey(featureEnum)) {
                // already processed
                lireFeature = featureExtractMap.get(featureEnum);
            } else {
                lireFeature = featureEnum.getFeatureClass().newInstance();
                lireFeature.extract(img);
            }
            byte[] parsedContent = lireFeature.getByteArrayRepresentation();
            Mapper featureMapper = featureMappers.get(featureEnum.name());
            context.externalValue(parsedContent);
            featureMapper.parse(context);
            context.doc().add(new BinaryDocValuesField(name() + "." + featureEnum.name(), new BytesRef(parsedContent)));
            // add hash if required
            if (featureMap.containsKey(HASH)) {
                List<String> hashes = (List<String>) featureMap.get(HASH);
                for (String h : hashes) {
                    HashEnum hashEnum = HashEnum.valueOf(h);
                    int[] hashVals = null;
                    if (hashEnum.equals(HashEnum.BIT_SAMPLING)) {
                        hashVals = BitSampling.generateHashes(lireFeature.getDoubleHistogram());
                    } else if (hashEnum.equals(HashEnum.LSH)) {
                        hashVals = LocalitySensitiveHashing.generateHashes(lireFeature.getDoubleHistogram());
                    }
                    String mapperName = featureEnum.name() + "." + HASH + "." + h;
                    Mapper hashMapper = hashMappers.get(mapperName);
                    context.externalValue(SerializationUtils.arrayToString(hashVals));
                    hashMapper.parse(context);
                }
            }
        } catch (Exception e) {
            throw new ElasticsearchImageProcessException("Failed to index feature " + featureEnum.name(), e);
        }
    }
    // process metadata if required
    if (!metadataMappers.isEmpty()) {
        try {
            Metadata metadata = ImageMetadataReader.readMetadata(new BufferedInputStream(new BytesStreamInput(content, false)), false);
            for (Directory directory : metadata.getDirectories()) {
                for (Tag tag : directory.getTags()) {
                    // Key format: "<directory>.<tag>" lower-cased with spaces as underscores.
                    String metadataName = tag.getDirectoryName().toLowerCase().replaceAll("\\s+", "_") + "." + tag.getTagName().toLowerCase().replaceAll("\\s+", "_");
                    if (metadataMappers.containsKey(metadataName)) {
                        Mapper mapper = metadataMappers.get(metadataName);
                        context.externalValue(tag.getDescription());
                        mapper.parse(context);
                    }
                }
            }
        } catch (ImageProcessingException e) {
            logger.error("Failed to extract metadata from image", e);
            if (!ignoreMetadataError) {
                throw new ElasticsearchImageProcessException("Failed to extract metadata from image", e);
            }
        }
    }
}
Example 30
Project: elasticsearch-term-plugin-master  File: TermListFacetParser.java View source code
/**
 * Parses a term-list facet definition: the mandatory "fields" array plus the
 * optional "max_per_shard", "search", "prefix", "case_insenstive" (note the
 * original key spelling, kept for compatibility) and "sort" options. Every
 * requested field must exist and be string-mapped.
 *
 * @throws FacetPhaseExecutionException if fields are missing or not string-mapped
 */
public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
    List<String> fields = null;
    String searchText = null;
    int maxPerShard = 100;
    boolean prefix = false;
    boolean caseInsensitive = true;
    boolean sort = true;
    String fieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            fieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_ARRAY) {
            if ("fields".equals(fieldName)) {
                fields = new ArrayList<String>();
                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                    fields.add(parser.text());
                }
            }
        } else if (token.isValue()) {
            if ("max_per_shard".equals(fieldName)) {
                maxPerShard = parser.intValue();
            } else if ("search".equals(fieldName)) {
                searchText = parser.text();
            } else if ("prefix".equals(fieldName)) {
                prefix = parser.booleanValue();
            } else if ("case_insenstive".equals(fieldName)) {
                caseInsensitive = parser.booleanValue();
            } else if ("sort".equals(fieldName)) {
                sort = parser.booleanValue();
            }
        }
    }
    // a non-empty field list is required
    if (fields == null || fields.isEmpty()) {
        throw new FacetPhaseExecutionException(facetName, "fields is required to be set for term list facet, either using [fields]");
    }
    // every requested field must be present and string-mapped
    for (String field : fields) {
        final FieldMapper mapper = context.smartNameFieldMapper(field);
        if (mapper == null) {
            logger.warn("No mapping found for Field : {} ", field);
            throw new FacetPhaseExecutionException(facetName, "(key) field [" + field + "] not found");
        }
        if (!"string".equals(mapper.fieldDataType().getType())) {
            logger.warn("No String mapping found for Field : {} ", field);
            throw new FacetPhaseExecutionException(facetName, "No String mapping found for field [" + field + "] not found");
        }
    }
    return new TermListFacetExecutor(facetName, fields, searchText, prefix, context, maxPerShard, caseInsensitive, sort);
}
Example 31
Project: elasticsearch-xml-master  File: XmlXContentGenerator.java View source code
/**
 * Recursively copies the parser's current structure (object, array, or scalar)
 * to the generator. A leading FIELD_NAME token is emitted first and the parser
 * advanced so the associated value is copied as well.
 */
public static void copyCurrentStructure(XContentGenerator generator, XContentParser parser) throws IOException {
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.FIELD_NAME) {
        // Write the name, then advance onto its value and fall through.
        generator.writeFieldName(parser.currentName());
        token = parser.nextToken();
    }
    if (token == XContentParser.Token.START_ARRAY) {
        generator.writeStartArray();
        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
            copyCurrentStructure(generator, parser);
        }
        generator.writeEndArray();
    } else if (token == XContentParser.Token.START_OBJECT) {
        generator.writeStartObject();
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            copyCurrentStructure(generator, parser);
        }
        generator.writeEndObject();
    } else {
        // Scalars are forwarded directly as single events.
        copyCurrentEvent(generator, parser);
    }
}
Example 32
Project: elasticflume-master  File: ElasticSearchSink.java View source code
/**
 * Copies the structured content in {@code data} into {@code builder} as the
 * value of {@code fieldName}. The parser is always closed once created.
 */
private void addComplexField(XContentBuilder builder, String fieldName, XContentType contentType, byte[] data) throws IOException {
    final XContentParser parser = XContentFactory.xContent(contentType).createParser(data);
    try {
        // Advance to the first token so there is a current structure to copy.
        parser.nextToken();
        builder.field(fieldName).copyCurrentStructure(parser);
    } finally {
        parser.close();
    }
}
Example 33
Project: elasticsearch-analysis-german-master  File: LangdetectMapper.java View source code
/**
 * Parses a string value, indexes it under the content sub-field, then runs
 * language detection and indexes every detected language code under the
 * language sub-field ("unknown" if detection fails). When the detector's
 * "binary" setting is enabled, the value is additionally tried as base64
 * binary and, if that succeeds, decoded as UTF-8 text instead.
 *
 * @param context parse context whose parser should be on a string value;
 *                non-string tokens are silently ignored
 * @throws IOException propagated from the sub-field mappers
 */
@Override
public void parse(ParseContext context) throws IOException {
    String content = null;
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
        content = parser.text();
        if (detector.getSettings().getAsBoolean("binary", false)) {
            try {
                byte[] b = parser.binaryValue();
                if (b != null && b.length > 0) {
                    content = new String(b, Charset.forName("UTF-8"));
                }
            } catch (Exception e) {
                // Best-effort: if the value is not valid base64 binary, keep
                // the plain string content read above.
            }
        }
    }
    if (content == null) {
        return;
    }
    // Index the raw text under the content sub-field.
    context = context.createExternalValueContext(content);
    contentMapper.parse(context);
    try {
        List<Language> langs = detector.detectAll(content);
        for (Language lang : langs) {
            context = context.createExternalValueContext(lang.getLanguage());
            langMapper.parse(context);
        }
    } catch (LanguageDetectionException e) {
        // Detection failed; index the sentinel language instead.
        context = context.createExternalValueContext("unknown");
        langMapper.parse(context);
    }
}
Example 34
Project: elasticsearch-analysis-standardnumber-master  File: StandardNumberMapper.java View source code
/**
 * Parses a string value, indexes it under the content sub-field, then detects
 * standard numbers in the text and indexes each normalized value under the
 * standard-number sub-field ("unknown" if detection throws). Always returns
 * {@code null} — no dynamic sub-mapper is created.
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    XContentParser parser = context.parser();
    // Only plain string tokens carry content worth analyzing.
    String value = parser.currentToken() == XContentParser.Token.VALUE_STRING ? parser.text() : null;
    if (value == null) {
        return null;
    }
    // Index the raw text under the content sub-field.
    context = context.createExternalValueContext(value);
    contentMapper.parse(context);
    try {
        // Index every normalized standard number detected in the text.
        for (StandardNumber stdnum : service.detect(value)) {
            context = context.createExternalValueContext(stdnum.normalizedValue());
            stdnumMapper.parse(context);
        }
    } catch (NumberFormatException e) {
        // Detection failed; index the sentinel value instead.
        context = context.createExternalValueContext("unknown");
        stdnumMapper.parse(context);
    }
    return null;
}
Example 35
Project: elasticsearch-carrot2-master  File: ClusteringAction.java View source code
/**
 * Parses some {@link org.elasticsearch.common.xcontent.XContent} and fills in
 * the request: query hint, field mappings, algorithm, attributes, an embedded
 * search request (re-serialized to JSON and parsed into a
 * {@code SearchSourceBuilder}), the deprecated "include_hits" flag, and
 * "max_hits". A {@code null} or empty source is a no-op; any parse failure is
 * wrapped in a ClusteringException that echoes the offending source.
 */
@SuppressWarnings("unchecked")
public void source(BytesReference source, XContentType xContentType, NamedXContentRegistry xContentRegistry) {
    if (source == null || source.length() == 0) {
        return;
    }
    try (XContentParser parser = XContentHelper.createParser(xContentRegistry, source, xContentType)) {
        // TODO: we should avoid reparsing search_request here 
        // but it's terribly difficult to slice the underlying byte 
        // buffer to get just the search request.
        Map<String, Object> asMap = parser.mapOrdered();
        String queryHint = (String) asMap.get("query_hint");
        if (queryHint != null) {
            setQueryHint(queryHint);
        }
        Map<String, List<String>> fieldMapping = (Map<String, List<String>>) asMap.get("field_mapping");
        if (fieldMapping != null) {
            parseFieldSpecs(fieldMapping);
        }
        String algorithm = (String) asMap.get("algorithm");
        if (algorithm != null) {
            setAlgorithm(algorithm);
        }
        Map<String, Object> attributes = (Map<String, Object>) asMap.get("attributes");
        if (attributes != null) {
            setAttributes(attributes);
        }
        Map<String, ?> searchRequestMap = (Map<String, ?>) asMap.get("search_request");
        if (searchRequestMap != null) {
            if (this.searchRequest == null) {
                searchRequest = new SearchRequest();
            }
            // Round-trip the map through a JSON builder so SearchSourceBuilder
            // can parse it from a fresh parser.
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).map(searchRequestMap);
            XContentParser searchXParser = XContentFactory.xContent(XContentType.JSON).createParser(xContentRegistry, builder.bytes());
            QueryParseContext parseContext = new QueryParseContext(searchXParser);
            SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parseContext);
            searchRequest.source(searchSourceBuilder);
        }
        Object includeHits = asMap.get("include_hits");
        if (includeHits != null) {
            Loggers.getLogger(getClass()).warn("Request used deprecated 'include_hits' parameter.");
            setIncludeHits(Boolean.parseBoolean(includeHits.toString()));
        }
        Object maxHits = asMap.get("max_hits");
        if (maxHits != null) {
            setMaxHits(maxHits.toString());
        }
    } catch (Exception e) {
        String sSource = "_na_";
        try {
            sSource = XContentHelper.convertToJson(source, false, false, xContentType);
        } catch (Throwable e1) {
            // Best-effort: keep the "_na_" placeholder if the source cannot
            // be rendered as JSON for the error message.
        }
        throw new ClusteringException("Failed to parse source [" + sSource + "]", e);
    }
}
Example 36
Project: elasticsearch-extended-analyze-master  File: RestExtendedAnalyzeAction.java View source code
/**
 * Populates an {@link ExtendedAnalyzeRequest} from a JSON request body.
 * <p>
 * Recognized fields: {@code text} (string or array), {@code analyzer},
 * {@code field}, {@code tokenizer}, {@code token_filters}/{@code filters} (array),
 * {@code char_filters} (array), {@code attributes} (array) and
 * {@code use_short_attr} (boolean). Any other field, or a field of the wrong
 * token type, is rejected.
 *
 * @param content        raw request body
 * @param analyzeRequest request object to fill in place
 * @throws IllegalArgumentException if the body is malformed, contains an
 *                                  unknown field, or cannot be parsed
 */
public static void buildFromContent(BytesReference content, ExtendedAnalyzeRequest analyzeRequest) throws IllegalArgumentException {
    try (XContentParser parser = XContentHelper.createParser(content)) {
        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
            throw new IllegalArgumentException("Malformed content, must start with an object");
        } else {
            XContentParser.Token token;
            String currentFieldName = null;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if ("text".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    analyzeRequest.text(parser.text());
                } else if ("text".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) {
                    List<String> texts = parseStringArray(parser, currentFieldName, "text");
                    analyzeRequest.text(texts.toArray(Strings.EMPTY_ARRAY));
                } else if ("analyzer".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    analyzeRequest.analyzer(parser.text());
                } else if ("field".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    analyzeRequest.field(parser.text());
                } else if ("tokenizer".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    analyzeRequest.tokenizer(parser.text());
                } else if (("token_filters".equals(currentFieldName) || "filters".equals(currentFieldName)) && token == XContentParser.Token.START_ARRAY) {
                    List<String> filters = parseStringArray(parser, currentFieldName, "token filter's name");
                    analyzeRequest.tokenFilters(filters.toArray(Strings.EMPTY_ARRAY));
                } else if ("char_filters".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) {
                    List<String> charFilters = parseStringArray(parser, currentFieldName, "char filter's name");
                    // BUG FIX: this previously called analyzeRequest.tokenFilters(...),
                    // which silently overwrote the token-filter list with the char
                    // filter names and never set the char filters at all.
                    // (Assumes ExtendedAnalyzeRequest exposes charFilters(String...)
                    // like the core AnalyzeRequest — TODO confirm against the class.)
                    analyzeRequest.charFilters(charFilters.toArray(Strings.EMPTY_ARRAY));
                } else if ("attributes".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) {
                    List<String> attributes = parseStringArray(parser, currentFieldName, "attribute name");
                    analyzeRequest.attributes(attributes.toArray(Strings.EMPTY_ARRAY));
                } else if ("use_short_attr".equals(currentFieldName) && token == XContentParser.Token.VALUE_BOOLEAN) {
                    analyzeRequest.shortAttributeName(parser.booleanValue());
                } else {
                    throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] ");
                }
            }
        }
    } catch (IOException e) {
        throw new IllegalArgumentException("Failed to parse request body", e);
    }
}

/**
 * Consumes the rest of a JSON array of scalar values and returns them as strings.
 * The parser must be positioned on the START_ARRAY token.
 *
 * @param parser             parser positioned inside the array
 * @param currentFieldName   field name, used in error messages
 * @param elementDescription human-readable description of the expected elements
 * @throws IllegalArgumentException if an element is not a scalar value
 */
private static List<String> parseStringArray(XContentParser parser, String currentFieldName, String elementDescription) throws IOException {
    List<String> values = new ArrayList<>();
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
        if (token.isValue() == false) {
            throw new IllegalArgumentException(currentFieldName + " array element should only contain " + elementDescription);
        }
        values.add(parser.text());
    }
    return values;
}
Example 37
Project: elasticsearch-opennlp-plugin-master  File: OpenNlpMapper.java View source code
/**
 * Indexes the incoming string value through the content mapper, then runs
 * OpenNLP entity extraction on it and indexes any recognized names, dates
 * and locations through their dedicated sub-mappers.
 */
@Override
public void parse(ParseContext context) throws IOException {
    // Only plain string tokens carry text; anything else yields null content.
    XContentParser xContentParser = context.parser();
    String content = null;
    if (xContentParser.currentToken() == XContentParser.Token.VALUE_STRING) {
        content = xContentParser.text();
    }
    // Always index the original content itself.
    context.externalValue(content);
    contentMapper.parse(context);
    // Run entity extraction; the service keys results by entity category.
    Map<String, Set<String>> entities = openNlpService.tokenize(content);
    Set<String> personNames = entities.get("name");
    if (personNames != null && !personNames.isEmpty()) {
        for (String personName : personNames) {
            context.externalValue(personName);
            nameMapper.parse(context);
        }
    }
    Set<String> dateValues = entities.get("date");
    if (dateValues != null && !dateValues.isEmpty()) {
        for (String dateValue : dateValues) {
            context.externalValue(dateValue);
            dateMapper.parse(context);
        }
    }
    Set<String> locationValues = entities.get("location");
    if (locationValues != null && !locationValues.isEmpty()) {
        for (String locationValue : locationValues) {
            context.externalValue(locationValue);
            locationMapper.parse(context);
        }
    }
}
Example 38
Project: elasticsearch-analysis-reference-master  File: ReferenceMapper.java View source code
/**
 * Parses the field value, which is either a plain string (the id of a
 * referenced document) or an object carrying both the lookup configuration
 * ({@code ref_index}, {@code ref_type}, {@code ref_fields}) — note the
 * object form never sets {@code content}, so it results in no indexing here.
 * When an id and a complete configuration are available, the referenced
 * document is fetched and the configured fields are copied into this
 * document via the copy-to mechanism.
 *
 * NOTE(review): parsing the object form mutates the mapper's instance
 * fields (index/type/fields), which looks stateful across documents —
 * verify this is intended.
 */
@Override
@SuppressWarnings("unchecked")
public Mapper parse(ParseContext originalContext) throws IOException {
    String content = null;
    ParseContext context = originalContext;
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
        // Simple form: the value is the referenced document id.
        content = parser.text();
    } else {
        // Object form: read the reference configuration fields.
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_STRING) {
                if (currentFieldName != null) {
                    switch(currentFieldName) {
                        case "ref_index":
                            index = parser.text();
                            break;
                        case "ref_type":
                            type = parser.text();
                            break;
                        case "ref_fields":
                            // single field
                            fields = new LinkedList<>();
                            fields.add(parser.text());
                            break;
                        default:
                            break;
                    }
                }
            } else if (token == XContentParser.Token.START_ARRAY && "ref_fields".equals(currentFieldName)) {
                // Array form of ref_fields replaces any previously collected list.
                fields = new LinkedList<>();
                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                    if (parser.text() != null) {
                        fields.add(parser.text());
                    }
                }
            }
        }
    }
    // Without an id there is nothing to index or resolve.
    if (content == null) {
        return null;
    }
    // Index the raw id through the content mapper.
    context = context.createExternalValueContext(content);
    contentMapper.parse(context);
    // Resolve the reference only when the full lookup configuration is present.
    if (client != null && index != null && type != null && fields != null) {
        try {
            // Blocking get of the referenced document by id.
            GetResponse response = client.prepareGet().setIndex(index).setType(type).setId(content).execute().actionGet();
            if (response != null && response.isExists()) {
                for (String field : fields) {
                    Map<String, Object> source = response.getSource();
                    List<Object> list = XContentMapValues.extractRawValues(field, source);
                    if (list.isEmpty()) {
                        // Special case: a {lat, lon} object is converted to a GeoPoint.
                        Object object = XContentMapValues.extractValue(field, source);
                        if (object instanceof Map) {
                            Map<String, Object> map = (Map<String, Object>) object;
                            Double lat = (Double) map.get("lat");
                            Double lon = (Double) map.get("lon");
                            if (lat != null && lon != null) {
                                list = Collections.singletonList(new GeoPoint(lat, lon));
                            }
                        }
                    }
                    // Copy each resolved value into the configured copy-to fields.
                    for (Object object : list) {
                        context = context.createExternalValueContext(object);
                        if (copyTo != null) {
                            parseCopyFields(context, copyTo.copyToFields());
                        }
                    }
                }
            } else {
                logger.warn("ref doc does not exist: {}/{}/{}", index, type, content);
            }
        } catch (Exception e) {
            // Lookup failures are logged, not propagated — indexing continues.
            logger.error("error while getting ref doc " + index + "/" + type + "/" + content + ": " + e.getMessage(), e);
        }
    }
    return null;
}
Example 39
Project: elasticsearch-batch-percolator-master  File: BatchPercolatorQueriesRegistry.java View source code
/**
 * Parses a stored percolator document into its query plus limiting filter.
 * The document must be a JSON object containing a {@code query} object and
 * optionally a {@code type} value.
 * <p>
 * Field order matters: if {@code type} appears before {@code query}, the
 * query is parsed in-stream and the method returns early from inside the
 * loop; otherwise the raw query source is buffered and parsed after the
 * whole document has been read (with a null parser).
 *
 * @param id     document id, used only in error messages
 * @param source raw percolator document
 * @throws BatchPercolatorQueryException wrapping any parse failure
 */
QueryAndSource parsePercolatorDocument(String id, BytesReference source) {
    String type = null;
    BytesReference querySource = null;
    XContentParser parser = null;
    try {
        parser = XContentHelper.createParser(source);
        String currentFieldName = null;
        // move the START_OBJECT
        XContentParser.Token token = parser.nextToken();
        if (token != XContentParser.Token.START_OBJECT) {
            throw new ElasticsearchException("failed to parse query [" + id + "], not starting with OBJECT");
        }
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_OBJECT) {
                if ("query".equals(currentFieldName)) {
                    if (type != null) {
                        // Type already known: parse the query directly and return.
                        Query query = parseQuery(type, null, parser);
                        return new QueryAndSource(query, limitingFilterFactory.limitingFilter(query), source);
                    } else {
                        // Type not seen yet: buffer the query subtree for later parsing.
                        XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
                        builder.copyCurrentStructure(parser);
                        querySource = builder.bytes();
                        builder.close();
                    }
                } else {
                    // Unknown nested object — skip it entirely.
                    parser.skipChildren();
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                parser.skipChildren();
            } else if (token.isValue()) {
                if ("type".equals(currentFieldName)) {
                    type = parser.text();
                }
            }
        }
        // Reached only when "query" preceded "type" (or type was absent).
        Query query = parseQuery(type, querySource, null);
        return new QueryAndSource(query, limitingFilterFactory.limitingFilter(query), source);
    } catch (Exception e) {
        throw new BatchPercolatorQueryException(shardId().index(), "failed to parse query [" + id + "]", e);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }
}
Example 40
Project: elasticsearch-minhash-master  File: MinHashFieldMapper.java View source code
/**
 * Computes the MinHash signature of the field's text value and adds it to
 * the document: as a stored field and/or doc values (depending on the field
 * type), plus an optional bit-string copy into the copy-bits-to fields.
 */
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    // Resolve the raw text: prefer an externally supplied value, otherwise
    // read from the parser, mapping explicit nulls to the configured null value.
    final String text;
    if (context.externalValueSet()) {
        text = context.externalValue().toString();
    } else {
        XContentParser parser = context.parser();
        text = parser.currentToken() == XContentParser.Token.VALUE_NULL
                ? fieldType().nullValueAsString()
                : parser.textOrNull();
    }
    if (text == null) {
        return;
    }
    byte[] signature = MinHash.calculate(minhashAnalyzer, text);
    if (fieldType().stored()) {
        fields.add(new Field(fieldType().name(), signature, fieldType()));
    }
    if (fieldType().hasDocValues()) {
        // Accumulate all values of this field into one doc-values entry per document.
        CustomMinHashDocValuesField docValuesField = (CustomMinHashDocValuesField) context.doc().getByKey(fieldType().name());
        if (docValuesField == null) {
            docValuesField = new CustomMinHashDocValuesField(fieldType().name(), signature);
            context.doc().addWithKey(fieldType().name(), docValuesField);
        } else {
            docValuesField.add(signature);
        }
    }
    if (copyBitsTo != null) {
        // Propagate the bit-string form of the signature to the copy-bits targets.
        parseCopyBitsFields(context.createExternalValueContext(MinHash.toBinaryString(signature)), copyBitsTo.copyBitsToFields);
    }
}
Example 41
Project: elasticsearch-river-jira-master  File: JIRA5RestClient.java View source code
/**
 * Get projectKeys of all projects in configured JIRA instance.
 *
 * @return list of project keys
 * @throws Exception if the REST call or response parsing fails
 */
@Override
@SuppressWarnings("unchecked")
public List<String> getAllJIRAProjects() throws Exception {
    XContentParser parser = null;
    try {
        byte[] responseData = performJIRAGetRESTCall("project", null);
        // FIX: decode with an explicit charset. The previous new String(responseData)
        // used the platform default charset, inconsistent with the UTF-8 handling
        // below and able to garble non-ASCII project data in the log.
        logger.debug("JIRA REST response data: {}", new String(responseData, "UTF-8"));
        // JIRA returns a top-level JSON array; wrap it in an object so the
        // XContent map parser can consume it.
        StringBuilder sb = new StringBuilder();
        sb.append("{ \"projects\" : ").append(new String(responseData, "UTF-8")).append("}");
        responseData = sb.toString().getBytes("UTF-8");
        parser = XContentFactory.xContent(XContentType.JSON).createParser(responseData);
        Map<String, Object> responseParsed = parser.mapAndClose();
        // Collect the "key" attribute of every project entry.
        List<String> ret = new ArrayList<String>();
        for (Map<String, Object> mk : (List<Map<String, Object>>) responseParsed.get("projects")) {
            ret.add((String) mk.get("key"));
        }
        return ret;
    } finally {
        // Defensive close in case an exception occurred before mapAndClose().
        if (parser != null)
            parser.close();
    }
}
Example 42
Project: elasticsearch-river-remote-master  File: GetJSONClient.java View source code
/**
 * Parse JSON response into Object Structure.
 *
 * @param responseData to parse
 * @return parsed response (May be Map, or List, or simple value)
 * @throws UnsupportedEncodingException
 * @throws IOException
 */
protected Object parseJSONResponse(byte[] responseData) throws UnsupportedEncodingException, IOException {
    // Wrap the payload in an object so the XContent map parser accepts any
    // top-level JSON value (object, array, or scalar), then unwrap it again.
    String wrapped = "{ \"wrapit\" : " + new String(responseData, "UTF-8") + "}";
    XContentParser jsonParser = null;
    try {
        jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(wrapped.getBytes("UTF-8"));
        Map<String, Object> wrappedParsed = jsonParser.mapAndClose();
        return wrappedParsed.get("wrapit");
    } finally {
        // Defensive close in case an exception occurred before mapAndClose().
        if (jsonParser != null)
            jsonParser.close();
    }
}
Example 43
Project: searchisko-master  File: TestUtils.java View source code
/**
 * Read JSON file from classpath into Map of Map structure.
 *
 * @param filePath path inside classpath pointing to JSON file to read
 * @return parsed JSON file
 * @throws SettingsException
 */
public static Map<String, Object> loadJSONFromClasspathFile(String filePath) {
    XContentParser jsonParser = null;
    try {
        jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(TestUtils.class.getResourceAsStream(filePath));
        // mapOrderedAndClose preserves the key order found in the JSON file.
        return jsonParser.mapOrderedAndClose();
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    } finally {
        // Defensive close in case parsing failed before mapOrderedAndClose().
        if (jsonParser != null)
            jsonParser.close();
    }
}
Example 44
Project: vertexium-master  File: VertexiumQueryStringQueryParser.java View source code
/**
 * Parses a Vertexium-aware {@code query_string} query. Extends the stock
 * query_string parsing with an {@code authorizations} array: when present,
 * the queryable fields are restricted to those visible under the supplied
 * authorizations, and the resulting authorizations are passed to the
 * Vertexium-specific query parser.
 *
 * @throws QueryParsingException on unknown fields, missing {@code query},
 *                               or an unparseable query string
 */
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    String[] authorizations = null;
    DocumentMapper documentMapper = parseContext.mapperService().documentMapper(ELEMENT_DOCUMENT_MAPPER_NAME);
    FieldNameToVisibilityMap fieldNameToVisibilityMap = getFieldNameToVisibilityMap(documentMapper);
    XContentParser parser = parseContext.parser();
    String queryName = null;
    // Seed the settings with context defaults; individual JSON fields below override them.
    QueryParserSettings qpSettings = new QueryParserSettings();
    qpSettings.defaultField(parseContext.defaultField());
    qpSettings.lenient(parseContext.queryStringLenient());
    qpSettings.analyzeWildcard(defaultAnalyzeWildcard);
    qpSettings.allowLeadingWildcard(defaultAllowLeadingWildcard);
    qpSettings.locale(Locale.ROOT);
    String currentFieldName = null;
    XContentParser.Token token;
    // Walk the query object: field names set currentFieldName, arrays and
    // scalar values are dispatched on the most recent field name.
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_ARRAY) {
            if ("authorizations".equals(currentFieldName)) {
                // Restrict queryable fields to those visible under these authorizations.
                authorizations = xContentToAuthorizationsArray(parser);
                Set<String> fields = getQueryableFields(documentMapper, fieldNameToVisibilityMap, authorizations);
                if (qpSettings.fields() == null) {
                    qpSettings.fields(Lists.newArrayList());
                }
                for (String field : fields) {
                    qpSettings.fields().add(field);
                }
            } else if ("fields".equals(currentFieldName)) {
                // Each entry is "fieldName" or "fieldName^boost"; wildcard
                // patterns expand to all matching index names.
                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                    String fField = null;
                    float fBoost = -1;
                    // Scan the raw characters for a '^' boost separator.
                    char[] text = parser.textCharacters();
                    int end = parser.textOffset() + parser.textLength();
                    for (int i = parser.textOffset(); i < end; i++) {
                        if (text[i] == '^') {
                            int relativeLocation = i - parser.textOffset();
                            fField = new String(text, parser.textOffset(), relativeLocation);
                            fBoost = Float.parseFloat(new String(text, i + 1, parser.textLength() - relativeLocation - 1));
                            break;
                        }
                    }
                    if (fField == null) {
                        fField = parser.text();
                    }
                    if (qpSettings.fields() == null) {
                        qpSettings.fields(new ArrayList<String>());
                    }
                    if (Regex.isSimpleMatchPattern(fField)) {
                        for (String field : parseContext.mapperService().simpleMatchToIndexNames(fField)) {
                            qpSettings.fields().add(field);
                            if (fBoost != -1) {
                                if (qpSettings.boosts() == null) {
                                    qpSettings.boosts(new ObjectFloatHashMap<String>());
                                }
                                qpSettings.boosts().put(field, fBoost);
                            }
                        }
                    } else {
                        qpSettings.fields().add(fField);
                        if (fBoost != -1) {
                            if (qpSettings.boosts() == null) {
                                qpSettings.boosts(new ObjectFloatHashMap<String>());
                            }
                            qpSettings.boosts().put(fField, fBoost);
                        }
                    }
                }
            } else {
                throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + "]");
            }
        } else if (token.isValue()) {
            // Scalar settings — each branch maps a JSON field onto qpSettings.
            if ("query".equals(currentFieldName)) {
                qpSettings.queryString(parser.text());
            } else if ("default_field".equals(currentFieldName) || "defaultField".equals(currentFieldName)) {
                qpSettings.defaultField(parser.text());
            } else if ("default_operator".equals(currentFieldName) || "defaultOperator".equals(currentFieldName)) {
                String op = parser.text();
                if ("or".equalsIgnoreCase(op)) {
                    qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.OR);
                } else if ("and".equalsIgnoreCase(op)) {
                    qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.AND);
                } else {
                    throw new QueryParsingException(parseContext, "Query default operator [" + op + "] is not allowed");
                }
            } else if ("analyzer".equals(currentFieldName)) {
                NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text());
                if (analyzer == null) {
                    throw new QueryParsingException(parseContext, "[query_string] analyzer [" + parser.text() + "] not found");
                }
                qpSettings.forcedAnalyzer(analyzer);
            } else if ("quote_analyzer".equals(currentFieldName) || "quoteAnalyzer".equals(currentFieldName)) {
                NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text());
                if (analyzer == null) {
                    throw new QueryParsingException(parseContext, "[query_string] quote_analyzer [" + parser.text() + "] not found");
                }
                qpSettings.forcedQuoteAnalyzer(analyzer);
            } else if ("allow_leading_wildcard".equals(currentFieldName) || "allowLeadingWildcard".equals(currentFieldName)) {
                qpSettings.allowLeadingWildcard(parser.booleanValue());
            } else if ("auto_generate_phrase_queries".equals(currentFieldName) || "autoGeneratePhraseQueries".equals(currentFieldName)) {
                qpSettings.autoGeneratePhraseQueries(parser.booleanValue());
            } else if ("max_determinized_states".equals(currentFieldName) || "maxDeterminizedStates".equals(currentFieldName)) {
                qpSettings.maxDeterminizedStates(parser.intValue());
            } else if ("lowercase_expanded_terms".equals(currentFieldName) || "lowercaseExpandedTerms".equals(currentFieldName)) {
                qpSettings.lowercaseExpandedTerms(parser.booleanValue());
            } else if ("enable_position_increments".equals(currentFieldName) || "enablePositionIncrements".equals(currentFieldName)) {
                qpSettings.enablePositionIncrements(parser.booleanValue());
            } else if ("escape".equals(currentFieldName)) {
                qpSettings.escape(parser.booleanValue());
            } else if ("use_dis_max".equals(currentFieldName) || "useDisMax".equals(currentFieldName)) {
                qpSettings.useDisMax(parser.booleanValue());
            } else if ("fuzzy_prefix_length".equals(currentFieldName) || "fuzzyPrefixLength".equals(currentFieldName)) {
                qpSettings.fuzzyPrefixLength(parser.intValue());
            } else if ("fuzzy_max_expansions".equals(currentFieldName) || "fuzzyMaxExpansions".equals(currentFieldName)) {
                qpSettings.fuzzyMaxExpansions(parser.intValue());
            } else if ("fuzzy_rewrite".equals(currentFieldName) || "fuzzyRewrite".equals(currentFieldName)) {
                qpSettings.fuzzyRewriteMethod(QueryParsers.parseRewriteMethod(parseContext.parseFieldMatcher(), parser.textOrNull()));
            } else if ("phrase_slop".equals(currentFieldName) || "phraseSlop".equals(currentFieldName)) {
                qpSettings.phraseSlop(parser.intValue());
            } else if (parseContext.parseFieldMatcher().match(currentFieldName, FUZZINESS)) {
                qpSettings.setFuzziness(Fuzziness.parse(parser));
            } else if ("boost".equals(currentFieldName)) {
                qpSettings.boost(parser.floatValue());
            } else if ("tie_breaker".equals(currentFieldName) || "tieBreaker".equals(currentFieldName)) {
                qpSettings.tieBreaker(parser.floatValue());
            } else if ("analyze_wildcard".equals(currentFieldName) || "analyzeWildcard".equals(currentFieldName)) {
                qpSettings.analyzeWildcard(parser.booleanValue());
            } else if ("rewrite".equals(currentFieldName)) {
                qpSettings.rewriteMethod(QueryParsers.parseRewriteMethod(parseContext.parseFieldMatcher(), parser.textOrNull()));
            } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
                qpSettings.minimumShouldMatch(parser.textOrNull());
            } else if ("quote_field_suffix".equals(currentFieldName) || "quoteFieldSuffix".equals(currentFieldName)) {
                qpSettings.quoteFieldSuffix(parser.textOrNull());
            } else if ("lenient".equalsIgnoreCase(currentFieldName)) {
                qpSettings.lenient(parser.booleanValue());
            } else if ("locale".equals(currentFieldName)) {
                String localeStr = parser.text();
                qpSettings.locale(LocaleUtils.parse(localeStr));
            } else if ("time_zone".equals(currentFieldName)) {
                try {
                    qpSettings.timeZone(DateTimeZone.forID(parser.text()));
                } catch (IllegalArgumentException e) {
                    throw new QueryParsingException(parseContext, "[query_string] time_zone [" + parser.text() + "] is unknown");
                }
            } else if ("_name".equals(currentFieldName)) {
                queryName = parser.text();
            } else {
                throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + "]");
            }
        }
    }
    // "query" is the only mandatory field.
    if (qpSettings.queryString() == null) {
        throw new QueryParsingException(parseContext, "query_string must be provided with a [query]");
    }
    qpSettings.defaultAnalyzer(parseContext.mapperService().searchAnalyzer());
    qpSettings.defaultQuoteAnalyzer(parseContext.mapperService().searchQuoteAnalyzer());
    if (qpSettings.escape()) {
        qpSettings.queryString(org.apache.lucene.queryparser.classic.QueryParser.escape(qpSettings.queryString()));
    }
    // Build the visibility-aware parser and run the collected settings through it.
    MapperQueryParser queryParser = new VertexiumMapperQueryParser(parseContext, fieldNameToVisibilityMap, authorizations);
    queryParser.reset(qpSettings);
    try {
        Query query = queryParser.parse(qpSettings.queryString());
        if (query == null) {
            return null;
        }
        if (qpSettings.boost() != QueryParserSettings.DEFAULT_BOOST) {
            query.setBoost(query.getBoost() * qpSettings.boost());
        }
        query = fixNegativeQueryIfNeeded(query);
        // and multiple variations of the same word in the query (synonyms for instance).
        if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
            query = Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch());
        }
        if (queryName != null) {
            parseContext.addNamedQuery(queryName, query);
        }
        return query;
    } catch (org.apache.lucene.queryparser.classic.ParseException e) {
        throw new QueryParsingException(parseContext, "Failed to parse query [" + qpSettings.queryString() + "]", e);
    }
}
Example 45
Project: BioSolr-master  File: OntologyMapper.java View source code
/**
 * Resolves the field's string value as an ontology IRI, looks up the
 * corresponding OWL class data via the ontology helper, and indexes the
 * IRI, labels, synonyms, relations and (optionally) indirect ancestor/
 * descendant information through predefined and dynamically created
 * sub-mappers.
 *
 * @return this mapper if new dynamic relation mappers were created
 *         (so the mapping update is persisted), otherwise null
 * @throws MapperParsingException   if the field value is not a string
 * @throws ElasticsearchException   if the ontology helper cannot be used
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    String iri;
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
        iri = parser.text();
    } else {
        throw new MapperParsingException(name() + " does not contain String value");
    }
    // Temporarily switch to full paths so sub-fields index under this field's name.
    ContentPath.Type origPathType = context.path().pathType();
    context.path().pathType(ContentPath.Type.FULL);
    context.path().add(simpleName());
    // Tracks whether new dynamic mappers were added (mapping must then be updated).
    boolean modified = false;
    try {
        OntologyHelper helper = getHelper(ontologySettings, threadPool);
        OntologyData data = findOntologyData(helper, iri);
        if (data == null) {
            logger.debug("Cannot find OWL class for IRI {}", iri);
        } else {
            // Add the IRI
            addFieldData(context, getPredefinedMapper(FieldMappings.URI, context), Collections.singletonList(iri));
            // Look up the label(s)
            addFieldData(context, getPredefinedMapper(FieldMappings.LABEL, context), data.getLabels());
            // Look up the synonyms
            // NOTE(review): this passes data.getLabels() into the SYNONYMS field,
            // same as the LABEL field above — looks like it should be a
            // getSynonyms()-style accessor; confirm against OntologyData.
            addFieldData(context, getPredefinedMapper(FieldMappings.SYNONYMS, context), data.getLabels());
            // Add the child details
            addRelatedNodesWithLabels(context, data.getChildIris(), getPredefinedMapper(FieldMappings.CHILD_URI, context), data.getChildLabels(), getPredefinedMapper(FieldMappings.CHILD_LABEL, context));
            // Add the parent details
            addRelatedNodesWithLabels(context, data.getParentIris(), getPredefinedMapper(FieldMappings.PARENT_URI, context), data.getParentLabels(), getPredefinedMapper(FieldMappings.PARENT_LABEL, context));
            if (ontologySettings.isIncludeIndirect()) {
                // Add the descendant details
                addRelatedNodesWithLabels(context, data.getDescendantIris(), getPredefinedMapper(FieldMappings.DESCENDANT_URI, context), data.getDescendantLabels(), getPredefinedMapper(FieldMappings.DESCENDANT_LABEL, context));
                // Add the ancestor details
                addRelatedNodesWithLabels(context, data.getAncestorIris(), getPredefinedMapper(FieldMappings.ANCESTOR_URI, context), data.getAncestorLabels(), getPredefinedMapper(FieldMappings.ANCESTOR_LABEL, context));
            }
            if (ontologySettings.isIncludeRelations()) {
                // Add the related nodes
                Map<String, Collection<String>> relations = data.getRelationIris();
                for (String relation : relations.keySet()) {
                    // Sanitise the relation name
                    String sanRelation = relation.replaceAll("\\W+", "_");
                    String uriMapperName = sanRelation + DYNAMIC_URI_FIELD_SUFFIX;
                    String labelMapperName = sanRelation + DYNAMIC_LABEL_FIELD_SUFFIX;
                    // Get the mapper for the relation
                    StringFieldMapper uriMapper = mappers.get(context.path().fullPathAsText(uriMapperName));
                    StringFieldMapper labelMapper = mappers.get(context.path().fullPathAsText(labelMapperName));
                    if (uriMapper == null) {
                        // No mappers created yet - build new ones for URI and label
                        BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path());
                        uriMapper = MapperBuilders.stringField(uriMapperName).store(true).index(true).tokenized(false).build(builderContext);
                        labelMapper = MapperBuilders.stringField(labelMapperName).store(true).index(true).tokenized(true).build(builderContext);
                        // Copy-on-write update of the shared mapper map under the mutex.
                        synchronized (mutex) {
                            mappers = mappers.copyAndPut(uriMapper.fieldType().names().indexName(), uriMapper);
                            mappers = mappers.copyAndPut(labelMapper.fieldType().names().indexName(), labelMapper);
                        }
                        modified = true;
                    }
                    addRelatedNodesWithLabels(context, relations.get(relation), uriMapper, helper.findLabelsForIRIs(relations.get(relation)), labelMapper);
                }
                // NOTE(review): the parent-paths block is nested inside
                // isIncludeRelations(), so parent paths are only indexed when
                // relations are also enabled — confirm this nesting is intended.
                if (ontologySettings.isIncludeParentPaths()) {
                    // Add the parent paths
                    addFieldData(context, getPredefinedMapper(FieldMappings.PARENT_PATHS, context), data.getParentPaths());
                }
            }
        }
        helper.updateLastCallTime();
    } catch (OntologyHelperException e) {
        throw new ElasticsearchException("Could not initialise ontology helper", e);
    } finally {
        // Always restore the content path, even on failure.
        context.path().remove();
        context.path().pathType(origPathType);
    }
    return modified ? this : null;
}
Example 46
Project: elasticsearch-action-updatebyquery-master  File: TransportShardUpdateByQueryAction.java View source code
/**
 * Parses the update-by-query request body into an {@link UpdateByQueryContext}.
 * <p>
 * Recognized top-level fields: {@code query} (inline query DSL), {@code query_binary}
 * (base64-encoded query source), {@code params} (script parameters map); every other
 * field is forwarded to {@link ScriptParameterParser} (script, lang, script type).
 *
 * @param indexService the index the query is parsed against
 * @param request      the shard-level request carrying the raw source bytes
 * @param context      the search context that receives the parsed query
 * @return the populated update-by-query context
 * @throws ElasticsearchException if the source cannot be parsed, if no query is
 *                                present, or if no script is present
 */
private UpdateByQueryContext parseRequestSource(IndexService indexService, ShardUpdateByQueryRequest request, SearchContext context) {
    ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
    ParsedQuery parsedQuery = null;
    String script = null;
    ScriptType scriptType = null;
    String scriptLang = null;
    Map<String, Object> params = Maps.newHashMap();
    // try-with-resources: the original leaked the parser(s) on every code path
    try (XContentParser parser = XContentHelper.createParser(request.source())) {
        for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String fieldName = parser.currentName();
                if ("query".equals(fieldName)) {
                    parsedQuery = indexService.queryParserService().parse(parser);
                } else if ("query_binary".equals(fieldName)) {
                    parser.nextToken();
                    byte[] querySource = parser.binaryValue();
                    // the binary field embeds a complete query document; give it its own parser
                    try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource)) {
                        parsedQuery = indexService.queryParserService().parse(qSourceParser);
                    }
                } else if ("params".equals(fieldName)) {
                    parser.nextToken();
                    params = parser.map();
                } else {
                    // unknown fields are assumed to be script parameters (script/lang/...)
                    token = parser.nextToken();
                    scriptParameterParser.token(fieldName, token, parser);
                }
            }
        }
    } catch (Exception e) {
        throw new ElasticsearchException("Couldn't parse query from source.", e);
    }
    if (parsedQuery == null) {
        throw new ElasticsearchException("Query is required");
    }
    ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue();
    if (scriptValue != null) {
        script = scriptValue.script();
        scriptType = scriptValue.scriptType();
    } else {
        throw new ElasticsearchException("A script is required");
    }
    scriptLang = scriptParameterParser.lang();
    context.parsedQuery(parsedQuery);
    // batchSize and clusterService are fields of the enclosing action class
    return new UpdateByQueryContext(context, batchSize, clusterService.state(), script, scriptType, scriptLang, params);
}
Example 47
Project: elasticsearch-benchmark-suite-master  File: RestBenchAction.java View source code
/**
 * Parses a REST benchmark request body and fills the given builder.
 * <p>
 * The body is a single JSON object. Array fields: {@code requests} (search request
 * sources, copied verbatim), {@code competitors} (parsed recursively via the
 * two-argument {@code parse} overload), {@code percentiles}. Object field:
 * {@code clear_caches}. Scalar fields: {@code num_executor_nodes}, {@code iterations},
 * {@code concurrency}, {@code multiplier}, {@code num_slowest}, {@code warmup},
 * {@code clear_caches} (boolean form, only {@code false} is accepted), {@code name}.
 * Any unrecognized field raises an {@link ElasticsearchParseException}.
 * <p>
 * NOTE(review): the parser {@code p} is never closed — presumably tolerated for
 * in-memory byte sources, but worth confirming against the project's conventions.
 *
 * @param builder       the request builder to populate
 * @param data          the raw request body
 * @param contentUnsafe whether the underlying bytes may be recycled by the caller
 * @return the built {@link BenchmarkRequest}
 * @throws Exception if the body cannot be parsed
 */
public static BenchmarkRequest parse(BenchmarkRequestBuilder builder, BytesReference data, boolean contentUnsafe) throws Exception {
    XContent xContent = XContentFactory.xContent(data);
    XContentParser p = xContent.createParser(data);
    XContentParser.Token token = p.nextToken();
    assert token == XContentParser.Token.START_OBJECT;
    // fieldName carries the most recent FIELD_NAME token; each value case below
    // dispatches on it, so the order FIELD_NAME-then-value is assumed throughout
    String fieldName = null;
    while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) {
        switch(token) {
            case START_ARRAY:
                if ("requests".equals(fieldName)) {
                    while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) {
                        assert token == XContentParser.Token.START_OBJECT;
                        // copy the request object verbatim into its own builder so it
                        // can be replayed as an independent search source
                        XContentBuilder payloadBuilder = XContentFactory.contentBuilder(p.contentType()).copyCurrentStructure(p);
                        SearchRequest req = new SearchRequest();
                        req.source(payloadBuilder.bytes(), contentUnsafe);
                        builder.addSearchRequest(req);
                    }
                } else if ("competitors".equals(fieldName)) {
                    while (p.nextToken() != XContentParser.Token.END_ARRAY) {
                        // recursive descent: each competitor is parsed by the
                        // single-parser overload of parse
                        builder.addCompetitor(parse(p, contentUnsafe));
                    }
                } else if ("percentiles".equals(fieldName)) {
                    List<Double> percentiles = new ArrayList<>();
                    while (p.nextToken() != XContentParser.Token.END_ARRAY) {
                        percentiles.add(p.doubleValue());
                    }
                    builder.setPercentiles(Doubles.toArray(percentiles));
                } else {
                    throw new ElasticsearchParseException("Failed parsing array field [" + fieldName + "] field is not recognized");
                }
                break;
            case START_OBJECT:
                if ("clear_caches".equals(fieldName)) {
                    BenchmarkSettings.ClearCachesSettings clearCachesSettings = new BenchmarkSettings.ClearCachesSettings();
                    builder.setClearCachesSettings(clearCachesSettings);
                    parseClearCaches(p, clearCachesSettings);
                } else {
                    throw new ElasticsearchParseException("Failed parsing object field [" + fieldName + "] field is not recognized");
                }
                break;
            case FIELD_NAME:
                fieldName = p.text();
                break;
            case VALUE_NUMBER:
                if ("num_executor_nodes".equals(fieldName)) {
                    builder.setNumExecutorNodes(p.intValue());
                } else if ("iterations".equals(fieldName)) {
                    builder.setIterations(p.intValue());
                } else if ("concurrency".equals(fieldName)) {
                    builder.setConcurrency(p.intValue());
                } else if ("multiplier".equals(fieldName)) {
                    builder.setMultiplier(p.intValue());
                } else if ("num_slowest".equals(fieldName)) {
                    builder.setNumSlowest(p.intValue());
                } else {
                    throw new ElasticsearchParseException("Failed parsing numeric field [" + fieldName + "] field is not recognized");
                }
                break;
            case VALUE_BOOLEAN:
                if ("warmup".equals(fieldName)) {
                    builder.setWarmup(p.booleanValue());
                } else if ("clear_caches".equals(fieldName)) {
                    // boolean true is rejected: callers must name the caches to clear
                    // via the object form; false disables cache clearing entirely
                    if (p.booleanValue()) {
                        throw new ElasticsearchParseException("Failed parsing field [" + fieldName + "] must specify which caches to clear");
                    } else {
                        builder.setAllowCacheClearing(false);
                    }
                } else {
                    throw new ElasticsearchParseException("Failed parsing boolean field [" + fieldName + "] field is not recognized");
                }
                break;
            case VALUE_STRING:
                if ("name".equals(fieldName)) {
                    builder.setBenchmarkId(p.text());
                } else {
                    throw new ElasticsearchParseException("Failed parsing string field [" + fieldName + "] field is not recognized");
                }
                break;
            default:
                throw new ElasticsearchParseException("Failed parsing " + token.name() + " field [" + fieldName + "] field is not recognized");
        }
    }
    return builder.request();
}
Example 48
Project: elasticsearch-helper-master  File: IngestRequest.java View source code
/**
 * Adds framed (newline/stream-separator delimited) bulk data in binary format.
 * <p>
 * Each frame is either an action line ({@code index}, {@code create}, {@code delete})
 * optionally followed — for index/create — by a source frame. Frames without a
 * trailing separator are ignored (the loop breaks when no next marker is found).
 *
 * @param data         the framed bulk data
 * @param defaultIndex the default index, used when an action line names none
 * @param defaultType  the default type, used when an action line names none
 * @return this request
 * @throws Exception if data could not be added
 */
public IngestRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
    XContent xContent = XContentFactory.xContent(data);
    int from = 0;
    int length = data.length();
    byte marker = xContent.streamSeparator();
    while (true) {
        int nextMarker = findNextMarker(marker, from, data, length);
        if (nextMarker == -1) {
            // no complete frame left — trailing unterminated data is dropped
            break;
        }
        // now parse the move
        XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from));
        try {
            // move pointers
            from = nextMarker + 1;
            // Move to START_OBJECT
            XContentParser.Token token = parser.nextToken();
            if (token == null) {
                // empty frame (e.g. consecutive separators) — skip it
                continue;
            }
            assert token == XContentParser.Token.START_OBJECT;
            // Move to FIELD_NAME, that's the move
            token = parser.nextToken();
            assert token == XContentParser.Token.FIELD_NAME;
            String action = parser.currentName();
            String index = defaultIndex;
            String type = defaultType;
            String id = null;
            String routing = null;
            String parent = null;
            String timestamp = null;
            Long ttl = null;
            String opType = null;
            // NOTE(review): default version 0 — presumably matches the internal
            // "any version" sentinel of this ES version; confirm against Versions
            long version = 0;
            VersionType versionType = VersionType.INTERNAL;
            // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
            // or START_OBJECT which will have another set of parameters
            String currentFieldName = null;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token.isValue()) {
                    // both underscored and plain spellings are accepted for most keys
                    if ("_index".equals(currentFieldName)) {
                        index = parser.text();
                    } else if ("_type".equals(currentFieldName)) {
                        type = parser.text();
                    } else if ("_id".equals(currentFieldName)) {
                        id = parser.text();
                    } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
                        routing = parser.text();
                    } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
                        parent = parser.text();
                    } else if ("_timestamp".equals(currentFieldName) || "timestamp".equals(currentFieldName)) {
                        timestamp = parser.text();
                    } else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
                        // ttl accepts either a time-value string ("5m") or raw millis
                        if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
                            ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis();
                        } else {
                            ttl = parser.longValue();
                        }
                    } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
                        opType = parser.text();
                    } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
                        version = parser.longValue();
                    } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
                        versionType = VersionType.fromString(parser.text());
                    }
                }
            }
            if ("delete".equals(action)) {
                // delete has no source frame; the next frame is the next action line
                add(new DeleteRequest(index, type, id).parent(parent).version(version).versionType(versionType).routing(routing));
            } else {
                // index/create consume one more frame as the document source
                nextMarker = findNextMarker(marker, from, data, length);
                if (nextMarker == -1) {
                    break;
                }
                if ("index".equals(action)) {
                    if (opType == null) {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType).source(data.slice(from, nextMarker - from)));
                    } else {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType).create("create".equals(opType)).source(data.slice(from, nextMarker - from)));
                    }
                } else if ("create".equals(action)) {
                    internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType).create(true).source(data.slice(from, nextMarker - from)));
                }
                // advance past the consumed source frame
                from = nextMarker + 1;
            }
        } finally {
            parser.close();
        }
    }
    return this;
}
Example 49
Project: elasticsearch-jdbc-master  File: PlainKeyValueStreamListener.java View source code
/**
 * Receive values.
 * <p>
 * Maps each key/value pair into the current structured object, flushes the object
 * when a {@code _source} control key is present, and otherwise merges the values
 * into the current object, detecting WKT geo shapes and embedded JSON on the way.
 *
 * @param values the values
 * @return this value listener
 * @throws java.io.IOException when value processing gives an error
 */
@Override
public KeyValueStreamListener<K, V> values(List<V> values) throws IOException {
    boolean hasSource = false;
    if (current == null) {
        current = newObject();
    }
    if (prev == null) {
        prev = newObject();
    }
    // execute meta operations
    for (int i = 0; i < keys.size() && i < values.size(); i++) {
        // v may be null, then continue
        V v = values.get(i);
        if (v == null) {
            continue;
        }
        K k = keys.get(i);
        map(k, v, current);
        if (ControlKeys._source.name().equals(k)) {
            hasSource = true;
        }
    }
    if (hasSource) {
        // a complete source was supplied directly — emit it and start fresh
        end(current);
        current = newObject();
        return this;
    }
    // switch to next structured object if current is not equal to previous
    if (!current.equals(prev) || current.isEmpty() || shouldAutoGenID) {
        // "steal" source
        prev.source(current.source());
        // here, the element is being prepared for bulk indexing
        end(prev);
        prev = current;
        current = newObject();
    }
    // create current object from values by sequentially merging the values
    for (int i = 0; i < keys.size() && i < values.size(); i++) {
        Object v = null;
        try {
            String s = values.get(i).toString();
            // geo content? BUGFIX: parenthesized the disjunction — the original
            // `shouldDetectGeo && POLYGON || POINT` grouping attempted WKT parsing
            // of POINT(...) values even when geo detection was disabled
            if (shouldDetectGeo && (s.startsWith("POLYGON(") || s.startsWith("POINT("))) {
                SpatialContext ctx = JtsSpatialContext.GEO;
                Shape shape = ctx.readShapeFromWkt(s);
                XContentBuilder builder = jsonBuilder();
                builder.startObject();
                GeoJSONShapeSerializer.serialize(shape, builder);
                builder.endObject();
                s = builder.string();
            }
            // JSON content?
            if (shouldDetectJson) {
                XContentParser parser = JsonXContent.jsonXContent.createParser(s);
                XContentParser.Token token = parser.currentToken();
                if (token == null) {
                    token = parser.nextToken();
                }
                if (token == XContentParser.Token.START_OBJECT) {
                    v = parser.map();
                } else if (token == XContentParser.Token.START_ARRAY) {
                    v = parser.list();
                }
            }
        } catch (Exception ignored) {
            // best effort by design: values that are not valid WKT/JSON (including
            // nulls, whose toString() above throws NPE) fall through to the raw value
        }
        if (v == null || (v instanceof Map && ((Map) v).isEmpty())) {
            v = values.get(i);
        }
        Map<String, Object> m = merge(current.source(), keys.get(i), v);
        current.source(m);
    }
    return this;
}
Example 50
Project: elk-master  File: SearchGuardAdmin.java View source code
/**
 * Reads an XContent document from the given reader, re-serializes it to JSON bytes,
 * and validates that the result is loadable as settings.
 *
 * @param reader       the character source of the document
 * @param xContentType the content type the reader's data is encoded in
 * @return the document re-encoded as JSON bytes
 * @throws IOException if reading, re-serializing, or validating fails
 */
private static BytesReference readXContent(final Reader reader, final XContentType xContentType) throws IOException {
    final BytesReference retVal;
    // try-with-resources replaces the manual null-checked finally block
    try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(reader)) {
        parser.nextToken();
        final XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.copyCurrentStructure(parser);
        retVal = builder.bytes();
    }
    // validate: the produced JSON must be loadable as a settings document
    Settings.builder().put(new JsonSettingsLoader().load(XContentHelper.createParser(retVal))).build();
    return retVal;
}
Example 51
Project: jdbc-importer-master  File: PlainKeyValueStreamListener.java View source code
/**
 * Receive values.
 * <p>
 * Maps each key/value pair into the current structured object, flushes the object
 * when a {@code _source} control key is present, and otherwise merges the values
 * into the current object, detecting WKT geo shapes and embedded JSON on the way.
 *
 * @param values the values
 * @return this value listener
 * @throws java.io.IOException when value processing gives an error
 */
@Override
public KeyValueStreamListener<K, V> values(List<V> values) throws IOException {
    boolean hasSource = false;
    if (current == null) {
        current = newObject();
    }
    if (prev == null) {
        prev = newObject();
    }
    // execute meta operations
    for (int i = 0; i < keys.size() && i < values.size(); i++) {
        // v may be null, then continue
        V v = values.get(i);
        if (v == null) {
            continue;
        }
        K k = keys.get(i);
        map(k, v, current);
        if (ControlKeys._source.name().equals(k)) {
            hasSource = true;
        }
    }
    if (hasSource) {
        // a complete source was supplied directly — emit it and start fresh
        end(current);
        current = newObject();
        return this;
    }
    // switch to next structured object if current is not equal to previous
    if (!current.equals(prev) || current.isEmpty() || shouldAutoGenID) {
        // "steal" source
        prev.source(current.source());
        // here, the element is being prepared for bulk indexing
        end(prev);
        prev = current;
        current = newObject();
    }
    // create current object from values by sequentially merging the values
    for (int i = 0; i < keys.size() && i < values.size(); i++) {
        Object v = null;
        try {
            String s = values.get(i).toString();
            // geo content? BUGFIX: parenthesized the disjunction — the original
            // `shouldDetectGeo && POLYGON || POINT` grouping attempted WKT parsing
            // of POINT(...) values even when geo detection was disabled
            if (shouldDetectGeo && (s.startsWith("POLYGON(") || s.startsWith("POINT("))) {
                SpatialContext ctx = JtsSpatialContext.GEO;
                Shape shape = ctx.readShapeFromWkt(s);
                XContentBuilder builder = jsonBuilder();
                builder.startObject();
                GeoJSONShapeSerializer.serialize(shape, builder);
                builder.endObject();
                s = builder.string();
            }
            // JSON content?
            if (shouldDetectJson) {
                XContentParser parser = JsonXContent.jsonXContent.createParser(s);
                XContentParser.Token token = parser.currentToken();
                if (token == null) {
                    token = parser.nextToken();
                }
                if (token == XContentParser.Token.START_OBJECT) {
                    v = parser.map();
                } else if (token == XContentParser.Token.START_ARRAY) {
                    v = parser.list();
                }
            }
        } catch (Exception ignored) {
            // best effort by design: values that are not valid WKT/JSON (including
            // nulls, whose toString() above throws NPE) fall through to the raw value
        }
        if (v == null || (v instanceof Map && ((Map) v).isEmpty())) {
            v = values.get(i);
        }
        Map<String, Object> m = merge(current.source(), keys.get(i), v);
        current.source(m);
    }
    return this;
}
Example 52
Project: search-guard-master  File: SearchGuardAdmin.java View source code
/**
 * Reads an XContent document from the given reader, re-serializes it to JSON bytes,
 * and validates that the result is loadable as settings.
 *
 * @param reader       the character source of the document
 * @param xContentType the content type the reader's data is encoded in
 * @return the document re-encoded as JSON bytes
 * @throws IOException if reading, re-serializing, or validating fails
 */
private static BytesReference readXContent(final Reader reader, final XContentType xContentType) throws IOException {
    final BytesReference retVal;
    // try-with-resources replaces the manual null-checked finally block
    try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(reader)) {
        parser.nextToken();
        final XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.copyCurrentStructure(parser);
        retVal = builder.bytes();
    }
    // validate: the produced JSON must be loadable as a settings document
    Settings.builder().put(new JsonSettingsLoader().load(XContentHelper.createParser(retVal))).build();
    return retVal;
}
Example 53
Project: elasticsearch-mapper-attachments-master  File: AttachmentMapper.java View source code
/**
 * Parses an attachment field: extracts text and metadata from the binary content
 * with Tika, then feeds each extracted value (content, language, name, date, title,
 * author, keywords, content type, content length) into its dedicated sub-mapper.
 * <p>
 * Accepts either a bare base64 string (the content itself) or an object form with
 * {@code _content} plus optional {@code _content_type}, {@code _name},
 * {@code _language}, {@code _indexed_chars}, {@code _detect_language}.
 * When {@code ignoreErrors} is set, sub-mapper parse failures are logged and skipped.
 *
 * @param context the parse context positioned at the attachment field's value
 * @return always {@code null} (all indexing is delegated to the sub-mappers)
 * @throws IOException            if reading from the underlying parser fails
 * @throws MapperParsingException if no content is provided, or extraction fails
 *                                and errors are not ignored
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    byte[] content = null;
    String contentType = null;
    int indexedChars = defaultIndexedChars;
    boolean langDetect = defaultLangDetect;
    String name = null;
    String language = null;
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_STRING) {
        // short form: the field value is the base64-encoded content itself
        content = parser.binaryValue();
    } else {
        // object form: _content plus optional overrides
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.VALUE_STRING) {
                if ("_content".equals(currentFieldName)) {
                    content = parser.binaryValue();
                } else if ("_content_type".equals(currentFieldName)) {
                    contentType = parser.text();
                } else if ("_name".equals(currentFieldName)) {
                    name = parser.text();
                } else if ("_language".equals(currentFieldName)) {
                    language = parser.text();
                }
            } else if (token == XContentParser.Token.VALUE_NUMBER) {
                if ("_indexed_chars".equals(currentFieldName) || "_indexedChars".equals(currentFieldName)) {
                    indexedChars = parser.intValue();
                }
            } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
                if ("_detect_language".equals(currentFieldName) || "_detectLanguage".equals(currentFieldName)) {
                    langDetect = parser.booleanValue();
                }
            }
        }
    }
    // Throw clean exception when no content is provided Fix #23
    if (content == null) {
        throw new MapperParsingException("No content is provided.");
    }
    Metadata metadata = new Metadata();
    if (contentType != null) {
        metadata.add(Metadata.CONTENT_TYPE, contentType);
    }
    if (name != null) {
        metadata.add(Metadata.RESOURCE_NAME_KEY, name);
    }
    String parsedContent;
    try {
        parsedContent = TikaImpl.parse(content, metadata, indexedChars);
    } catch (Throwable e) {
        // Throwable: Tika extraction can fail with Errors (e.g. from native parsers)
        if (!ignoreErrors) {
            logger.trace("exception caught", e);
            throw new MapperParsingException("Failed to extract [" + indexedChars + "] characters of text for [" + name + "] : " + e.getMessage(), e);
        } else {
            logger.debug("Failed to extract [{}] characters of text for [{}]: [{}]", indexedChars, name, e.getMessage());
            logger.trace("exception caught", e);
        }
        return null;
    }
    context = context.createExternalValueContext(parsedContent);
    contentMapper.parse(context);
    if (langDetect) {
        try {
            if (language != null) {
                // an explicitly supplied language wins over detection
                metadata.add(Metadata.CONTENT_LANGUAGE, language);
            } else {
                LanguageIdentifier identifier = new LanguageIdentifier(parsedContent);
                language = identifier.getLanguage();
            }
            context = context.createExternalValueContext(language);
            languageMapper.parse(context);
        } catch (Throwable t) {
            logger.debug("Cannot detect language: [{}]", t.getMessage());
        }
    }
    if (name != null) {
        try {
            context = context.createExternalValueContext(name);
            nameMapper.parse(context);
        } catch (MapperParsingException e) {
            if (!ignoreErrors)
                throw e;
            if (logger.isDebugEnabled())
                logger.debug("Ignoring MapperParsingException catch while parsing name: [{}]", e.getMessage());
        }
    }
    // the remaining Tika metadata fields share identical handling — see helper below
    context = parseMetadataField(context, dateMapper, metadata.get(Metadata.DATE), "date");
    context = parseMetadataField(context, titleMapper, metadata.get(Metadata.TITLE), "title");
    context = parseMetadataField(context, authorMapper, metadata.get(Metadata.AUTHOR), "author");
    context = parseMetadataField(context, keywordsMapper, metadata.get(Metadata.KEYWORDS), "keywords");
    if (contentType == null) {
        // fall back to the content type Tika detected
        contentType = metadata.get(Metadata.CONTENT_TYPE);
    }
    context = parseMetadataField(context, contentTypeMapper, contentType, "content_type");
    int length = content.length;
    // If we have CONTENT_LENGTH from Tika we use it
    if (metadata.get(Metadata.CONTENT_LENGTH) != null) {
        length = Integer.parseInt(metadata.get(Metadata.CONTENT_LENGTH));
    }
    context = parseMetadataField(context, contentLengthMapper, length, "content_length");
    return null;
}

/**
 * Feeds one external value into a sub-mapper, skipping {@code null} values and —
 * when {@code ignoreErrors} is set — logging and swallowing mapper parse failures.
 * Extracted from six byte-identical inline blocks; log output is unchanged.
 *
 * @param context   the current parse context
 * @param mapper    the sub-mapper to invoke
 * @param value     the external value to index; {@code null} is a no-op
 * @param fieldName human-readable field name used in the debug log message
 * @return the (possibly re-wrapped) parse context to continue with
 * @throws IOException            if the sub-mapper fails on I/O
 * @throws MapperParsingException if parsing fails and errors are not ignored
 */
private ParseContext parseMetadataField(ParseContext context, Mapper mapper, Object value, String fieldName) throws IOException {
    if (value == null) {
        return context;
    }
    try {
        context = context.createExternalValueContext(value);
        mapper.parse(context);
    } catch (MapperParsingException e) {
        if (!ignoreErrors)
            throw e;
        if (logger.isDebugEnabled())
            logger.debug("Ignoring MapperParsingException catch while parsing " + fieldName + ": [{}]: [{}]", e.getMessage(), context.externalValue());
    }
    return context;
}
Example 54
Project: nuxeo-master  File: NxqlQueryConverter.java View source code
/**
 * Parses a geopoint from its string representation (e.g. "lat,lon" or a geohash)
 * by round-tripping it through a JSON value so {@code GeoUtils} can consume it.
 *
 * @param value the geopoint string
 * @return the parsed {@link GeoPoint}
 * @throws IllegalArgumentException if the value cannot be parsed as a geopoint
 */
private static GeoPoint parseGeoPointString(String value) {
    try {
        XContentBuilder content = JsonXContent.contentBuilder();
        content.value(value);
        // close the parser even if GeoUtils throws
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes())) {
            parser.nextToken();
            return GeoUtils.parseGeoPoint(parser);
        }
    } catch (IOException e) {
        // preserve the original exception as the cause instead of flattening it
        // to its message only
        throw new IllegalArgumentException("Invalid value for geopoint: " + e.getMessage(), e);
    }
}
Example 55
Project: Raigad-master  File: ElasticsearchUtil.java View source code
/**
 * Converts a JSON document into an order-preserving map.
 *
 * @param jsonString the JSON document to convert
 * @return the document's fields as an ordered map
 * @throws IOException if the string is not parseable JSON
 */
private static Map<String, Object> jsonToMap(String jsonString) throws IOException {
    XContentParser jsonParser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, jsonString);
    try {
        return jsonParser.mapOrdered();
    } finally {
        // release the parser regardless of whether mapOrdered() succeeds
        jsonParser.close();
    }
}
Example 56
Project: elasticsearch-sql-master  File: Maker.java View source code
/**
 * Builds a {@link ShapeBuilder} from a GeoJSON shape document.
 *
 * @param json the GeoJSON representation of the shape
 * @return the parsed shape builder
 * @throws IOException if the JSON cannot be parsed as a shape
 */
private ShapeBuilder getShapeBuilderFromJson(String json) throws IOException {
    // try-with-resources closes the parser (the original leaked it) and drops
    // the redundant `parser = null` initialization
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json)) {
        parser.nextToken();
        return ShapeBuilder.parse(parser);
    }
}