/*
 * Licensed to ElasticSearch and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. ElasticSearch licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.river;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.ElasticSearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.IngestClient;
import org.elasticsearch.client.ServerIndicesAdminClient;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Injectors;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.river.cluster.RiverClusterChangedEvent;
import org.elasticsearch.river.cluster.RiverClusterService;
import org.elasticsearch.river.cluster.RiverClusterState;
import org.elasticsearch.river.cluster.RiverClusterStateListener;
import org.elasticsearch.river.routing.RiverRouting;
import org.elasticsearch.search.lookup.SourceLookup;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Map;
import java.util.concurrent.CountDownLatch;

/**
 * Manages the lifecycle of rivers on the local node: rivers are created when the river
 * cluster state routes them to this node, closed when they are no longer routed here,
 * and their creation status is written back to the river index.
 */
public class RiversService extends AbstractLifecycleComponent<RiversService> {

    private final String riverIndexName;

    private ServerIndicesAdminClient adminClient;

    private IngestClient ingestClient;

    private final ThreadPool threadPool;

    private final ClusterService clusterService;

    private final RiversTypesRegistry typesRegistry;

    private final Injector injector;

    private final Map<RiverName, Injector> riversInjectors = Maps.newHashMap();

    private volatile ImmutableMap<RiverName, River> rivers = ImmutableMap.of();

    @Inject
    public RiversService(Settings settings, IngestClient client, ThreadPool threadPool, ClusterService clusterService,
                         RiversTypesRegistry typesRegistry, RiverClusterService riverClusterService,
                         Injector injector) {
        super(settings);
        this.riverIndexName = RiverIndexName.Conf.indexName(settings);
        this.ingestClient = client;
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.typesRegistry = typesRegistry;
        this.injector = injector;
        riverClusterService.add(new ApplyRivers());
    }

    @Override
    protected void doStart() throws ElasticSearchException {
    }

    @Override
    protected void doStop() throws ElasticSearchException {
        // close all known rivers in parallel on the generic thread pool and wait for them to finish
        ImmutableSet<RiverName> indices = ImmutableSet.copyOf(this.rivers.keySet());
        final CountDownLatch latch = new CountDownLatch(indices.size());
        for (final RiverName riverName : indices) {
            threadPool.generic().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        closeRiver(riverName);
                    } catch (Exception e) {
                        logger.warn("failed to delete river on stop [{}]/[{}]", e, riverName.type(), riverName.name());
                    } finally {
                        latch.countDown();
                    }
                }
            });
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            // ignore
        }
    }

    @Override
    protected void doClose() throws ElasticSearchException {
    }

    public synchronized void createRiver(RiverName riverName, Map<String, Object> settings) throws ElasticSearchException {
        if (riversInjectors.containsKey(riverName)) {
            logger.warn("ignoring river [{}][{}] creation, already exists", riverName.type(), riverName.name());
            return;
        }

        logger.debug("creating river [{}][{}]", riverName.type(), riverName.name());

        try {
            ModulesBuilder modules = new ModulesBuilder();
            modules.add(new RiverNameModule(riverName));
            modules.add(new RiverModule(riverName, settings, this.settings, typesRegistry));
            modules.add(new RiversPluginsModule(this.settings, injector.getInstance(PluginsService.class)));

            Injector indexInjector = modules.createChildInjector(injector);
            riversInjectors.put(riverName, indexInjector);
            River river = indexInjector.getInstance(River.class);
            rivers = MapBuilder.newMapBuilder(rivers).put(riverName, river).immutableMap();

            // we need this start so there can be operations done (like creating an index) which can't be
            // done on create since Guice can't create two concurrent child injectors
            river.start();

            // write a "_status" document for the river, recording the node it was allocated to
            XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
            builder.field("ok", true);
            builder.startObject("node");
            builder.field("id", clusterService.localNode().id());
            builder.field("name", clusterService.localNode().name());
            builder.field("transport_address", clusterService.localNode().address().toString());
            builder.endObject();
            builder.endObject();

            ingestClient.prepareIndex(riverIndexName, riverName.name(), "_status")
                    .setConsistencyLevel(WriteConsistencyLevel.ONE)
                    .setSource(builder).execute().actionGet();
        } catch (Exception e) {
            logger.warn("failed to create river [{}][{}]", e, riverName.type(), riverName.name());

            try {
                // record the failure in the river's "_status" document
                XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
                builder.field("error", ExceptionsHelper.detailedMessage(e));
                builder.startObject("node");
                builder.field("id", clusterService.localNode().id());
                builder.field("name", clusterService.localNode().name());
                builder.field("transport_address", clusterService.localNode().address().toString());
                builder.endObject();
                builder.endObject();

                ingestClient.prepareIndex(riverIndexName, riverName.name(), "_status")
                        .setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setSource(builder).execute().actionGet();
            } catch (Exception e1) {
                logger.warn("failed to write failed status for river creation", e1);
            }
        }
    }

    public synchronized void closeRiver(RiverName riverName) throws ElasticSearchException {
        Injector riverInjector;
        River river;
        synchronized (this) {
            riverInjector = riversInjectors.remove(riverName);
            if (riverInjector == null) {
                throw new RiverException(riverName, "missing");
            }
            logger.debug("closing river [{}][{}]", riverName.type(), riverName.name());

            Map<RiverName, River> tmpMap = Maps.newHashMap(rivers);
            river = tmpMap.remove(riverName);
            rivers = ImmutableMap.copyOf(tmpMap);
        }

        river.close();

        Injectors.close(riverInjector);
    }

    private class ApplyRivers implements RiverClusterStateListener {
        @Override
        public void riverClusterChanged(RiverClusterChangedEvent event) {
            DiscoveryNode localNode = clusterService.localNode();
            RiverClusterState state = event.state();

            // first, go over and close rivers that either don't exist in the routing or are not allocated to this node
            for (final RiverName riverName : rivers.keySet()) {
                RiverRouting routing = state.routing().routing(riverName);
                if (routing == null || !localNode.equals(routing.node())) {
                    // not routed at all, or not allocated here, clean it (we delete the relevant ones before)
                    closeRiver(riverName);
                    // also, double check and delete the river content if it was deleted (_meta does not exist)
                    try {
                        ingestClient.prepareGet(riverIndexName, riverName.name(), "_meta").setListenerThreaded(true).execute(new ActionListener<GetResponse>() {
                            @Override
                            public void onResponse(GetResponse getResponse) {
                                if (!getResponse.exists()) {
                                    // verify the river is deleted
                                    adminClient.prepareDeleteMapping(riverIndexName).setType(riverName.name()).execute(new ActionListener<DeleteMappingResponse>() {
                                        @Override
                                        public void onResponse(DeleteMappingResponse deleteMappingResponse) {
                                            // all is well...
                                        }

                                        @Override
                                        public void onFailure(Throwable e) {
                                            logger.debug("failed to (double) delete river [{}] content", e, riverName.name());
                                        }
                                    });
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                logger.debug("failed to (double) delete river [{}] content", e, riverName.name());
                            }
                        });
                    } catch (IndexMissingException e) {
                        // all is well, the _river index was deleted
                    } catch (Exception e) {
                        logger.warn("unexpected failure when trying to verify river [{}] deleted", e, riverName.name());
                    }
                }
            }

            // then, create any river that is routed to this node but not running yet
            for (final RiverRouting routing : state.routing()) {
                // not allocated
                if (routing.node() == null) {
                    continue;
                }
                // only apply changes to the local node
                if (!routing.node().equals(localNode)) {
                    continue;
                }
                // if it's already created, ignore it
                if (rivers.containsKey(routing.riverName())) {
                    continue;
                }
                ingestClient.prepareGet(riverIndexName, routing.riverName().name(), "_meta").setListenerThreaded(true).execute(new ActionListener<GetResponse>() {
                    @Override
                    public void onResponse(GetResponse getResponse) {
                        if (!rivers.containsKey(routing.riverName())) {
                            if (getResponse.exists()) {
                                // only create the river if it exists, otherwise, the indexing meta data has not been visible yet...
                                createRiver(routing.riverName(), sourceAsMap(getResponse));
                            }
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        // if this is a failure that needs to be retried, then do it
                        // this might happen if the state of the river index has not been propagated yet to this node, which
                        // should happen pretty fast since we managed to get the _meta in the RiversRouter
                        Throwable failure = ExceptionsHelper.unwrapCause(e);
                        if ((failure instanceof NoShardAvailableActionException) || (failure instanceof ClusterBlockException) || (failure instanceof IndexMissingException)) {
                            logger.debug("failed to get _meta from [{}]/[{}], retrying...", e, routing.riverName().type(), routing.riverName().name());
                            final ActionListener<GetResponse> listener = this;
                            threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.SAME, new Runnable() {
                                @Override
                                public void run() {
                                    ingestClient.prepareGet(riverIndexName, routing.riverName().name(), "_meta").setListenerThreaded(true).execute(listener);
                                }
                            });
                        } else {
                            logger.warn("failed to get _meta from [{}]/[{}]", e, routing.riverName().type(), routing.riverName().name());
                        }
                    }
                });
            }
        }
    }

    public Map<String, Object> sourceAsMap(GetResponse getResponse) throws ElasticSearchParseException {
        BytesReference source = getResponse.getSourceAsBytesRef();
        if (source == null) {
            return null;
        }
        return SourceLookup.sourceAsMap(source);
    }
}