/*
   Copyright (c) 2012 LinkedIn Corp.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package com.linkedin.d2.balancer.servers;

import com.linkedin.common.callback.Callback;
import com.linkedin.common.util.None;
import com.linkedin.d2.balancer.LoadBalancerServer;
import com.linkedin.d2.balancer.properties.PartitionData;
import com.linkedin.d2.balancer.properties.UriProperties;
import com.linkedin.d2.discovery.event.PropertyEventThread.PropertyEventShutdownCallback;
import com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URI;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static com.linkedin.d2.discovery.util.LogUtil.info;
import static com.linkedin.d2.discovery.util.LogUtil.warn;

/**
 * {@link LoadBalancerServer} implementation that announces ("marks up") and withdraws
 * ("marks down") a server's URI for a cluster by writing {@link UriProperties} into a
 * ZooKeeper-backed {@link ZooKeeperEphemeralStore}.
 */
public class ZooKeeperServer implements LoadBalancerServer
{
  private static final Logger _log = LoggerFactory.getLogger(ZooKeeperServer.class);

  // Swappable at runtime via setStore(); volatile so all threads observe the latest store.
  private volatile ZooKeeperEphemeralStore<UriProperties> _store;

  public ZooKeeperServer()
  {
  }

  public ZooKeeperServer(ZooKeeperEphemeralStore<UriProperties> store)
  {
    _store = store;
  }

  /**
   * Starts the underlying store; the callback completes when the store has started.
   */
  @Override
  public void start(Callback<None> callback)
  {
    _store.start(callback);
  }

  /**
   * Shuts down the underlying store, invoking the callback once shutdown is done.
   */
  @Override
  public void shutdown(final Callback<None> callback)
  {
    _store.shutdown(new PropertyEventShutdownCallback()
    {
      @Override
      public void done()
      {
        callback.onSuccess(None.none());
      }
    });
  }

  /**
   * Marks up {@code uri} in {@code clusterName} with the given per-partition data and no
   * uri-specific properties.
   *
   * @param clusterName      cluster to announce into
   * @param uri              uri being announced
   * @param partitionDataMap partition id to {@link PartitionData} (e.g. weight) mapping
   * @param callback         completion callback
   */
  @Override
  public void markUp(final String clusterName,
                     final URI uri,
                     final Map<Integer, PartitionData> partitionDataMap,
                     final Callback<None> callback)
  {
    markUp(clusterName, uri, partitionDataMap, Collections.<String, Object>emptyMap(), callback);
  }

  /**
   * Marks up {@code uri} in {@code clusterName}. If the uri is already present in the
   * cluster's properties, it is first marked down so the new partition data (e.g. a new
   * weight) replaces the old announcement.
   *
   * @param clusterName           cluster to announce into
   * @param uri                   uri being announced
   * @param partitionDataMap      partition id to {@link PartitionData} mapping
   * @param uriSpecificProperties optional per-uri properties; null or empty means none
   * @param callback              completion callback
   */
  @Override
  public void markUp(final String clusterName,
                     final URI uri,
                     final Map<Integer, PartitionData> partitionDataMap,
                     final Map<String, Object> uriSpecificProperties,
                     final Callback<None> callback)
  {
    // Performs the actual store write; invoked either directly or after a markDown
    // when the uri was already announced.
    final Callback<None> doPutCallback = new Callback<None>()
    {
      @Override
      public void onSuccess(None none)
      {
        Map<URI, Map<Integer, PartitionData>> partitionDesc =
            new HashMap<URI, Map<Integer, PartitionData>>();
        partitionDesc.put(uri, partitionDataMap);

        Map<URI, Map<String, Object>> myUriSpecificProperties;
        if (uriSpecificProperties != null && !uriSpecificProperties.isEmpty())
        {
          myUriSpecificProperties = new HashMap<URI, Map<String, Object>>();
          myUriSpecificProperties.put(uri, uriSpecificProperties);
        }
        else
        {
          myUriSpecificProperties = Collections.emptyMap();
        }

        // Guarded so the StringBuilder work is skipped entirely when INFO is disabled.
        if (_log.isInfoEnabled())
        {
          StringBuilder sb = new StringBuilder();
          sb.append(_store);
          sb.append(" marked up for cluster: ");
          sb.append(clusterName);
          sb.append(", uri: ");
          sb.append(uri);
          sb.append(", announcing [partitionId: weight]s: {");
          for (final int partitionId : partitionDataMap.keySet())
          {
            sb.append("[");
            sb.append(partitionId);
            sb.append(" : ");
            sb.append(partitionDataMap.get(partitionId));
            sb.append("]");
          }
          sb.append("}");
          info(_log, sb);
        }
        _store.put(clusterName,
                   new UriProperties(clusterName, partitionDesc, myUriSpecificProperties),
                   callback);
      }

      @Override
      public void onError(Throwable e)
      {
        callback.onError(e);
      }
    };

    Callback<UriProperties> getCallback = new Callback<UriProperties>()
    {
      @Override
      public void onSuccess(UriProperties uris)
      {
        if (uris != null && uris.Uris().contains(uri))
        {
          warn(_log, "markUp called on a uri that already exists in cluster ", clusterName, ": ", uri);
          // mark down before marking up with the new weight
          markDown(clusterName, uri, doPutCallback);
        }
        else
        {
          doPutCallback.onSuccess(None.none());
        }
      }

      @Override
      public void onError(Throwable e)
      {
        callback.onError(e);
      }
    };
    _store.get(clusterName, getCallback);
  }

  /**
   * Marks down {@code uri} in {@code clusterName}. If the cluster or the uri is unknown,
   * logs a warning and completes successfully (idempotent mark-down).
   *
   * @param clusterName cluster the uri was announced into
   * @param uri         uri to withdraw
   * @param callback    completion callback
   */
  @Override
  public void markDown(final String clusterName, final URI uri, final Callback<None> callback)
  {
    Callback<UriProperties> getCallback = new Callback<UriProperties>()
    {
      @Override
      public void onSuccess(UriProperties uris)
      {
        if (uris == null)
        {
          warn(_log, "markDown called on a cluster that doesn't exist in zk: ", clusterName);
          callback.onSuccess(None.none());
        }
        else if (!uris.Uris().contains(uri))
        {
          warn(_log, "markDown called on a uri that doesn't exist in cluster ", clusterName, ": ", uri);
          callback.onSuccess(None.none());
        }
        else
        {
          // FIX: added the missing space before "with uri:" so the message reads correctly.
          warn(_log, _store, " marked down for cluster ", clusterName, " with uri: ", uri);
          Map<URI, Map<Integer, PartitionData>> partitionData =
              new HashMap<URI, Map<Integer, PartitionData>>(2);
          // Empty partition map for this uri — presumably tells removePartial to drop
          // the uri's entry; confirm against ZooKeeperEphemeralStore.removePartial.
          partitionData.put(uri, Collections.<Integer, PartitionData>emptyMap());
          _store.removePartial(clusterName, new UriProperties(clusterName, partitionData), callback);
        }
      }

      @Override
      public void onError(Throwable e)
      {
        callback.onError(e);
      }
    };
    _store.get(clusterName, getCallback);
  }

  /**
   * Replaces the backing store; takes effect for all subsequent operations.
   */
  public void setStore(ZooKeeperEphemeralStore<UriProperties> store)
  {
    _store = store;
    info(_log, "store set to new store: ", _store);
  }

  /**
   * Synchronous shutdown: asks the store to shut down and waits up to 5 seconds for it
   * to complete, logging a warning on timeout or interruption.
   */
  public void shutdown()
  {
    info(_log, "shutting down zk server");
    final CountDownLatch latch = new CountDownLatch(1);

    _store.shutdown(new PropertyEventShutdownCallback()
    {
      @Override
      public void done()
      {
        latch.countDown();
      }
    });

    try
    {
      if (!latch.await(5, TimeUnit.SECONDS))
      {
        // FIX: typo "propertly" -> "properly".
        warn(_log, "unable to shut down properly");
      }
      else
      {
        info(_log, "shutting down complete");
      }
    }
    catch (InterruptedException e)
    {
      warn(_log, "unable to shut down properly.. got interrupt exception while waiting");
      // FIX: restore the interrupt status so callers can observe the interruption
      // instead of it being silently swallowed.
      Thread.currentThread().interrupt();
    }
  }
}