/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.cache.impl;
import com.hazelcast.config.CacheConfig;
import com.hazelcast.core.ICompletableFuture;
import com.hazelcast.map.impl.MapEntries;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.InternalCompletableFuture;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.OperationFactory;
import com.hazelcast.spi.OperationService;
import com.hazelcast.spi.partition.IPartitionService;
import javax.cache.CacheException;
import javax.cache.expiry.ExpiryPolicy;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Future;
import static com.hazelcast.cache.impl.CacheProxyUtil.validateNotNull;
import static com.hazelcast.util.ExceptionUtil.rethrow;
import static com.hazelcast.util.ExceptionUtil.rethrowAllowedTypeFirst;
/**
* <p>Hazelcast provides extension functionality to default spec interface {@link javax.cache.Cache}.
* {@link com.hazelcast.cache.ICache} is the designated interface.</p>
* <p>{@code AbstractCacheProxy} provides the implementation of various {@link com.hazelcast.cache.ICache} methods.</p>
* <p>Note: this partial implementation is used by server or embedded mode cache.</p>
*
* @param <K> the type of key.
* @param <V> the type of value.
* @see com.hazelcast.cache.impl.CacheProxy
* @see com.hazelcast.cache.ICache
*/
abstract class AbstractCacheProxy<K, V>
        extends AbstractInternalCacheProxy<K, V> {

    AbstractCacheProxy(CacheConfig<K, V> cacheConfig, NodeEngine nodeEngine, ICacheService cacheService) {
        super(cacheConfig, nodeEngine, cacheService);
    }

    @Override
    public InternalCompletableFuture<V> getAsync(K key) {
        return getAsync(key, null);
    }

    @Override
    public InternalCompletableFuture<V> getAsync(K key, ExpiryPolicy expiryPolicy) {
        ensureOpen();
        validateNotNull(key);
        Data keyData = serializationService.toData(key);
        Operation op = operationProvider.createGetOperation(keyData, expiryPolicy);
        return invoke(op, keyData, false);
    }

    @Override
    public InternalCompletableFuture<Void> putAsync(K key, V value) {
        return putAsync(key, value, null);
    }

    @Override
    public InternalCompletableFuture<Void> putAsync(K key, V value, ExpiryPolicy expiryPolicy) {
        // isGet=false (future resolves to null), withCompletionEvent=false
        return putAsyncInternal(key, value, expiryPolicy, false, false);
    }

    @Override
    public InternalCompletableFuture<Boolean> putIfAbsentAsync(K key, V value) {
        return putIfAbsentAsyncInternal(key, value, null, false);
    }

    @Override
    public InternalCompletableFuture<Boolean> putIfAbsentAsync(K key, V value, ExpiryPolicy expiryPolicy) {
        return putIfAbsentAsyncInternal(key, value, expiryPolicy, false);
    }

    @Override
    public ICompletableFuture<V> getAndPutAsync(K key, V value) {
        return getAndPutAsync(key, value, null);
    }

    @Override
    public ICompletableFuture<V> getAndPutAsync(K key, V value, ExpiryPolicy expiryPolicy) {
        // isGet=true: the future resolves to the previous value mapped to the key
        return putAsyncInternal(key, value, expiryPolicy, true, false);
    }

    @Override
    public InternalCompletableFuture<Boolean> removeAsync(K key) {
        return removeAsyncInternal(key, null, false, false, false);
    }

    @Override
    public InternalCompletableFuture<Boolean> removeAsync(K key, V oldValue) {
        // hasOldValue=true: remove only if the current value equals oldValue
        return removeAsyncInternal(key, oldValue, true, false, false);
    }

    @Override
    public ICompletableFuture<V> getAndRemoveAsync(K key) {
        // retOldValue=true: the future resolves to the removed value (or null)
        return removeAsyncInternal(key, null, false, true, false);
    }

    @Override
    public ICompletableFuture<Boolean> replaceAsync(K key, V value) {
        return replaceAsyncInternal(key, null, value, null, false, false, false);
    }

    @Override
    public ICompletableFuture<Boolean> replaceAsync(K key, V value, ExpiryPolicy expiryPolicy) {
        return replaceAsyncInternal(key, null, value, expiryPolicy, false, false, false);
    }

    @Override
    public ICompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
        // hasOldValue=true: replace only if the current value equals oldValue
        return replaceAsyncInternal(key, oldValue, newValue, null, true, false, false);
    }

    @Override
    public ICompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) {
        return replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, false);
    }

    @Override
    public ICompletableFuture<V> getAndReplaceAsync(K key, V value) {
        // retOldValue=true: the future resolves to the value that was replaced
        return replaceAsyncInternal(key, null, value, null, false, true, false);
    }

    @Override
    public ICompletableFuture<V> getAndReplaceAsync(K key, V value, ExpiryPolicy expiryPolicy) {
        return replaceAsyncInternal(key, null, value, expiryPolicy, false, true, false);
    }

    @Override
    public V get(K key, ExpiryPolicy expiryPolicy) {
        // Synchronous variant: delegates to the async path and blocks on the result.
        try {
            Future<V> future = getAsync(key, expiryPolicy);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public Map<K, V> getAll(Set<? extends K> keys, ExpiryPolicy expiryPolicy) {
        ensureOpen();
        validateNotNull(keys);
        if (keys.isEmpty()) {
            // Typed Collections.emptyMap() instead of raw Collections.EMPTY_MAP:
            // same shared immutable instance, but no unchecked-assignment warning.
            return Collections.emptyMap();
        }
        Set<Data> ks = new HashSet<Data>(keys.size());
        for (K key : keys) {
            Data dataKey = serializationService.toData(key);
            ks.add(dataKey);
        }
        Map<K, V> result = new HashMap<K, V>();
        // Only hit the partitions that actually own at least one of the requested keys.
        Collection<Integer> partitions = getPartitionsForKeys(ks);
        try {
            OperationFactory factory = operationProvider.createGetAllOperationFactory(ks, expiryPolicy);
            OperationService operationService = getNodeEngine().getOperationService();
            Map<Integer, Object> responses = operationService.invokeOnPartitions(getServiceName(), factory, partitions);
            for (Object response : responses.values()) {
                MapEntries mapEntries = serializationService.toObject(response);
                mapEntries.putAllToMap(serializationService, result);
            }
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
        return result;
    }

    @Override
    public void put(K key, V value, ExpiryPolicy expiryPolicy) {
        try {
            InternalCompletableFuture<Object> future = putAsyncInternal(key, value, expiryPolicy, false, true);
            future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public V getAndPut(K key, V value, ExpiryPolicy expiryPolicy) {
        try {
            InternalCompletableFuture<V> future = putAsyncInternal(key, value, expiryPolicy, true, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public void putAll(Map<? extends K, ? extends V> map, ExpiryPolicy expiryPolicy) {
        ensureOpen();
        validateNotNull(map);
        try {
            // first we fill entry set per partition
            int partitionCount = partitionService.getPartitionCount();
            List<Map.Entry<Data, Data>>[] entriesPerPartition = groupDataToPartitions(map, partitionCount);
            // then we invoke the operations and sync on completion of these operations
            putToAllPartitionsAndWaitForCompletion(entriesPerPartition, expiryPolicy);
        } catch (Exception e) {
            throw rethrow(e);
        }
    }

    /**
     * Serializes the given entries and buckets them by owning partition.
     * Slots for partitions that receive no entry remain {@code null}.
     */
    @SuppressWarnings("unchecked")
    private List<Map.Entry<Data, Data>>[] groupDataToPartitions(Map<? extends K, ? extends V> map, int partitionCount) {
        List<Map.Entry<Data, Data>>[] entriesPerPartition = new List[partitionCount];
        for (Map.Entry<? extends K, ? extends V> entry : map.entrySet()) {
            K key = entry.getKey();
            V value = entry.getValue();
            validateNotNull(key, value);
            Data keyData = serializationService.toData(key);
            Data valueData = serializationService.toData(value);
            int partitionId = partitionService.getPartitionId(keyData);
            List<Map.Entry<Data, Data>> entries = entriesPerPartition[partitionId];
            if (entries == null) {
                entries = new ArrayList<Map.Entry<Data, Data>>();
                entriesPerPartition[partitionId] = entries;
            }
            entries.add(new AbstractMap.SimpleImmutableEntry<Data, Data>(keyData, valueData));
        }
        return entriesPerPartition;
    }

    /**
     * Fires one putAll operation per non-empty partition bucket, then waits for all of them.
     * If several operations fail, only the first failure is rethrown (others are logged).
     */
    private void putToAllPartitionsAndWaitForCompletion(List<Map.Entry<Data, Data>>[] entriesPerPartition,
                                                        ExpiryPolicy expiryPolicy) throws Exception {
        List<Future<?>> futures = new ArrayList<Future<?>>(entriesPerPartition.length);
        for (int partitionId = 0; partitionId < entriesPerPartition.length; partitionId++) {
            List<Map.Entry<Data, Data>> entries = entriesPerPartition[partitionId];
            if (entries != null) {
                // TODO: if there is a single entry, we could make use of a put operation since that is a bit cheaper
                Operation operation = operationProvider.createPutAllOperation(entries, expiryPolicy, partitionId);
                Future<?> future = invoke(operation, partitionId, true);
                futures.add(future);
            }
        }
        Throwable error = null;
        for (Future<?> future : futures) {
            try {
                future.get();
            } catch (Throwable t) {
                logger.finest("Error occurred while putting entries as batch!", t);
                if (error == null) {
                    error = t;
                }
            }
        }
        if (error != null) {
            /*
             * There may be multiple exceptions but we throw only the first one.
             * There are some ideas to throw all exceptions to caller but all of them have drawbacks:
             * - `Throwable::addSuppressed` can be used to add other exceptions to the first one
             *   but it is available since JDK 7.
             * - `Throwable::initCause` can be used but this is wrong as semantic
             *   since the other exceptions are not cause of the first one.
             * - We may wrap all exceptions in our custom exception (such as `MultipleCacheException`)
             *   but in this case caller may wait different exception type and this idea causes problem.
             *   For example see this TCK test:
             *   `org.jsr107.tck.integration.CacheWriterTest::shouldWriteThoughUsingPutAll_partialSuccess`
             *   In this test exception is thrown at `CacheWriter` and caller side expects this exception.
             * So as a result, we only throw the first exception and others are suppressed by only logging.
             */
            throw rethrow(error);
        }
    }

    @Override
    public boolean putIfAbsent(K key, V value, ExpiryPolicy expiryPolicy) {
        try {
            Future<Boolean> future = putIfAbsentAsyncInternal(key, value, expiryPolicy, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public boolean replace(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) {
        try {
            Future<Boolean> future = replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public boolean replace(K key, V value, ExpiryPolicy expiryPolicy) {
        try {
            Future<Boolean> future = replaceAsyncInternal(key, null, value, expiryPolicy, false, false, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public V getAndReplace(K key, V value, ExpiryPolicy expiryPolicy) {
        try {
            Future<V> future = replaceAsyncInternal(key, null, value, expiryPolicy, false, true, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public int size() {
        ensureOpen();
        try {
            OperationFactory operationFactory = operationProvider.createSizeOperationFactory();
            Map<Integer, Object> results = getNodeEngine().getOperationService()
                    .invokeOnAllPartitions(getServiceName(), operationFactory);
            // Cluster-wide size is the sum of the per-partition counts.
            int total = 0;
            for (Object result : results.values()) {
                //noinspection RedundantCast
                total += (Integer) getNodeEngine().toObject(result);
            }
            return total;
        } catch (Throwable t) {
            throw rethrowAllowedTypeFirst(t, CacheException.class);
        }
    }

    /**
     * Computes the set of partition IDs owning the given serialized keys.
     * Stops iterating early once every partition in the cluster is already covered.
     */
    private Set<Integer> getPartitionsForKeys(Set<Data> keys) {
        IPartitionService partitionService = getNodeEngine().getPartitionService();
        int partitions = partitionService.getPartitionCount();
        // The result can never exceed either the partition count or the key count.
        int capacity = Math.min(partitions, keys.size());
        Set<Integer> partitionIds = new HashSet<Integer>(capacity);
        Iterator<Data> iterator = keys.iterator();
        while (iterator.hasNext() && partitionIds.size() < partitions) {
            Data key = iterator.next();
            partitionIds.add(partitionService.getPartitionId(key));
        }
        return partitionIds;
    }
}