/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jctools_voltpatches.queues.atomic;

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReferenceArray;

import org.jctools_voltpatches.queues.QueueProgressIndicators;

/**
 * A Multi-Producer-Single-Consumer queue based on an {@link AtomicReferenceArrayQueue}. This implies that
 * any thread may call the offer method, but only a single thread may call poll/peek for correctness to be
 * maintained. <br>
 * This implementation follows patterns documented on the package level for False Sharing protection.<br>
 * This implementation uses the <a href="http://sourceforge.net/projects/mc-fastflow/">Fast Flow</a>
 * method for polling from the queue (with a minor change to correctly publish the index) and an extension of
 * the Leslie Lamport concurrent queue algorithm (originated by Martin Thompson) on the producer side.<br>
 *
 * @author nitsanw
 *
 * @param <E> the element type
 */
public final class MpscAtomicArrayQueue<E> extends AtomicReferenceArrayQueue<E>
        implements QueueProgressIndicators {
    private final AtomicLong consumerIndex;
    private final AtomicLong producerIndex;
    private volatile long headCache;

    public MpscAtomicArrayQueue(int capacity) {
        super(capacity);
        this.consumerIndex = new AtomicLong();
        this.producerIndex = new AtomicLong();
    }

    /**
     * {@inheritDoc} <br>
     *
     * IMPLEMENTATION NOTES:<br>
     * Lock free offer using a single CAS. As the class name suggests, access is permitted to many threads
     * concurrently.
     *
     * @see java.util.Queue#offer(java.lang.Object)
     * @see org.jctools_voltpatches.queues.MessagePassingQueue#offer(Object)
     */
    @Override
    public boolean offer(final E e) {
        if (null == e) {
            throw new NullPointerException();
        }

        // use a cached view on consumer index (potentially updated in loop)
        final int mask = this.mask;
        final long capacity = mask + 1;
        long consumerIndexCache = lvConsumerIndexCache(); // LoadLoad
        long currentProducerIndex;
        do {
            currentProducerIndex = lvProducerIndex(); // LoadLoad
            final long wrapPoint = currentProducerIndex - capacity;
            if (consumerIndexCache <= wrapPoint) {
                final long currHead = lvConsumerIndex(); // LoadLoad
                if (currHead <= wrapPoint) {
                    return false; // FULL :(
                } else {
                    // update shared cached value of the consumerIndex
                    svConsumerIndexCache(currHead); // StoreLoad
                    // update on stack copy, we might need this value again if we lose the CAS.
                    consumerIndexCache = currHead;
                }
            }
        } while (!casProducerIndex(currentProducerIndex, currentProducerIndex + 1));
        /*
         * NOTE: the new producer index value is made visible BEFORE the element in the array. If we relied on
         * the index visibility to poll() we would need to handle the case where the element is not visible.
         */

        // Won CAS, move on to storing
        final int offset = calcElementOffset(currentProducerIndex, mask);
        soElement(offset, e); // StoreStore
        return true; // AWESOME :)
    }
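
    /*
     * For illustration (the numbers below are not from the original source): with a capacity of 8
     * (mask = 7) and a producer index of 10, wrapPoint is 10 - 8 = 2 and the claimed slot is 10 & 7 == 2.
     * That slot is only free again once the consumer has moved past index 2, so offer() reports the queue
     * as full while the freshly loaded consumer index is still <= wrapPoint.
     */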

    /**
     * A wait-free alternative to offer which fails on CAS failure.
     *
     * @param e new element, not null
     * @return 1 if the next element cannot be filled (queue is full), -1 if the CAS failed, 0 if successful
     */
    public final int weakOffer(final E e) {
        if (null == e) {
            throw new NullPointerException("Null is not a valid element");
        }

        final int mask = this.mask;
        final long capacity = mask + 1;
        final long currentTail = lvProducerIndex(); // LoadLoad
        final long consumerIndexCache = lvConsumerIndexCache(); // LoadLoad
        final long wrapPoint = currentTail - capacity;
        if (consumerIndexCache <= wrapPoint) {
            long currHead = lvConsumerIndex(); // LoadLoad
            if (currHead <= wrapPoint) {
                return 1; // FULL :(
            } else {
                svConsumerIndexCache(currHead); // StoreLoad
            }
        }

        // look Ma, no loop!
        if (!casProducerIndex(currentTail, currentTail + 1)) {
            return -1; // CAS FAIL :(
        }

        // Won CAS, move on to storing
        final int offset = calcElementOffset(currentTail, mask);
        soElement(offset, e);
        return 0; // AWESOME :)
    }
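
    /*
     * Illustrative usage sketch for weakOffer() (the 'queue' and 'task' names are hypothetical, not part of
     * this class):
     *
     *   int result;
     *   do {
     *       result = queue.weakOffer(task);
     *   } while (result == -1);      // -1: lost the CAS race to another producer, retry
     *   if (result == 1) {
     *       // 1: queue is full, apply back-pressure or drop the task
     *   }
     *   // 0: the task was enqueued successfully
     */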

    /**
     * {@inheritDoc}
     * <p>
     * IMPLEMENTATION NOTES:<br>
     * Lock free poll using ordered loads/stores. As the class name suggests, access is limited to a single
     * thread.
     *
     * @see java.util.Queue#poll()
     * @see org.jctools_voltpatches.queues.MessagePassingQueue#poll()
     */
    @Override
    public E poll() {
        final long consumerIndex = lvConsumerIndex(); // LoadLoad
        final int offset = calcElementOffset(consumerIndex);
        // Copy field to avoid re-reading after volatile load
        final AtomicReferenceArray<E> buffer = this.buffer;

        // If we can't see the next available element we can't poll
        E e = lvElement(buffer, offset); // LoadLoad
        if (null == e) {
            /*
             * NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
             * winning the CAS on offer but before storing the element in the queue. Other producers may go on
             * to fill up the queue after this element.
             */
            if (consumerIndex != lvProducerIndex()) {
                do {
                    e = lvElement(buffer, offset);
                } while (e == null);
            } else {
                return null;
            }
        }

        spElement(buffer, offset, null);
        soConsumerIndex(consumerIndex + 1); // StoreStore
        return e;
    }

    /**
     * {@inheritDoc}
     * <p>
     * IMPLEMENTATION NOTES:<br>
     * Lock free peek using ordered loads. As the class name suggests, access is limited to a single thread.
     *
     * @see java.util.Queue#peek()
     * @see org.jctools_voltpatches.queues.MessagePassingQueue#peek()
     */
    @Override
    public E peek() {
        // Copy field to avoid re-reading after volatile load
        final AtomicReferenceArray<E> buffer = this.buffer;

        final long consumerIndex = lvConsumerIndex(); // LoadLoad
        final int offset = calcElementOffset(consumerIndex);
        E e = lvElement(buffer, offset);
        if (null == e) {
            /*
             * NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
             * winning the CAS on offer but before storing the element in the queue. Other producers may go on
             * to fill up the queue after this element.
             */
            if (consumerIndex != lvProducerIndex()) {
                do {
                    e = lvElement(buffer, offset);
                } while (e == null);
            } else {
                return null;
            }
        }
        return e;
    }

    /**
     * {@inheritDoc}
     * <p>
     */
    @Override
    public int size() {
        /*
         * It is possible for a thread to be interrupted or rescheduled between the read of the producer and
         * consumer indices, therefore protection is required to ensure size is within valid range. In the
         * event of concurrent polls/offers to this method the size is OVER estimated as we read consumer
         * index BEFORE the producer index.
         */
        long after = lvConsumerIndex();
        while (true) {
            final long before = after;
            final long currentProducerIndex = lvProducerIndex();
            after = lvConsumerIndex();
            if (before == after) {
                return (int) (currentProducerIndex - after);
            }
        }
    }

    @Override
    public boolean isEmpty() {
        // Order matters!
        // Loading consumer before producer allows for producer increments after consumer index is read.
        // This ensures the correctness of this method at least for the consumer thread. Other threads' POV
        // is not really something we can fix here.
        return (lvConsumerIndex() == lvProducerIndex());
    }

    @Override
    public long currentProducerIndex() {
        return lvProducerIndex();
    }

    @Override
    public long currentConsumerIndex() {
        return lvConsumerIndex();
    }

    private long lvConsumerIndex() {
        return consumerIndex.get();
    }

    private long lvProducerIndex() {
        return producerIndex.get();
    }

    protected final long lvConsumerIndexCache() {
        return headCache;
    }

    protected final void svConsumerIndexCache(long v) {
        headCache = v;
    }

    protected final boolean casProducerIndex(long expect, long newValue) {
        return producerIndex.compareAndSet(expect, newValue);
    }

    protected void soConsumerIndex(long l) {
        consumerIndex.lazySet(l);
    }
}