需求
最近做公司的報警系統,需要做釘釘推送報警信息,但是釘釘有限流措施,一分鐘內發多了會導致"send too fast"異常,雖然我們可以通過限流工具來拒絕多餘的信息,但是我們希望信息不要漏掉。如果可以接受推送時間晚一點的話,我們可以通過延時隊列解決。
JDK裏的延時隊列
其實jdk就有現成的延時隊列 DelayQueue。裏面存放的元素必須全部實現 Delayed接口,Delayed接口只有一個方法getDelay,用於自定義計算剩餘延遲時間。如果take的時候隊列第一個元素還沒到到期時間(getDelay>0),take就會阻塞等待。DelayQueue的隊列實現用的是PriorityQueue,PriorityQueue使用Comparable或者Comparator來進行排序。下面是DelayQueue部分重要組成
// JDK's DelayQueue (excerpt). Elements implement Delayed; an internal PriorityQueue
// orders them so the soonest-to-expire element is always at the head.
// NOTE: the `leader` and `available` fields referenced below are part of the real
// JDK class but are omitted from this excerpt.
public class DelayQueue<E extends Delayed> extends AbstractQueue<E>
implements BlockingQueue<E> {
private final transient ReentrantLock lock = new ReentrantLock();
private final PriorityQueue<E> q = new PriorityQueue<E>();
// Inserts e. If e became the new head, the current leader's timed wait is based
// on a stale delay, so the leader is cleared and a waiting taker is woken up.
public boolean offer(E e) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
q.offer(e);
if (q.peek() == e) {
leader = null;
available.signal();
}
return true;
} finally {
lock.unlock();
}
}
// Blocks until the head element's delay has expired, then removes and returns it.
// Uses the leader-follower pattern: only one thread (the leader) performs a timed
// wait for the head's remaining delay; all other takers wait untimed.
public E take() throws InterruptedException {
final ReentrantLock lock = this.lock;
lock.lockInterruptibly();
try {
for (;;) {
E first = q.peek();
if (first == null)
available.await();
else {
long delay = first.getDelay(NANOSECONDS);
if (delay <= 0)
return q.poll();
first = null; // don't retain ref while waiting
if (leader != null)
available.await();
else {
Thread thisThread = Thread.currentThread();
leader = thisThread;
try {
available.awaitNanos(delay);
} finally {
if (leader == thisThread)
leader = null;
}
}
}
}
} finally {
// On the way out: if no leader remains and elements exist, wake another taker.
if (leader == null && q.peek() != null)
available.signal();
lock.unlock();
}
}
}
符合實際需求的實現
上面的jdk實現不能直接套用,因爲還要考慮到 按業務主鍵(key)分類,單週期內最大處理數量,隊列最大數量 等因素。實現了一個符合自己實際需求的延遲隊列,完整代碼如下:
package cn.xxywithpq.delay;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
@Slf4j
public class LimitUtil<E> {
/**
* 隊列最大個數
*/
final int maxSize;
/**
* 每分鐘最大處理數
*/
final int maxSizePerMinutes;
private ScheduledFuture<?> scheduledFuture;
private ScheduledExecutorService executorService;
ReentrantLock putLock = new ReentrantLock();
ReentrantLock takeLock = new ReentrantLock();
Condition notEmpty = takeLock.newCondition();
private ExecutorService service = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new ThreadFactory() {
protected final AtomicInteger threadNumber = new AtomicInteger(1);
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r, "pool-limit-util-take-thread-" + toString() + "-" + this.threadNumber.getAndIncrement());
t.setDaemon(true);
return t;
}
});
private volatile transient Node<E> head;
private volatile transient Node<E> tail;
/**
* 鏈表個數
*/
private volatile transient AtomicInteger count = new AtomicInteger(0);
/**
* 每分鐘處理個數限制
*/
private volatile transient AtomicInteger limitCount = new AtomicInteger(0);
/**
* @param seconds 週期時間
* @param maxHandleNum 隊列最大個數
* @param maxQueueSize 每週期時間內最大處理數
*/
public LimitUtil(int seconds, int maxHandleNum, int maxQueueSize, Consumer<E> consumer) {
synchronized (this) {
head = tail = new Node(null);
maxSizePerMinutes = maxHandleNum;
maxSize = maxQueueSize;
executorService = new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
protected final AtomicInteger threadNumber = new AtomicInteger(1);
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r, "pool-limit-util-timekeeper-thread-" + toString() + "-" + this.threadNumber.getAndIncrement());
t.setDaemon(true);
return t;
}
});
scheduledFuture = executorService.scheduleAtFixedRate(() -> limitCount.set(0), seconds, seconds, TimeUnit.SECONDS);
for (int i = 0; i < 1; i++) {
service.submit(() -> {
while (true) {
try {
E take = take();
log.info("LimitUtil Thread: {} ;result {}", Thread.currentThread().getName(), take);
consumer.accept(take);
} catch (InterruptedException e) {
log.warn("LimitUtil stop take");
return;
} catch (Exception e) {
log.error("LimitUtil error {}");
}
}
});
}
}
}
public void put(E e) {
final AtomicInteger count = this.count;
final ReentrantLock putLock = this.putLock;
putLock.lock();
int size;
try {
// 隊列已滿,不再加入等待隊列
while (count.get() == maxSize) {
log.warn("LimitUtil funnelRate full {}", e);
return;
}
enqueue(e);
size = count.incrementAndGet();
log.info("LimitUtil Thread {} add {} size {}", Thread.currentThread().getId(), e, size);
if ((size > 0 && limitCount.get() < maxSizePerMinutes)) {
signalNotEmpty();
}
} finally {
putLock.unlock();
}
}
public E take() throws InterruptedException {
final AtomicInteger count = this.count;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
int size;
try {
while (limitCount.get() >= maxSizePerMinutes) {
try {
long delay = scheduledFuture.getDelay(TimeUnit.NANOSECONDS);
if (delay > 0) {
notEmpty.awaitNanos(delay);
}
} catch (InterruptedException e) {
throw e;
}
}
while (count.get() == 0) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
notEmpty.await();
}
E result = dequeue();
size = count.decrementAndGet();
if (size > 0) {
notEmpty.signal();
}
limitCount.incrementAndGet();
return result;
} catch (InterruptedException e) {
throw e;
} finally {
takeLock.unlock();
}
}
private void signalNotEmpty() {
takeLock.lock();
this.notEmpty.signal();
takeLock.unlock();
}
// private void signalNotFull() {
// putLock.lock();
// this.notFull.signal();
// putLock.unlock();
// }
private void enqueue(E e) {
Node<E> node = new Node(e);
tail.next = node;
tail = tail.next;
}
private E dequeue() {
Node<E> h = head;
Node<E> first = h.next;
// help gc
h.next = h;
head = first;
E x = first.item;
first.item = null;
return x;
}
private class Node<E> {
volatile E item;
volatile Node<E> next;
Node(E x) {
item = x;
}
}
// @Override
// protected void finalize() {
// log.info("finalize start");
// this.close();
// }
public void close() {
service.shutdownNow();
executorService.shutdownNow();
service = null;
scheduledFuture = null;
executorService = null;
head = tail = null;
}
}
其實出入隊的邏輯直接用的LinkedBlockingQueue的源碼,所以邏輯上不會出什麼問題,而且你可以在源碼上加上自己需要的東西。
延時如何實現
在這個隊列裏,延時的實現依靠一個定時器ScheduledExecutorService,針對釘釘機器人通知,假如我們希望一分鐘只發10次。 那我們就可以設一個變量limitCount記錄一個週期內的消費次數,ScheduledExecutorService每60秒置零一次limitCount。如果單個週期超過 最大處理量,就通過ScheduledFuture的getDelay方法獲取下次執行的剩餘時間用於take線程休眠。休眠時間到了,就開始下一個週期消費。
// Fields excerpted from LimitUtil: limitCount counts consumptions in the current
// period; scheduledFuture clears it every `seconds` seconds and, via getDelay(),
// exposes the time remaining until the next period starts.
private volatile transient AtomicInteger limitCount = new AtomicInteger(0);
private ScheduledFuture<?> scheduledFuture = executorService.scheduleAtFixedRate(() -> limitCount.set(0), seconds, seconds, TimeUnit.SECONDS);
// Blocking take: first waits out the current period if the limit is reached,
// then waits for the queue to become non-empty.
public E take() throws InterruptedException {
final AtomicInteger count = this.count;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
int size;
try {
// Consumption count reached the per-period maximum: use the time remaining
// until the next scheduled reset as the sleep duration for this take thread.
while (limitCount.get() >= maxSizePerMinutes) {
try {
long delay = scheduledFuture.getDelay(TimeUnit.NANOSECONDS);
if (delay > 0) {
notEmpty.awaitNanos(delay);
}
} catch (InterruptedException e) {
throw e;
}
}
// Queue empty: wait for a producer's signal (checking interrupt status first).
while (count.get() == 0) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
notEmpty.await();
}
E result = dequeue();
size = count.decrementAndGet();
// Elements remain: propagate the signal so the next take need not wait for a put.
if (size > 0) {
notEmpty.signal();
}
limitCount.incrementAndGet();
return result;
} catch (InterruptedException e) {
throw e;
} finally {
takeLock.unlock();
}
}
根據業務key進行分類
在我們的業務中,釘釘一個機器人就有一個url,每個url應該獨立有一套配置,那我們的LimitUtil實例應該不止一個,怎麼管理,我們可以想到map,但是系統的內存資源是寶貴的,對於用很少的,或者後期乾脆不用的,我們應該及時釋放資源。在這個方案中,我們用實現了LRU淘汰算法的LinkedHashMap作爲我們的LimitUtil容器,你繼承LinkedHashMap後,可以選擇覆寫removeEldestEntry方法,它會自動把最老的元素(最早以前使用過的)作爲方法的參數,這個方法默認返回false,如果你返回true,這個最老的元素就會被map自動刪除。大衆化的實現是根據map的容量是否超過最大值來決定是否刪除。
在LinkedHashMap的構造方法中,有一個accessOrder參數(默認爲false),我們將它設爲true,表示被訪問的元素會自動移到鏈表尾部(最近使用端),而最久未使用的元素位於鏈表頭部,removeEldestEntry拿到的eldest正是它。
/**
 * LinkedHashMap-based LRU cache: once more than {@code capacity} entries are
 * present, the least-recently-used entry is evicted automatically on insert.
 */
class LRU<K, V> extends LinkedHashMap<K, V> {

    /** Maximum number of entries kept in the cache. */
    private final int capacity;

    public LRU(int capacity, float loadFactor) {
        // accessOrder=true: iteration order is least- to most-recently accessed,
        // so the "eldest" entry is exactly the LRU eviction candidate.
        super(capacity, loadFactor, true);
        this.capacity = capacity;
    }

    /**
     * Called by LinkedHashMap after every put; returning true removes the
     * eldest (least recently used) entry.
     *
     * @param eldest the entry that would be evicted
     * @return true when the cache holds more than {@code capacity} entries
     */
    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // NOTE(review): if V owns resources (e.g. LimitUtil), release them here
        // before eviction, otherwise its threads leak — see the factory version.
        return size() > this.capacity;
    }
}
有了容器之後,我們簡單寫個工廠根據getInstance方法通過key取value,這樣就便於管理,下面是工廠完整代碼(跟spring集成了):
@Slf4j
@Component
public class DingDingLimitUtilFactory<E> implements InitializingBean {
    @Autowired
    CustomProperties customProperties;
    /** key (one DingTalk robot url) -> its rate-limited queue; LRU-evicted entries are closed. */
    LRU<String, LimitUtil<E>> map;

    /**
     * Returns the LimitUtil bound to {@code key}, creating and caching it on first use.
     *
     * @param key      business key (the robot url)
     * @param consumer how a taken element is processed (only used when the instance is created)
     */
    public synchronized LimitUtil<E> getInstance(String key, Consumer<E> consumer) {
        LimitUtil<E> limitUtil = map.get(key);
        if (limitUtil != null) {
            return limitUtil;
        }
        CustomProperties.DingDingLimit dingDingLimit = customProperties.getDingDingLimit();
        limitUtil = new LimitUtil<>(dingDingLimit.getSeconds(), dingDingLimit.getMaxHandleNum(), dingDingLimit.getMaxQueueSize(), consumer);
        map.put(key, limitUtil);
        return limitUtil;
    }

    @Override
    public void afterPropertiesSet() {
        map = new LRU<>(customProperties.getDingDingLimit().getLruCapacity(), 0.75f);
    }

    /**
     * LRU cache that, on eviction, closes the evicted LimitUtil so its worker
     * and timekeeper threads are released instead of leaking.
     * Declared static so it keeps no hidden reference to the factory instance.
     */
    static class LRU<K, V> extends LinkedHashMap<K, V> {

        /** Maximum number of entries kept in the cache. */
        private final int capacity;

        public LRU(int capacity, float loadFactor) {
            // accessOrder=true: the eldest entry is the least recently used one.
            super(capacity, loadFactor, true);
            this.capacity = capacity;
        }

        /**
         * Evicts (and closes) the least recently used entry once the cache
         * exceeds its capacity.
         */
        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            boolean evict = size() > this.capacity;
            if (evict && eldest.getValue() instanceof LimitUtil) {
                // Release the evicted instance's threads before the entry is dropped.
                ((LimitUtil<?>) eldest.getValue()).close();
            }
            return evict;
        }
    }
}
在getInstance方法中,有一個consumer參數,決定了LimitUtil的take方法取到元素後怎麼處理的邏輯。這是lambda語法,讓這個工具更加通用,不同的業務也能寫出不同的處理方式。
不足
上線之後,工具可以正常用。但這個工具類完全是單機的產物,不適用於分佈式環境,就當學習一些map,blockqueue的源碼。而且釘釘這種業務也沒有一定要用上中間件,可以滿足業務需求。
前面雖然有LinkedHashMap作爲lru淘汰來節省資源,但是這種代碼會導致內存泄漏,具體還看下一篇文章。
完整代碼上傳到 simplify-lock-spring-boot-starter,LimitUtilTest是測試類。