<aop:aspectj-autoproxy/>
<bean id="hystrixAspect" class="com.netflix.hystrix.contrib.javanica.aop.aspectj.HystrixCommandAspect" />
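If the project uses Java configuration instead of XML, an equivalent setup is roughly the following sketch (the class name HystrixConfig is ours):

import com.netflix.hystrix.contrib.javanica.aop.aspectj.HystrixCommandAspect;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.EnableAspectJAutoProxy;

// Java-config counterpart of the XML above: enable AspectJ auto-proxying
// and register the Hystrix javanica aspect as a bean.
@Configuration
@EnableAspectJAutoProxy
public class HystrixConfig {
    @Bean
    public HystrixCommandAspect hystrixAspect() {
        return new HystrixCommandAspect();
    }
}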
Collapser

import java.util.List;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

import com.netflix.hystrix.contrib.javanica.annotation.HystrixCollapser;

public class HystrixCollapserSample {
    @HystrixCollapser(batchMethod = "batch")
    public Future<Boolean> single(String input) {
        return null; // never actually executed: Hystrix intercepts single() and routes the collected inputs to batch()
    }

    public List<Boolean> batch(List<String> inputs) {
        return inputs.stream().map(it -> Boolean.TRUE).collect(Collectors.toList());
    }
}
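To see the collapsing in action, here is a minimal usage sketch. It assumes HystrixCollapserSample is registered as a Spring bean (so the aspect configured above proxies it), and the field name sample is ours; since the annotation's default scope is REQUEST, a HystrixRequestContext must be active.

import java.util.concurrent.Future;

import com.netflix.hystrix.strategy.concurrency.HystrixRequestContext;

public class CollapserUsageSketch {
    // Assumed to be injected by Spring so the HystrixCommandAspect proxy is in place.
    private HystrixCollapserSample sample;

    public void callCollapsed() throws Exception {
        HystrixRequestContext context = HystrixRequestContext.initializeContext();
        try {
            Future<Boolean> first = sample.single("first");
            Future<Boolean> second = sample.single("second");
            // Both calls are queued and merged into a single batch(["first", "second"])
            // invocation once the collapse window (timerDelayInMilliseconds, 10 ms by default) elapses.
            System.out.println(first.get() + ", " + second.get());
        } finally {
            context.shutdown();
        }
    }
}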
Source-code implementation

For reference, the annotation with its full set of collapser options (a collapser key, the collapsing scope, and per-collapser properties) looks like this:

@HystrixCollapser(
        batchMethod = "batch",
        collapserKey = "single",
        scope = com.netflix.hystrix.HystrixCollapser.Scope.GLOBAL,
        collapserProperties = {
                @HystrixProperty(name = "maxRequestsInBatch", value = "100"),
                @HystrixProperty(name = "timerDelayInMilliseconds", value = "1000"),
                @HystrixProperty(name = "requestCache.enabled", value = "true")
        })
The BatchCollapser below is a hand-rolled, general-purpose collapser: submitted events are buffered and flushed either as soon as their number reaches threshHold or whenever the interval timer fires.

import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.util.CollectionUtils;

public class BatchCollapser<E> implements InitializingBean {
    private static final Logger logger = LoggerFactory.getLogger(BatchCollapser.class);
    private static final Map<Class<?>, BatchCollapser<?>> instance = Maps.newConcurrentMap();
    private static final ScheduledExecutorService SCHEDULE_EXECUTOR = Executors.newScheduledThreadPool(1);

    private volatile LinkedBlockingDeque<E> batchContainer = new LinkedBlockingDeque<>();
    private Handler<List<E>, Boolean> cleaner; // callback that flushes one batch (see the usage sketch below)
    private long interval;                     // flush period in milliseconds
    private int threshHold;                    // flush as soon as this many events are buffered

    private BatchCollapser(Handler<List<E>, Boolean> cleaner, int threshHold, long interval) {
        this.cleaner = cleaner;
        this.threshHold = threshHold;
        this.interval = interval;
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        // Time-based trigger: flush whatever has accumulated every `interval` milliseconds.
        SCHEDULE_EXECUTOR.scheduleAtFixedRate(() -> {
            try {
                this.clean();
            } catch (Exception e) {
                logger.error("clean container exception", e);
            }
        }, 0, interval, TimeUnit.MILLISECONDS);
    }

    public void submit(E event) {
        batchContainer.add(event);
        // Size-based trigger: flush immediately once the threshold is reached.
        if (batchContainer.size() >= threshHold) {
            clean();
        }
    }

    private void clean() {
        List<E> transferList = Lists.newArrayListWithExpectedSize(threshHold);
        batchContainer.drainTo(transferList, threshHold);
        if (CollectionUtils.isEmpty(transferList)) {
            return;
        }
        try {
            cleaner.handle(transferList);
        } catch (Exception e) {
            logger.error("batch execute error, transferList:{}", transferList, e);
        }
    }

    @SuppressWarnings("unchecked")
    public static <E> BatchCollapser<E> getInstance(Handler<List<E>, Boolean> cleaner, int threshHold, long interval) {
        Class<?> jobClass = cleaner.getClass();
        if (instance.get(jobClass) == null) {
            synchronized (BatchCollapser.class) {
                // Double-checked locking: exactly one collapser per Handler class.
                if (instance.get(jobClass) == null) {
                    instance.put(jobClass, new BatchCollapser<>(cleaner, threshHold, interval));
                }
            }
        }
        return (BatchCollapser<E>) instance.get(jobClass);
    }
}
A few points are worth noting in the code above. Because the collapser has to be shared globally, it is implemented as a singleton, and to keep it generic a simple singleton factory is built internally from a ConcurrentHashMap plus double-checked locking. To tell collapsers for different purposes apart, the factory takes an instance that implements Handler and groups buffered requests by that instance's class. Finally, because java.util.Timer runs its tasks on a single blocking thread (while one task is blocked, another run of the same Timer cannot start), a ScheduledExecutorService is used to drive the periodic flush instead. A usage sketch follows below.
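Since the Handler interface is not shown in this excerpt, the following usage sketch assumes it is a simple single-method interface; OrderBatchHandler, the order IDs, and the threshold/interval values are made-up examples.

import java.util.List;

// Assumed shape of the callback used by BatchCollapser: handle one batch, report success.
@FunctionalInterface
interface Handler<T, R> {
    R handle(T input);
}

// Hypothetical cleaner: writes a whole batch of order IDs downstream in one call.
class OrderBatchHandler implements Handler<List<Long>, Boolean> {
    @Override
    public Boolean handle(List<Long> orderIds) {
        System.out.println("flushing " + orderIds.size() + " orders in one write");
        return Boolean.TRUE;
    }
}

class BatchCollapserUsage {
    public void run() throws Exception {
        // One collapser per Handler class: flush when 50 events are buffered
        // or every 200 ms, whichever comes first.
        BatchCollapser<Long> collapser = BatchCollapser.getInstance(new OrderBatchHandler(), 50, 200L);
        collapser.afterPropertiesSet(); // normally invoked by Spring via InitializingBean
        for (long id = 0; id < 120; id++) {
            collapser.submit(id);
        }
    }
}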
For counting-style requests with a very high duplication rate, the buffer can be a Guava ConcurrentHashMultiset instead of a queue: identical requests are stored only once with a count, and the flush step merges them into a single request whose increment is multiplied by that count.

// `container` is a ConcurrentHashMultiset<Request> field holding the buffered requests.
if (container.isEmpty()) {
    return;
}
List<Request> transferList = Lists.newArrayList();
container.elementSet().forEach(request -> {
    int count = container.count(request);
    if (count <= 0) {
        return;
    }
    // N identical requests collapse into one request carrying N times the increment.
    transferList.add(count == 1 ? request : new Request(request.getIncrement() * count));
    container.remove(request, count);
});
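Below is a minimal sketch of how that flush loop could sit in a complete class; the CountingCollapser and Request classes, the field names, and the equals/hashCode strategy are assumptions made for illustration.

import java.util.List;

import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Lists;

// Hypothetical counting collapser built around Guava's ConcurrentHashMultiset.
class CountingCollapser {
    private final ConcurrentHashMultiset<Request> container = ConcurrentHashMultiset.create();

    // Duplicate requests are not stored twice; only their count is incremented.
    public void submit(Request request) {
        container.add(request);
    }

    // Periodic flush: merges duplicates exactly as in the snippet above and
    // returns the merged list for downstream processing.
    public List<Request> flush() {
        List<Request> transferList = Lists.newArrayList();
        container.elementSet().forEach(request -> {
            int count = container.count(request);
            if (count <= 0) {
                return;
            }
            transferList.add(count == 1 ? request : new Request(request.getIncrement() * count));
            container.remove(request, count);
        });
        return transferList;
    }
}

// Request must define equals/hashCode so that identical requests land on the same multiset element.
class Request {
    private final long increment;

    Request(long increment) {
        this.increment = increment;
    }

    long getIncrement() {
        return increment;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof Request && ((Request) o).increment == this.increment;
    }

    @Override
    public int hashCode() {
        return Long.hashCode(increment);
    }
}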
Summary
To wrap up, here is where each technique fits:
ConcurrentHashMultiset: counting and statistics scenarios where the rate of duplicate requests is very high;