
Distributed ID Generation: Making the Snowflake Algorithm Stateless

The snowflake algorithm itself needs no further introduction here. Its drawback is that it is stateful: when running multiple replicas, each instance must be manually configured with its own workerId and datacenterId. The code is as follows:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Snowflake ID generator.
 */
public class SnowflakeIdWorker {

    /** Custom epoch (2017-01-01). */
    private static final long twepoch = 1483200000000L;

    /** Number of bits taken by the worker id. */
    private static final long workerIdBits = 5L;

    /** Number of bits taken by the datacenter id. */
    private static final long datacenterIdBits = 5L;

    /** Maximum worker id, i.e. 31 (this shift trick quickly yields the largest value representable in n bits). */
    private static final long maxWorkerId = ~(-1L << workerIdBits);

    /** Maximum datacenter id, i.e. 31. */
    private static final long maxDatacenterId = ~(-1L << datacenterIdBits);

    /** Number of bits taken by the per-millisecond sequence. */
    private final long sequenceBits = 12L;

    /** Worker id is shifted left by 12 bits. */
    private final long workerIdShift = sequenceBits;

    /** Datacenter id is shifted left by 17 bits (12 + 5). */
    private final long datacenterIdShift = sequenceBits + workerIdBits;

    /** Timestamp is shifted left by 22 bits (12 + 5 + 5). */
    private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;

    /** Mask for the sequence, here 4095 (0b111111111111 = 0xfff). */
    private final long sequenceMask = ~(-1L << sequenceBits);

    /** Worker id (0~31). */
    private long workerId;

    /** Datacenter id (0~31). */
    private long datacenterId;

    /** Sequence within the current millisecond (0~4095). */
    private long sequence = 0L;

    /** Timestamp of the last generated id. */
    private long lastTimestamp = -1L;

    /**
     * @param workerId     worker id (0~31)
     * @param datacenterId datacenter id (0~31)
     */
    public SnowflakeIdWorker(long workerId, long datacenterId) {
        if (workerId > maxWorkerId || workerId < 0) {
            throw new IllegalArgumentException(
                    String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
        }
        if (datacenterId > maxDatacenterId || datacenterId < 0) {
            throw new IllegalArgumentException(
                    String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
        }
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }

    /** Returns the next id as a String (thread-safe). */
    public synchronized String getId() {
        long id = nextId();
        return id + "";
    }

    /** Returns the next id (thread-safe). */
    public synchronized long nextId() {
        long timestamp = timeGen();
        // If the current time is earlier than the last timestamp, the system clock has moved backwards;
        // refuse to generate ids in that case.
        if (timestamp < lastTimestamp) {
            throw new RuntimeException(String.format(
                    "Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
        }
        if (lastTimestamp == timestamp) {
            // Same millisecond: advance the in-millisecond sequence.
            sequence = (sequence + 1) & sequenceMask;
            if (sequence == 0) {
                // Sequence overflowed within this millisecond: block until the next millisecond.
                timestamp = tilNextMillis(lastTimestamp);
            }
        } else {
            // New millisecond: reset the sequence.
            sequence = 0L;
        }
        // Remember the timestamp of this id.
        lastTimestamp = timestamp;
        // Shift the parts into place and OR them together into one 64-bit id.
        return ((timestamp - twepoch) << timestampLeftShift)
                | (datacenterId << datacenterIdShift)
                | (workerId << workerIdShift)
                | sequence;
    }

    /** Spin until the next millisecond and return the new timestamp. */
    protected long tilNextMillis(long lastTimestamp) {
        long timestamp = timeGen();
        while (timestamp <= lastTimestamp) {
            timestamp = timeGen();
        }
        return timestamp;
    }

    /** Current time in milliseconds. */
    protected long timeGen() {
        return System.currentTimeMillis();
    }

    /** Simple concurrency test. */
    public static void main(String[] args) {
        SnowflakeIdWorker idWorker = new SnowflakeIdWorker(1, 1);
        ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(
                10, Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<>());
        for (int i = 0; i < 10000; i++) {
            threadPoolExecutor.execute(() -> {
                long id = idWorker.nextId();
                System.out.println("id:" + id);
            });
        }
        threadPoolExecutor.shutdown();
    }
}
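To make the 64-bit layout concrete, an id produced above can be decoded back into its four fields by reversing the shifts. This is a minimal sketch that assumes the same epoch and bit widths as the class above; the decoder class itself is illustrative and not part of the original code.

public class SnowflakeIdDecoder {

    // Same custom epoch as SnowflakeIdWorker (2017-01-01).
    private static final long TWEPOCH = 1483200000000L;

    public static void main(String[] args) {
        long id = new SnowflakeIdWorker(1, 1).nextId();
        long sequence     = id & 0xFFFL;          // lowest 12 bits: per-millisecond sequence
        long workerId     = (id >> 12) & 0x1FL;   // next 5 bits: worker id
        long datacenterId = (id >> 17) & 0x1FL;   // next 5 bits: datacenter id
        long timestampMs  = (id >> 22) + TWEPOCH; // remaining bits: milliseconds since the custom epoch
        System.out.printf("timestamp=%d datacenterId=%d workerId=%d sequence=%d%n",
                timestampMs, datacenterId, workerId, sequence);
    }
}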

We can make this stateless by using a Redis distributed lock to claim the workerId and datacenterId when the service starts:

1. Claim the ids through a Redis distributed lock (SETNX), resolving conflicts between multiple replicas.


import com.test.common.Const;
import com.test.config.jedis.core.IRedis;
import org.apache.commons.lang3.StringUtils; // assumed: StringUtils.EMPTY from Apache commons-lang3
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.net.InetAddress;
import java.net.UnknownHostException;

@Configuration
public class SnowBeanConfig {

    private static final Logger logger = LoggerFactory.getLogger(SnowBeanConfig.class);

    private final static String OK = "OK";
    private final static int DATA_SIZE = 31;

    @Value("${cus.snowflake.expire:40}")
    private int snowflakeExpireTime;

    @Autowired
    private IRedis redis;

    @Bean
    public SnowflakeIdWorker getIdWorker() {
        try {
            /*
             * Guard against another replica whose keep-alive failed but whose slot (e.g. 0-0) is still in use:
             * wait one full expire window so any stale slot keys have expired in Redis before we claim one.
             */
            logger.info("Get snow work by redis and wait for {}s...", snowflakeExpireTime);
            Thread.sleep(snowflakeExpireTime * 1000L);

            getSnowWorkIdByRedis();
            Integer dataCenterId = SnowWorkIdLocalCache.getDataCenterId();
            Integer workerId = SnowWorkIdLocalCache.getWorkId();
            logger.info("Host: {} SnowFlake success get dataCenterId: {}, workerId: {}",
                    getHostName(), dataCenterId, workerId);
            if (dataCenterId == null || workerId == null) {
                throw new RuntimeException("get SnowflakeIdWorker error");
            }
            return new SnowflakeIdWorker(workerId, dataCenterId);
        } catch (Exception e) {
            throw new RuntimeException("get SnowflakeIdWorker error", e);
        }
    }

    private void getSnowWorkIdByRedis() throws UnknownHostException {
        // Walk every (dataCenterId, workId) slot; the first SETNX that succeeds belongs to this instance.
        for (int dataCenterId = 0; dataCenterId <= DATA_SIZE; dataCenterId++) {
            for (int workId = 0; workId <= DATA_SIZE; workId++) {
                String key = Const.Cache.SERVER_NAME + ":snow:id:" + dataCenterId + "_" + workId;
                String value = getHostName();
                String result = redis.setNx(key, value, snowflakeExpireTime);
                logger.info("redis setNx key:[{}],value:[{}],seconds:[{}]", key, value, snowflakeExpireTime);
                if (OK.equals(result)) {
                    SnowWorkIdLocalCache.setCache(workId, dataCenterId);
                    return;
                }
            }
        }
        throw new RuntimeException("get SnowflakeIdWorker error");
    }

    public static String getHostName() throws UnknownHostException {
        InetAddress addr = InetAddress.getLocalHost();
        String hostName = addr.getHostName();
        if (hostName == null) {
            hostName = StringUtils.EMPTY;
        }
        return hostName;
    }
}
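IRedis is a project-specific wrapper, so its setNx signature is not shown in the article. Judging by the OK check above, it is assumed to behave like Redis's SET key value NX EX seconds, i.e. claim the key and attach a TTL in a single atomic call. A sketch of that assumption with the plain Jedis client (the class and method names below are illustrative, not the project's actual API):

import redis.clients.jedis.Jedis;
import redis.clients.jedis.params.SetParams;

public class RedisSetNxSketch {

    /** Assumed equivalent of IRedis.setNx: SET key value NX EX seconds, returning "OK" only if the key was absent. */
    public static String setNx(Jedis jedis, String key, String value, int seconds) {
        return jedis.set(key, value, SetParams.setParams().nx().ex(seconds));
    }
}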

2. Cache the acquired ids locally:


import java.util.HashMap;
import java.util.Map;

public class SnowWorkIdLocalCache {

    private SnowWorkIdLocalCache() {
    }

    private static final String DATA_CENTER_ID = "data_center_id";
    private static final String WORK_ID = "work_id";

    private static Map<String, Integer> cacheMap = new HashMap<>(2);

    static void setCache(int workId, int dataCenterId) {
        cacheMap.put(WORK_ID, workId);
        cacheMap.put(DATA_CENTER_ID, dataCenterId);
    }

    public static Integer getWorkId() {
        return cacheMap.get(WORK_ID);
    }

    public static Integer getDataCenterId() {
        return cacheMap.get(DATA_CENTER_ID);
    }
}

3. Keep the claimed slot alive periodically:


import com.test.common.Const;
import com.test.config.jedis.core.IRedis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@EnableScheduling
@Component
public class SnowWorkHealth {

    private static final Logger logger = LoggerFactory.getLogger(SnowWorkHealth.class);

    private static final int MAX_RETRY_COUNT = 2;
    private int failCount = 0;

    @Value("${cus.snowflake.expire:40}")
    private int snowflakeExpireTime;

    @Autowired
    private IRedis redis;

    @Scheduled(cron = "0/10 * * * * *")
    public void keepAlive() {
        Integer dataCenterId = SnowWorkIdLocalCache.getDataCenterId();
        Integer workId = SnowWorkIdLocalCache.getWorkId();
        if (dataCenterId == null || workId == null) {
            logger.error(".....keep error and system exit!!!");
            System.exit(0);
        }
        String key = Const.Cache.SERVER_NAME + ":snow:id:" + dataCenterId + "_" + workId;
        if (redis.expire(key, snowflakeExpireTime)) {
            String hostName = redis.get(key);
            logger.info("keep alive of snow work host:{}, dataCenterId: {},workId: {}", hostName, dataCenterId, workId);
        } else {
            logger.error("keep snow work id active exception of redis");
            failCount++;
        }
        if (failCount >= MAX_RETRY_COUNT) {
            logger.error(".....keep error and system exit!!!");
            System.exit(0);
        }
    }
}
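With the three pieces above in place, business code only needs to inject the SnowflakeIdWorker bean produced by SnowBeanConfig; no per-instance configuration is required. A minimal usage sketch (the service class below is illustrative, not from the original project):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class OrderIdService {

    @Autowired
    private SnowflakeIdWorker snowflakeIdWorker;

    /** nextId() is synchronized, so concurrent callers each receive a unique, roughly time-ordered id. */
    public String newOrderId() {
        return String.valueOf(snowflakeIdWorker.nextId());
    }
}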
