Background
This sharding feature with automatic monthly table creation took me quite a while to build and has not gone live yet. It was implemented inside the RuoYi framework and I hit quite a few pitfalls along the way, but it is working now, so I am sharing the code.
References
First, some reference articles I used:
【若依系列】集成ShardingSphere
Sharding-JDBC(九)5.3.0版本,实现按月分表、自动建表、自动刷新节点
ShardingSphere系列
ShardingSphere实现单库按月份分表、自动创建表、自动刷新分片节点
MySQL分库篇:Sharding-Sphere分库分表框架的保姆级教学
I read all of these articles and wrote my code while comparing against them. Since I am on RuoYi they do not map one to one, so below is the code I actually got working.
Code
ruoyi-admin\pom.xml
<!-- Sharding (database/table splitting) engine -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.2.0</version>
</dependency>
<!-- Alibaba Transmittable ThreadLocal, solves passing values from parent to child threads -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>transmittable-thread-local</artifactId>
<version>2.14.2</version>
</dependency>
ruoyi-framework\pom.xml
<!-- Sharding-JDBC database/table sharding -->
<!-- https://mvnrepository.com/artifact/org.apache.shardingsphere/shardingsphere-jdbc-core -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core</artifactId>
<version>5.2.0</version>
</dependency>
\ruoyi-framework\src\main\java\com\ruoyi\framework\config\DruidConfig.java
/**
 * druid multi-datasource configuration
 */
@Configuration
public class DruidConfig
{
@Bean
@ConfigurationProperties("spring.dynamic.datasource.druid.master")
public DataSource masterDataSource(DruidProperties druidProperties)
{
DruidDataSource dataSource = DruidDataSourceBuilder.create().build();
return druidProperties.dataSource(dataSource);
}
@Bean
@ConfigurationProperties("spring.dynamic.datasource.druid.slave")
@ConditionalOnProperty(prefix = "spring.dynamic.datasource.druid.slave", name = "enabled", havingValue = "true")
public DataSource slaveDataSource(DruidProperties druidProperties)
{
DruidDataSource dataSource = DruidDataSourceBuilder.create().build();
return druidProperties.dataSource(dataSource);
}
@Bean(name = "shardingDataSource")
public DataSource shardingDataSource() throws Exception
{
DataSource dataSource = YamlShardingSphereDataSourceFactory.createDataSource(ShardingConfig.getShardingYAMLFile());
return dataSource;
}
@Bean(name = "dynamicDataSource")
@Primary
public DynamicDataSource dataSource(DataSource masterDataSource)
{
Map<Object, Object> targetDataSources = new HashMap<>();
targetDataSources.put(DataSourceType.MASTER.name(), masterDataSource);
setDataSource(targetDataSources, DataSourceType.SLAVE.name(), "slaveDataSource");
setDataSource(targetDataSources, DataSourceType.SHARDING.name(), "shardingDataSource");
return new DynamicDataSource(masterDataSource, targetDataSources);
}
    /**
     * Set a data source
     *
     * @param targetDataSources map of candidate data sources
     * @param sourceName data source name
     * @param beanName bean name
     */
public void setDataSource(Map<Object, Object> targetDataSources, String sourceName, String beanName)
{
try
{
DataSource dataSource = SpringUtils.getBean(beanName);
targetDataSources.put(sourceName, dataSource);
}
        catch (Exception e)
        {
            // Optional data source: ignore when the bean is not registered
        }
}
    /**
     * Remove the advertisement at the bottom of the Druid monitoring page
     */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Bean
@ConditionalOnProperty(name = "spring.dynamic.datasource.druid.statViewServlet.enabled", havingValue = "true")
public FilterRegistrationBean removeDruidFilterRegistrationBean(DruidStatProperties properties)
{
        // Read the web monitoring page parameters
DruidStatProperties.StatViewServlet config = properties.getStatViewServlet();
        // Extract the configured path of common.js
String pattern = config.getUrlPattern() != null ? config.getUrlPattern() : "/druid/*";
String commonJsPattern = pattern.replaceAll("\\*", "js/common.js");
final String filePath = "support/http/resources/js/common.js";
        // Create a filter to do the filtering
Filter filter = new Filter()
{
@Override
public void init(javax.servlet.FilterConfig filterConfig) throws ServletException
{
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException
{
chain.doFilter(request, response);
                // Reset the buffer; response headers are not reset
response.resetBuffer();
                // Read common.js
String text = Utils.readFromResource(filePath);
                // Replace the banner via regex to strip the ad at the bottom
text = text.replaceAll("<a.*?banner\"></a><br/>", "");
text = text.replaceAll("powered.*?shrek.wang</a>", "");
response.getWriter().write(text);
}
@Override
public void destroy()
{
}
};
FilterRegistrationBean registrationBean = new FilterRegistrationBean();
registrationBean.setFilter(filter);
registrationBean.addUrlPatterns(commonJsPattern);
return registrationBean;
}
}
New file:
\framework\config\ShardingConfig.java
@Configuration
public class ShardingConfig {
    /** Configuration file path */
    private static final String CONFIG_FILE = "sharding.yml";
    /**
     * Get the sharding data source configuration file
     */
public static File getShardingYAMLFile() {
return new File(Objects.requireNonNull(
ShardingConfig.class.getClassLoader().getResource(CONFIG_FILE), String.format("File `%s` is not existed.", CONFIG_FILE)).getFile());
}
}
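One thing to watch: getResource(...).getFile() only works while the application runs from an exploded classpath; once the project is packaged into a jar, sharding.yml can no longer be opened as a java.io.File. A minimal jar-safe sketch that could replace the File-based call, assuming the byte[] overload of YamlShardingSphereDataSourceFactory available in 5.x (needs the corresponding InputStream/DataSource imports):

    // Sketch: jar-safe alternative to the File-based factory call above
    public static DataSource createShardingDataSource() throws Exception {
        try (InputStream in = ShardingConfig.class.getClassLoader().getResourceAsStream(CONFIG_FILE)) {
            Objects.requireNonNull(in, String.format("File `%s` is not existed.", CONFIG_FILE));
            byte[] yamlBytes = in.readAllBytes(); // Java 9+; on Java 8 copy the stream into a byte[] manually
            return YamlShardingSphereDataSourceFactory.createDataSource(yamlBytes);
        }
    }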
framework\config\properties\DruidProperties.java
/**
 * druid configuration properties
 */
@Configuration
public class DruidProperties
{
@Value("${spring.dynamic.datasource.druid.initialSize}")
private int initialSize;
@Value("${spring.dynamic.datasource.druid.minIdle}")
private int minIdle;
@Value("${spring.dynamic.datasource.druid.maxActive}")
private int maxActive;
@Value("${spring.dynamic.datasource.druid.maxWait}")
private int maxWait;
@Value("${spring.dynamic.datasource.druid.timeBetweenEvictionRunsMillis}")
private int timeBetweenEvictionRunsMillis;
@Value("${spring.dynamic.datasource.druid.minEvictableIdleTimeMillis}")
private int minEvictableIdleTimeMillis;
@Value("${spring.dynamic.datasource.druid.maxEvictableIdleTimeMillis}")
private int maxEvictableIdleTimeMillis;
@Value("${spring.dynamic.datasource.druid.validationQuery}")
private String validationQuery;
@Value("${spring.dynamic.datasource.druid.testWhileIdle}")
private boolean testWhileIdle;
@Value("${spring.dynamic.datasource.druid.testOnBorrow}")
private boolean testOnBorrow;
@Value("${spring.dynamic.datasource.druid.testOnReturn}")
private boolean testOnReturn;
public DruidDataSource dataSource(DruidDataSource datasource)
{
        /** Initial, minimum and maximum pool size */
        datasource.setInitialSize(initialSize);
        datasource.setMaxActive(maxActive);
        datasource.setMinIdle(minIdle);
        /** Maximum wait time to obtain a connection */
        datasource.setMaxWait(maxWait);
        /** Interval between checks for idle connections that should be closed, in milliseconds */
        datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
        /** Minimum and maximum lifetime of a connection in the pool, in milliseconds */
        datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
        datasource.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis);
        /**
         * SQL used to validate connections; must be a query, commonly select 'x'. If validationQuery is null, testOnBorrow, testOnReturn and testWhileIdle have no effect.
         */
        datasource.setValidationQuery(validationQuery);
        /** Recommended true; does not hurt performance and improves safety. On borrow, if the connection has been idle longer than timeBetweenEvictionRunsMillis, validationQuery is executed to check it. */
        datasource.setTestWhileIdle(testWhileIdle);
        /** Run validationQuery when borrowing a connection; enabling this lowers performance. */
        datasource.setTestOnBorrow(testOnBorrow);
        /** Run validationQuery when returning a connection; enabling this lowers performance. */
        datasource.setTestOnReturn(testOnReturn);
return datasource;
}
}
\common\enums\DataSourceType.java
/**
 * Data source type
 *
 * @author ruoyi
 */
public enum DataSourceType
{
    /**
     * Master database
     */
    MASTER,
    /**
     * Slave database
     */
    SLAVE,
    /**
     * Sharding database
     */
    SHARDING
}
\ruoyi-admin\src\main\resources\sharding.yml
# Data source configuration
dataSources:
sharding:
dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
driverClassName: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://localhost:3306/ring_iot?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
username: root
password: root
# Rule configuration
rules:
- !SHARDING
  # Table sharding strategy configuration
tables:
    # lomo_ring_history is the logical table (ring history records)
lomo_ring_history:
      # Data nodes; with dynamically created monthly tables only the logic table is listed here (the commented-out line below shows a static by-year layout)
# actualDataNodes: sharding.mall_order_$->{2023..2024}
actualDataNodes: sharding.lomo_ring_history
tableStrategy:
        # Use the standard sharding strategy
standard:
          # Sharding column, i.e. the column the sharding is based on
shardingColumn: time
          # Sharding algorithm name; uppercase letters and underscores are not supported, otherwise startup fails
shardingAlgorithmName: time-sharding-altorithm
  # Sharding algorithm configuration
shardingAlgorithms:
    # Sharding algorithm name; uppercase letters and underscores are not supported, otherwise startup fails
time-sharding-altorithm:
      # Type: custom (class-based) strategy
type: CLASS_BASED
props:
        # Sharding strategy
strategy: STANDARD
        # Sharding algorithm class
algorithmClassName: com.ruoyi.sharding.TimeShardingAlgorithm
props:
  # Print the actual SQL
sql-show: true
\ruoyi-admin\src\main\resources\application-druid.yml
# Data source configuration
spring:
datasource:
driver-class-name: org.apache.shardingsphere.driver.ShardingSphereDriver
url: jdbc:shardingsphere:classpath:sharding.yml
  # Multiple data sources
dynamic:
datasource:
druid:
        # Master data source
master:
type: com.alibaba.druid.pool.DruidDataSource
driverClassName: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://localhost:3306/ring_iot?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
username: xxx
password: xxx
# url: jdbc:mysql://${MYSQL_HOST:xxx.1xx.1xx.1xx}:${MYSQL_PORT:3307}/${MYSQL_DB_NAME:ring_iot}?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
# username: "${MYSQL_USER_NAME:root}"
# password: "${MYSQL_PWD:xxxx}"
        # Slave data source
slave:
          # Slave data source switch, disabled by default
enabled: false
url:
username:
password:
        # Initial connection count
initialSize: 5
        # Minimum pool size
minIdle: 10
        # Maximum pool size
maxActive: 20
        # Maximum wait time to obtain a connection
maxWait: 60000
        # Interval between checks for idle connections that should be closed, in milliseconds
timeBetweenEvictionRunsMillis: 60000
        # Minimum lifetime of a connection in the pool, in milliseconds
minEvictableIdleTimeMillis: 300000
        # Maximum lifetime of a connection in the pool, in milliseconds
maxEvictableIdleTimeMillis: 900000
        # Connection validation query
validationQuery: SELECT 1 FROM DUAL
testWhileIdle: true
testOnBorrow: false
testOnReturn: false
webStatFilter:
enabled: true
statViewServlet:
enabled: true
          # Allow-list; leave empty to allow all access
allow:
url-pattern: /druid/*
          # Console username and password
login-username: ruoyi
login-password: 123456
filter:
stat:
enabled: true
            # Slow SQL logging
log-slow-sql: true
slow-sql-millis: 1000
merge-sql: true
wall:
config:
multi-statement-allow: true
New files:
\ruoyi-admin\src\main\java\com\ruoyi\sharding\TimeShardingAlgorithm.java
public class TimeShardingAlgorithm implements StandardShardingAlgorithm<Long>, ShardingAutoTableAlgorithm {
private static final Logger log = LoggerFactory.getLogger(TimeShardingAlgorithm.class);
    /**
     * Table-suffix time format (one table per month)
     */
    private static final DateTimeFormatter TABLE_SHARD_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyyMM");
    /**
     * Table split symbol, e.g. in mall_member_2024 the split symbol is "_"
     */
    private final String TABLE_SPLIT_SYMBOL = "_";
    private Properties props;
    private int autoTablesAmount;
    /**
     * Precise sharding (INSERT)
     * @param tableNames all shard tables configured for this sharding data source
     * @param preciseShardingValue sharding value: logicTableName is the logic table, columnName the sharding column, value the sharding value parsed from the SQL
     * @return table name
     */
    @Override
    public String doSharding(Collection<String> tableNames, PreciseShardingValue<Long> preciseShardingValue) {
        String logicTableName = preciseShardingValue.getLogicTableName();
        // Log the sharding information
        log.info("精确分片,节点配置表名:{}", tableNames);
        // The sharding column `time` holds epoch milliseconds; convert it to the month suffix
        Long dateLong = preciseShardingValue.getValue();
        LocalDateTime dateTime = Instant.ofEpochMilli(dateLong).atZone(ZoneId.systemDefault()).toLocalDateTime();
        String resultTableName = logicTableName + TABLE_SPLIT_SYMBOL + dateTime.format(TABLE_SHARD_TIME_FORMATTER);
        // Check whether the table name cache still needs to be initialized
        if (tableNames.size() == 1) {
            // Only the logic table is configured, so load all physical table names from the database
            List<String> allTableNameBySchema = ShardingAlgorithmTool.getAllTableNameBySchema(logicTableName);
            tableNames.clear(); // clear first, then load all shard tables
            tableNames.addAll(allTableNameBySchema);
            autoTablesAmount = allTableNameBySchema.size();
        }
        return getShardingTableAndCreate(logicTableName, resultTableName, tableNames);
    }
    /**
     * Range sharding (SELECT / UPDATE / DELETE)
     * @param tableNames all shard tables configured for this sharding data source
     * @param rangeShardingValue sharding range
     * @return collection of table names
     */
    @Override
    public Collection<String> doSharding(Collection<String> tableNames, RangeShardingValue<Long> rangeShardingValue) {
        String logicTableName = rangeShardingValue.getLogicTableName();
        // Log the sharding information
        log.info("范围分片,节点配置表名:{}", tableNames);
        // Check whether the table name cache still needs to be initialized
        if (tableNames.size() == 1) {
            // Only the logic table is configured, so load all physical table names from the database
            List<String> allTableNameBySchema = ShardingAlgorithmTool.getAllTableNameBySchema(logicTableName);
            tableNames.clear(); // clear first, then load all shard tables
            tableNames.addAll(allTableNameBySchema);
            autoTablesAmount = allTableNameBySchema.size();
        }
        // Endpoints of the BETWEEN ... AND ... range; the sharding column holds epoch milliseconds
        Range<Long> valueRange = rangeShardingValue.getValueRange();
        boolean hasLowerBound = valueRange.hasLowerBound();
        boolean hasUpperBound = valueRange.hasUpperBound();
        // When a bound is missing, fall back to the earliest/latest existing shard table
        LocalDateTime min = hasLowerBound
                ? Instant.ofEpochMilli(valueRange.lowerEndpoint()).atZone(ZoneId.systemDefault()).toLocalDateTime()
                : getLowerEndpoint(tableNames);
        LocalDateTime max = hasUpperBound
                ? Instant.ofEpochMilli(valueRange.upperEndpoint()).atZone(ZoneId.systemDefault()).toLocalDateTime()
                : getUpperEndpoint(tableNames);
        // Walk month by month over the range and collect the matching shard tables
        Set<String> resultTableNames = new LinkedHashSet<>();
        resultTableNames.add(logicTableName);
        YearMonth minMonth = YearMonth.from(min);
        YearMonth maxMonth = YearMonth.from(max);
        while (!minMonth.isAfter(maxMonth)) {
            resultTableNames.add(logicTableName + TABLE_SPLIT_SYMBOL + minMonth.format(TABLE_SHARD_TIME_FORMATTER));
            minMonth = minMonth.plusMonths(1);
        }
        // Queries and updates do not need to create tables
        return resultTableNames;
    }
@Override
public void init(Properties props) {
this.props = props;
}
@Override
public int getAutoTablesAmount() {
return autoTablesAmount;
}
@Override
public String getType() {
return "CLASS_BASED";
}
public Properties getProps() {
return props;
}
    /**
     * Check whether the computed shard table names exist and create them if they do not
     *
     * @param logicTableName logic table
     * @param resultTableNames real table names, e.g. mall_order_2022
     * @param availableTargetNames table names available in the database
     * @return real table names that exist in the database
     */
public Set<String> getShardingTablesAndCreate(String logicTableName, Collection<String> resultTableNames, Collection<String> availableTargetNames) {
return resultTableNames.stream().map(o -> getShardingTableAndCreate(logicTableName, o, availableTargetNames)).collect(Collectors.toSet());
}
    /**
     * Check whether the computed shard table name exists and create it if it does not
     * @param logicTableName logic table
     * @param resultTableName real table name, e.g. mall_order_2023
     * @return a real table name confirmed to exist in the database
     */
    private String getShardingTableAndCreate(String logicTableName, String resultTableName, Collection<String> availableTargetNames) {
        // If the table is already in the cache, return it; otherwise decide whether to create it
        if (availableTargetNames.contains(resultTableName)) {
            return resultTableName;
        } else {
            // The computed table does not exist yet, create it automatically
            boolean isSuccess = ShardingAlgorithmTool.createShardingTable(logicTableName, resultTableName);
            if (isSuccess) {
                // Creation succeeded, update the cache
                availableTargetNames.add(resultTableName);
                autoTablesAmount++;
                return resultTableName;
            } else {
                // Creation failed (or was skipped), fall back to the logic table
                return logicTableName;
            }
        }
    }
    /**
     * Get the smallest shard month among the existing tables
     * @param tableNames table name collection
     * @return smallest shard time
     */
    private LocalDateTime getLowerEndpoint(Collection<String> tableNames) {
        Optional<LocalDateTime> optional = tableNames.stream()
                // only real shard tables such as lomo_ring_history_202401 carry a yyyyMM suffix
                .filter(o -> o.matches(".*" + TABLE_SPLIT_SYMBOL + "\\d{6}$"))
                .map(o -> YearMonth.parse(o.substring(o.lastIndexOf(TABLE_SPLIT_SYMBOL) + 1), TABLE_SHARD_TIME_FORMATTER)
                        .atDay(1).atStartOfDay())
                .min(Comparator.naturalOrder());
        if (optional.isPresent()) {
            return optional.get();
        } else {
            log.error("获取数据最小分表失败,请稍后重试,tableName:{}", tableNames);
            throw new IllegalArgumentException("获取数据最小分表失败,请稍后重试");
        }
    }
    /**
     * Get the largest shard month among the existing tables
     * @param tableNames table name collection
     * @return largest shard time
     */
    private LocalDateTime getUpperEndpoint(Collection<String> tableNames) {
        Optional<LocalDateTime> optional = tableNames.stream()
                .filter(o -> o.matches(".*" + TABLE_SPLIT_SYMBOL + "\\d{6}$"))
                .map(o -> YearMonth.parse(o.substring(o.lastIndexOf(TABLE_SPLIT_SYMBOL) + 1), TABLE_SHARD_TIME_FORMATTER)
                        .atDay(1).atStartOfDay())
                .max(Comparator.naturalOrder());
        if (optional.isPresent()) {
            return optional.get();
        } else {
            log.error("获取数据最大分表失败,请稍后重试,tableName:{}", tableNames);
            throw new IllegalArgumentException("获取数据最大分表失败,请稍后重试");
        }
    }
}
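To make the routing concrete: the whole algorithm boils down to "epoch-millisecond value of time → yyyyMM suffix". A small standalone illustration with a made-up timestamp:

    // Standalone illustration of the suffix logic used by TimeShardingAlgorithm
    long time = 1704067200000L; // example value of the `time` column
    String suffix = Instant.ofEpochMilli(time)
            .atZone(ZoneId.systemDefault())
            .toLocalDateTime()
            .format(DateTimeFormatter.ofPattern("yyyyMM"));
    String physicalTable = "lomo_ring_history" + "_" + suffix; // -> lomo_ring_history_202401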
\ruoyi-admin\src\main\java\com\ruoyi\sharding\SpringUtil.java
@Component
public class SpringUtil implements ApplicationContextAware {
private static ApplicationContext applicationContext = null;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
SpringUtil.applicationContext = applicationContext;
}
public static ApplicationContext getApplicationContext() {
return SpringUtil.applicationContext;
}
public static <T> T getBean(Class<T> cla) {
return applicationContext.getBean(cla);
}
public static <T> T getBean(String name, Class<T> cal) {
return applicationContext.getBean(name, cal);
}
public static String getProperty(String key) {
return applicationContext.getBean(Environment.class).getProperty(key);
}
}
\ruoyi-admin\src\main\java\com\ruoyi\sharding\ShardingTablesLoadRunner.java
@Order(value = 1) // the smaller the number, the earlier it runs
@Component
public class ShardingTablesLoadRunner implements CommandLineRunner {
    private final static Logger log = LoggerFactory.getLogger(ShardingTablesLoadRunner.class);
    @Autowired
    private ILomoRingHistoryService lomoRingHistoryService;
    @Override
    public void run(String... args) {
        // Read the existing shard tables and cache them
        ShardingAlgorithmTool.tableNameCacheReloadAll();
        // Run a query on the sharding column so that TimeShardingAlgorithm is actually invoked
        // (this is what triggers creation of the current month's table, see the pitfall below)
        LomoRingHistory lomoRingHistory = new LomoRingHistory();
        lomoRingHistory.setTime(DateUtils.getNowDate().getTime());
        lomoRingHistoryService.selectLomoRingHistoryList(lomoRingHistory);
        // LomoRingHistory lomoRingHistory = new LomoRingHistory();
        // lomoRingHistory.setTime(DateUtils.getNowDate().getTime());
        // lomoRingHistory.setUid(1111L);
        // lomoRingHistoryService.insertLomoRingHistory(lomoRingHistory);
        log.info("缓存已有分表成功");
}
}
Pitfall
Because I shard the history table by the time column, a table only gets created when a query filters on time, so I run such a query in this runner class. This itself was a pitfall: after the code was written nothing was being sharded, and the reason was simply that no query ever hit the sharding key, so the table-creation logic in the algorithm was never reached.
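Since a brand-new month's table only appears once a query on time runs in that month, a possible complement (not part of my current code, just a sketch with a hypothetical class name, assuming Spring task scheduling / @EnableScheduling is enabled) is a scheduled warm-up that fires the same query at the start of every month:

    // Hypothetical warm-up task so each new month's shard table is created right away
    @Component
    public class ShardingMonthlyWarmUp {
        @Autowired
        private ILomoRingHistoryService lomoRingHistoryService;
        /** 00:05 on the 1st day of every month */
        @Scheduled(cron = "0 5 0 1 * *")
        public void warmUp() {
            LomoRingHistory query = new LomoRingHistory();
            query.setTime(System.currentTimeMillis()); // sharding column, routes through TimeShardingAlgorithm
            lomoRingHistoryService.selectLomoRingHistoryList(query);
        }
    }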
\ruoyi-admin\src\main\java\com\ruoyi\sharding\ShardingTableCache.java
public enum ShardingTableCache {
    /**
     * Ring history table (the logical table lomo_ring_history)
     */
MALL_ORDER("lomo_ring_history", new HashSet<>());
    /**
     * Logic table name
     */
    private final String logicTableName;
    /**
     * Real (physical) table names
     */
    private final Set<String> resultTableNamesCache;
private static Map<String, ShardingTableCache> valueMap = new HashMap<>();
static {
Arrays.stream(ShardingTableCache.values()).forEach(o -> valueMap.put(o.logicTableName, o));
}
ShardingTableCache(String logicTableName, Set<String> resultTableNamesCache) {
this.logicTableName = logicTableName;
this.resultTableNamesCache = resultTableNamesCache;
}
public static ShardingTableCache of(String value) {
return valueMap.get(value);
}
public String logicTableName() {
return logicTableName;
}
public Set<String> resultTableNamesCache() {
return resultTableNamesCache;
}
    /**
     * Update cache and configuration (atomic operation)
     *
     * @param tableNameList real table names
     */
    public void atomicUpdateCacheAndActualDataNodes(List<String> tableNameList) {
        synchronized (resultTableNamesCache) {
            // Clear the old cache
            resultTableNamesCache.clear();
            // Write the new cache
            resultTableNamesCache.addAll(tableNameList);
            // Dynamically refresh the actualDataNodes configuration
            ShardingAlgorithmTool.actualDataNodesRefresh(logicTableName, tableNameList);
        }
    }
public static Set<String> logicTableNames() {
return valueMap.keySet();
}
@Override
public String toString() {
return "ShardingTableCache{" +
"logicTableName='" + logicTableName + '\'' +
", resultTableNamesCache=" + resultTableNamesCache +
'}';
}
}
\ruoyi-admin\src\main\java\com\ruoyi\sharding\ShardingAlgorithmTool.java
@Component
public class ShardingAlgorithmTool implements InitializingBean {
private static final Logger log = LoggerFactory.getLogger(ShardingAlgorithmTool.class);
    /** Table split symbol, e.g. in mall_order_2024 the split symbol is "_" */
private static final String TABLE_SPLIT_SYMBOL = "_";
@Value("${spring.dynamic.datasource.druid.master.url}")
private String MASTER_URL;
@Value("${spring.dynamic.datasource.druid.master.username}")
private String MASTER_USERNAME;
@Value("${spring.dynamic.datasource.druid.master.password}")
private String MASTER_PASSWORD;
private static String DATASOURCE_URL = "jdbc:mysql://localhost:3306/ring_iot?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8";
private static String DATASOURCE_USERNAME = "root";
private static String DATASOURCE_PASSWORD = "root";
@Override
public void afterPropertiesSet() throws Exception {
DATASOURCE_URL = MASTER_URL;
DATASOURCE_USERNAME = MASTER_USERNAME;
DATASOURCE_PASSWORD = MASTER_PASSWORD;
log.info("afterPropertiesSet,URL:{}, username:{}, password:{}", DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD);
}
    /**
     * Get the ContextManager of a ShardingSphere data source (via reflection)
     * @param dataSource ShardingSphere data source
     * @return ContextManager
     */
public static ContextManager getContextManager(final ShardingSphereDataSource dataSource) {
try {
Field field = ShardingSphereDataSource.class.getDeclaredField("contextManager");
field.setAccessible(true);
return (ContextManager) field.get(dataSource);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
    /**
     * Reload all shard-table caches
     */
public static void tableNameCacheReloadAll() {
Arrays.stream(ShardingTableCache.values()).forEach(ShardingAlgorithmTool::tableNameCacheReload);
}
    /**
     * Reload the cache for one shard table
     * @param logicTable logic table, e.g. mall_order
     */
    public static void tableNameCacheReload(ShardingTableCache logicTable) {
        // Read all matching table names from the database
        List<String> tableNameList = getAllTableNameBySchema(logicTable.logicTableName());
        // Update cache and actualDataNodes configuration (atomic operation);
        // this already clears the old cache, writes the new one and refreshes the config
        logicTable.atomicUpdateCacheAndActualDataNodes(tableNameList);
    }
    /**
     * Get all table names that belong to a logic table
     * @param logicTableName logic table
     * @return table name collection
     */
public static List<String> getAllTableNameBySchema(String logicTableName) {
List<String> tableNames = new ArrayList<>();
if (StringUtils.isEmpty(DATASOURCE_URL) || StringUtils.isEmpty(DATASOURCE_USERNAME) || StringUtils.isEmpty(DATASOURCE_PASSWORD)) {
log.error("数据库连接配置有误,请稍后重试,URL:{}, username:{}, password:{}", DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD);
throw new IllegalArgumentException("数据库连接配置有误,请稍后重试");
}
log.info("数据库连接配置,URL:{}, username:{}, password:{}", DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD);
try (Connection conn = DriverManager.getConnection(DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD);
Statement st = conn.createStatement()) {
try (ResultSet rs = st.executeQuery("show TABLES like '" + logicTableName + TABLE_SPLIT_SYMBOL + "%'")) {
tableNames.add(logicTableName);
while (rs.next()) {
String tableName = rs.getString(1);
                    // Match the shard-table pattern, e.g. ^(mall_contract_\d{6})$ for monthly yyyyMM suffixes
                    if (tableName != null && tableName.matches(String.format("^(%s\\d{6})$", logicTableName + TABLE_SPLIT_SYMBOL))) {
tableNames.add(rs.getString(1));
}
}
}
} catch (SQLException e) {
log.error("数据库连接失败,请稍后重试,原因:{}", e.getMessage(), e);
throw new IllegalArgumentException("数据库连接失败,请稍后重试");
}
return tableNames;
}
    /**
     * Dynamically refresh the actualDataNodes configuration
     *
     * @param logicTableName logic table name
     * @param tableNamesCache real table names
     */
public static void actualDataNodesRefresh(String logicTableName, List<String> tableNamesCache) {
try {
            // Data source name of the sharding node, matches the key in sharding.yml
            String dbName = "sharding";
            log.info("更新分表配置,logicTableName:{},tableNamesCache:{}", logicTableName, tableNamesCache);
            // Generate the new actualDataNodes string, e.g. sharding.lomo_ring_history_202401,sharding.lomo_ring_history_202402
            String newActualDataNodes = tableNamesCache.stream().map(o -> String.format("%s.%s", dbName, o)).collect(Collectors.joining(","));
            // Alter the rule on the running ShardingSphere data source bean; building a fresh
            // data source from sharding.yml here would leave the live routing rules untouched
            ShardingSphereDataSource shardingSphereDataSource = SpringUtil.getBean("shardingDataSource", ShardingSphereDataSource.class);
updateShardRuleActualDataNodes(shardingSphereDataSource, logicTableName, newActualDataNodes);
}catch (Exception e){
log.error("初始化动态表单失败,原因:{}", e.getMessage(), e);
}
}
    /**
     * Refresh actualDataNodes in the sharding rule configuration
     */
private static void updateShardRuleActualDataNodes(ShardingSphereDataSource dataSource, String logicTableName, String newActualDataNodes) {
// Context manager.
ContextManager contextManager = getContextManager(dataSource);
// Rule configuration.
String schemaName = "logic_db";
Collection<RuleConfiguration> newRuleConfigList = new LinkedList<>();
Collection<RuleConfiguration> oldRuleConfigList = contextManager.getMetaDataContexts()
.getMetaData()
.getGlobalRuleMetaData()
.getConfigurations();
for (RuleConfiguration oldRuleConfig : oldRuleConfigList) {
if (oldRuleConfig instanceof ShardingRuleConfiguration) {
// Algorithm provided sharding rule configuration
ShardingRuleConfiguration oldAlgorithmConfig = (ShardingRuleConfiguration) oldRuleConfig;
ShardingRuleConfiguration newAlgorithmConfig = new ShardingRuleConfiguration();
// Sharding table rule configuration Collection
Collection<ShardingTableRuleConfiguration> newTableRuleConfigList = new LinkedList<>();
Collection<ShardingTableRuleConfiguration> oldTableRuleConfigList = oldAlgorithmConfig.getTables();
oldTableRuleConfigList.forEach(oldTableRuleConfig -> {
if (logicTableName.equals(oldTableRuleConfig.getLogicTable())) {
ShardingTableRuleConfiguration newTableRuleConfig = new ShardingTableRuleConfiguration(oldTableRuleConfig.getLogicTable(), newActualDataNodes);
newTableRuleConfig.setTableShardingStrategy(oldTableRuleConfig.getTableShardingStrategy());
newTableRuleConfig.setDatabaseShardingStrategy(oldTableRuleConfig.getDatabaseShardingStrategy());
newTableRuleConfig.setKeyGenerateStrategy(oldTableRuleConfig.getKeyGenerateStrategy());
newTableRuleConfigList.add(newTableRuleConfig);
} else {
newTableRuleConfigList.add(oldTableRuleConfig);
}
});
newAlgorithmConfig.setTables(newTableRuleConfigList);
newAlgorithmConfig.setAutoTables(oldAlgorithmConfig.getAutoTables());
newAlgorithmConfig.setBindingTableGroups(oldAlgorithmConfig.getBindingTableGroups());
newAlgorithmConfig.setDefaultDatabaseShardingStrategy(oldAlgorithmConfig.getDefaultDatabaseShardingStrategy());
newAlgorithmConfig.setDefaultTableShardingStrategy(oldAlgorithmConfig.getDefaultTableShardingStrategy());
newAlgorithmConfig.setDefaultKeyGenerateStrategy(oldAlgorithmConfig.getDefaultKeyGenerateStrategy());
newAlgorithmConfig.setDefaultShardingColumn(oldAlgorithmConfig.getDefaultShardingColumn());
newAlgorithmConfig.setShardingAlgorithms(oldAlgorithmConfig.getShardingAlgorithms());
newAlgorithmConfig.setKeyGenerators(oldAlgorithmConfig.getKeyGenerators());
newRuleConfigList.add(newAlgorithmConfig);
}
}
// update context
contextManager.alterRuleConfiguration(schemaName, newRuleConfigList);
}
    /**
     * Create a shard table
     * @param logicTableName logic table
     * @param resultTableName real table name, e.g. mall_order_2024
     * @return creation result (true = created, false = not created)
     */
public static boolean createShardingTable(String logicTableName, String resultTableName) {
        // Based on the date: shard tables for months after the current one are not created in advance
String month = resultTableName.replace(logicTableName + TABLE_SPLIT_SYMBOL,"");
YearMonth shardingMonth = YearMonth.parse(month, DateTimeFormatter.ofPattern("yyyyMM"));
if (shardingMonth.isAfter(YearMonth.now())) {
return false;
}
synchronized (logicTableName.intern()) {
            // The table is not in the cache yet, so create it from the logic table's structure
executeSql(Collections.singletonList("CREATE TABLE IF NOT EXISTS `" + resultTableName + "` LIKE `" + logicTableName + "`;"));
}
return true;
}
    /**
     * Execute SQL statements
     * @param sqlList SQL collection
     */
private static void executeSql(List<String> sqlList) {
if (StringUtils.isEmpty(DATASOURCE_URL) || StringUtils.isEmpty(DATASOURCE_USERNAME) || StringUtils.isEmpty(DATASOURCE_PASSWORD)) {
log.error("数据库连接配置有误,请稍后重试,URL:{}, username:{}, password:{}", DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD);
throw new IllegalArgumentException("数据库连接配置有误,请稍后重试");
}
try (Connection conn = DriverManager.getConnection(DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD)) {
            try (Statement st = conn.createStatement()) {
                conn.setAutoCommit(false);
                for (String sql : sqlList) {
                    st.execute(sql);
                }
                // DDL auto-commits in MySQL, but commit explicitly in case non-DDL statements are passed in
                conn.commit();
            } catch (Exception e) {
                conn.rollback();
                log.error("数据表创建执行失败,请稍后重试,原因:{}", e.getMessage(), e);
                throw new IllegalArgumentException("数据表创建执行失败,请稍后重试");
            }
} catch (SQLException e) {
log.error("数据库连接失败,请稍后重试,原因:{}", e.getMessage(), e);
throw new IllegalArgumentException("数据库连接失败,请稍后重试");
}
}
}
Pitfall 2
There is a big pitfall here: the MASTER_URL, MASTER_USERNAME and MASTER_PASSWORD values are injected fine with @Value, but once the data source is switched these values become null, which blocks everything afterwards. I have not found a fix yet, so for now they are hard-coded. If anyone has a solution, please leave a comment, thanks.
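One untested idea for this pitfall: instead of copying the values into static fields once in afterPropertiesSet, resolve them from the Spring Environment each time a connection is needed, for example through the SpringUtil helper shown above; the Environment always returns the configured value regardless of which data source is currently active. The helper method names below are made up:

    // Untested sketch: look the master connection settings up on demand instead of caching them statically
    private static String masterUrl() {
        return SpringUtil.getProperty("spring.dynamic.datasource.druid.master.url");
    }
    private static String masterUsername() {
        return SpringUtil.getProperty("spring.dynamic.datasource.druid.master.username");
    }
    private static String masterPassword() {
        return SpringUtil.getProperty("spring.dynamic.datasource.druid.master.password");
    }
    // getAllTableNameBySchema() / executeSql() would then call masterUrl() etc. instead of the static fields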
I shard the history data by time, so the corresponding mapper and service need the following additions.
\ruoyi-admin\src\main\java\com\ruoyi\device\service\impl\LomoRingHistoryServiceImpl.java
/**
 * Service layer implementation for history records
 */
@Service
public class LomoRingHistoryServiceImpl implements ILomoRingHistoryService
{
@Autowired
private LomoRingHistoryMapper lomoRingHistoryMapper;
    /**
     * Query a history record
     *
     * @param id primary key of the history record
     * @return history record
     */
@Override
@DataSource(DataSourceType.SHARDING)
public LomoRingHistory selectLomoRingHistoryById(Long id)
{
return lomoRingHistoryMapper.selectLomoRingHistoryById(id);
}
    /**
     * Query the history record list
     *
     * @param lomoRingHistory history record
     * @return history record collection
     */
@Override
@DataSource(DataSourceType.SHARDING)
public List<LomoRingHistory> selectLomoRingHistoryList(LomoRingHistory lomoRingHistory)
{
return lomoRingHistoryMapper.selectLomoRingHistoryList(lomoRingHistory);
}
@Override
@DataSource(DataSourceType.SHARDING)
public List<LomoRingHistory> selectHistoryListForQueryHistory(QueryHistory queryHistory) {
return lomoRingHistoryMapper.selectHistoryListForQueryHistory(queryHistory);
}
@Override
@DataSource(DataSourceType.SHARDING)
public void insertLomoRingHistoryBatch(List<LomoRingHistory> historyBeanList) {
lomoRingHistoryMapper.insertLomoRingHistoryBatch(historyBeanList);
}
    /**
     * Insert a history record
     *
     * @param lomoRingHistory history record
     * @return result
     */
@Override
@DataSource(DataSourceType.SHARDING)
public int insertLomoRingHistory(LomoRingHistory lomoRingHistory)
{
lomoRingHistory.setCreateTime(DateUtils.getNowDate());
return lomoRingHistoryMapper.insertLomoRingHistory(lomoRingHistory);
}
    /**
     * Update a history record
     *
     * @param lomoRingHistory history record
     * @return result
     */
@Override
@DataSource(DataSourceType.SHARDING)
public int updateLomoRingHistory(LomoRingHistory lomoRingHistory)
{
lomoRingHistory.setCreateTime(null);
return lomoRingHistoryMapper.updateLomoRingHistory(lomoRingHistory);
}
    /**
     * Delete history records in batch
     *
     * @param ids primary keys of the history records to delete
     * @return result
     */
@Override
@DataSource(DataSourceType.SHARDING)
public int deleteLomoRingHistoryByIds(Long[] ids)
{
return lomoRingHistoryMapper.deleteLomoRingHistoryByIds(ids);
}
    /**
     * Delete a history record
     *
     * @param id primary key of the history record
     * @return result
     */
@Override
@DataSource(DataSourceType.SHARDING)
public int deleteLomoRingHistoryById(Long id)
{
return lomoRingHistoryMapper.deleteLomoRingHistoryById(id);
}
}
\ruoyi-admin\src\main\java\com\ruoyi\device\mapper\LomoRingHistoryMapper.java
/**
 * Mapper interface for history records
 */
public interface LomoRingHistoryMapper
{
    /**
     * Query a history record
     *
     * @param id primary key of the history record
     * @return history record
     */
@DataSource(DataSourceType.SHARDING)
public LomoRingHistory selectLomoRingHistoryById(Long id);
    /**
     * Query the history record list
     *
     * @param lomoRingHistory history record
     * @return history record collection
     */
@DataSource(DataSourceType.SHARDING)
public List<LomoRingHistory> selectLomoRingHistoryList(LomoRingHistory lomoRingHistory);
    /**
     * Insert a history record
     *
     * @param lomoRingHistory history record
     * @return result
     */
@DataSource(DataSourceType.SHARDING)
public int insertLomoRingHistory(LomoRingHistory lomoRingHistory);
    /**
     * Update a history record
     *
     * @param lomoRingHistory history record
     * @return result
     */
@DataSource(DataSourceType.SHARDING)
public int updateLomoRingHistory(LomoRingHistory lomoRingHistory);
    /**
     * Delete a history record
     *
     * @param id primary key of the history record
     * @return result
     */
@DataSource(DataSourceType.SHARDING)
public int deleteLomoRingHistoryById(Long id);
    /**
     * Delete history records in batch
     *
     * @param ids primary keys of the records to delete
     * @return result
     */
@DataSource(DataSourceType.SHARDING)
public int deleteLomoRingHistoryByIds(Long[] ids);
@DataSource(DataSourceType.SHARDING)
List<LomoRingHistory> selectHistoryListForQueryHistory(QueryHistory queryHistory);
@DataSource(DataSourceType.SHARDING)
void insertLomoRingHistoryBatch(List<LomoRingHistory> historyBeanList);
}
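To close, a minimal usage sketch (values are made up): as long as the sharding column time is set, the record is routed by TimeShardingAlgorithm to the matching monthly table, which is created first if it does not exist yet.

    // Minimal usage sketch: insert one record into the current month's table
    LomoRingHistory history = new LomoRingHistory();
    history.setUid(1111L);                               // example id
    history.setTime(System.currentTimeMillis());         // sharding column, epoch milliseconds
    lomoRingHistoryService.insertLomoRingHistory(history); // ends up in lomo_ring_history_<yyyyMM>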