【ShardingSphere】Spring Boot + Sharding-JDBC: custom database and table sharding with a precise sharding algorithm

The previous two articles covered two ways of sharding databases and tables. Both boil down to the same approach: the shards are defined purely through configuration. In this article we look at another option: besides configuration, Sharding-JDBC also lets you define the sharding rules in code.


Custom sharding rules

Previously we sharded by id modulo and by date. To demonstrate the new approach we will keep sharding by date, but this time the rule is implemented in code. Before writing any code, let's pin the rule down: a record is routed to a database by id mod 2, and to a table by the year in its day_date column.
We create two databases, ds0 and ds1, and in each of them the two tables t_order2021 and t_order2022:

CREATE TABLE `t_order2021` (
  `id` bigint(32) NOT NULL,
  `user_id` int(11) DEFAULT NULL,
  `order_id` int(11) DEFAULT NULL,
  `cloumn` varchar(45) DEFAULT NULL,
  `day_date` char(8) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `t_order2022` (
  `id` bigint(32) NOT NULL,
  `user_id` int(11) DEFAULT NULL,
  `order_id` int(11) DEFAULT NULL,
  `cloumn` varchar(45) DEFAULT NULL,
  `day_date` char(8) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
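
To make the rule concrete, here is a minimal sketch in plain Java (purely illustrative; the id and date values are made up) of where a record lands under the rule above:

public class RoutingRuleDemo {

    public static void main(String[] args) {
        long id = 7L;                 // snowflake-generated primary key
        String dayDate = "20210610";  // sharding column day_date, format yyyyMMdd

        String targetDb = "ds" + (id % 2);                        // -> ds1
        String targetTable = "t_order" + dayDate.substring(0, 4); // -> t_order2021

        System.out.println(targetDb + "." + targetTable);         // ds1.t_order2021
    }
}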

Setting up the project

With the databases in place, we can set up the project: a Spring Boot application integrating MyBatis and Sharding-JDBC. The dependencies are as follows:

<properties>
    <java.version>1.8</java.version>
    <sharding-sphere.version>4.1.1</sharding-sphere.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>

    <dependency>
        <groupId>org.apache.shardingsphere</groupId>
        <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
        <version>${sharding-sphere.version}</version>
    </dependency>

    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <scope>provided</scope>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>3.12.0</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/com.zaxxer/HikariCP -->
    <dependency>
        <groupId>com.zaxxer</groupId>
        <artifactId>HikariCP</artifactId>
        <version>4.0.3</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>8.0.25</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.mybatis.spring.boot/mybatis-spring-boot-starter -->
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>2.1.4</version>
    </dependency>
</dependencies>

Sharding configuration

Add the MyBatis and Sharding-JDBC configuration:

server.port=10080

spring.shardingsphere.datasource.names=ds0,ds1

# First data source
spring.shardingsphere.datasource.ds0.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.ds0.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.ds0.jdbc-url=jdbc:mysql://localhost:3306/ds0
spring.shardingsphere.datasource.ds0.username=root
spring.shardingsphere.datasource.ds0.password=root

# Second data source
spring.shardingsphere.datasource.ds1.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.ds1.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.ds1.jdbc-url=jdbc:mysql://localhost:3306/ds1
spring.shardingsphere.datasource.ds1.username=root
spring.shardingsphere.datasource.ds1.password=root

# Database sharding strategy for t_order (standard strategy, sharding column id)
spring.shardingsphere.sharding.tables.t_order.database-strategy.standard.sharding-column=id
# Custom database sharding algorithm
spring.shardingsphere.sharding.tables.t_order.database-strategy.standard.precise-algorithm-class-name=com.example.test.config.MyDbPreciseShardingAlgorithm

# Table sharding strategy for t_order (standard strategy, sharding column day_date)
spring.shardingsphere.sharding.tables.t_order.actual-data-nodes=ds$->{0..1}.t_order$->{2021..2022}
spring.shardingsphere.sharding.tables.t_order.table-strategy.standard.sharding-column=day_date
# Custom table sharding algorithm
spring.shardingsphere.sharding.tables.t_order.table-strategy.standard.precise-algorithm-class-name=com.example.test.config.MyTablePreciseShardingAlgorithm

# Key generation strategy for the id column of t_order
spring.shardingsphere.sharding.tables.t_order.key-generator.column=id
spring.shardingsphere.sharding.tables.t_order.key-generator.type=SNOWFLAKE

# Enable SQL logging
spring.shardingsphere.props.sql.show=true

# MyBatis configuration
mybatis.mapper-locations=classpath:mapping/*.xml
mybatis.type-aliases-package=com.example.test.po

# DAO log level
logging.level.com.example.test.dao=DEBUG

Add the MyBatis mapper scan annotation @MapperScan("com.example.test.dao") to the startup class.
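
For reference, a minimal sketch of what the startup class looks like (the class name TestApplication is an assumption; the package follows the ones used throughout this article):

package com.example.test;

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
@MapperScan("com.example.test.dao")
public class TestApplication {

    public static void main(String[] args) {
        SpringApplication.run(TestApplication.class, args);
    }
}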

The configuration above only references the two custom algorithm classes by their fully qualified names; next we implement them.

Writing the custom sharding algorithm classes

We defined the rule at the beginning of the article; now let's implement it. Since both routes are decided by a single exact value (id or day_date), the precise sharding algorithm is the right fit. The code is as follows:

package com.example.test.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.util.Collection;

/**
 * Custom database sharding algorithm
 * @author echo
 * @date 2021/6/10 10:09
 */
@Slf4j
public class MyDbPreciseShardingAlgorithm implements PreciseShardingAlgorithm<Long> {

    /**
     * Sharding logic
     *
     * @param availableTargetNames all candidate data source names
     * @param shardingValue        the sharding value passed in when the SQL executes
     * @return the matched data source name
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        // actual data source nodes
        availableTargetNames.forEach(item -> log.info("actual node db:{}", item));

        log.info("logic table name:{}, route column:{}", shardingValue.getLogicTableName(), shardingValue.getColumnName());

        // precise sharding value
        log.info("column value:{}", shardingValue.getValue());

        // route to ds0 / ds1 by id mod 2
        for (String each : availableTargetNames) {
            Long value = shardingValue.getValue();
            if (("ds" + value % 2).equals(each)) {
                return each;
            }
        }

        return null;
    }

}
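
Before wiring the class into ShardingSphere, we can sanity-check its routing with a plain main method. This is only a hedged sketch; it assumes the public PreciseShardingValue(logicTableName, columnName, value) constructor that 4.1.1 exposes:

package com.example.test.config;

import java.util.Arrays;

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

public class DbAlgorithmCheck {

    public static void main(String[] args) {
        MyDbPreciseShardingAlgorithm algorithm = new MyDbPreciseShardingAlgorithm();
        // id = 7 -> 7 % 2 = 1, so the expected target is ds1
        String target = algorithm.doSharding(
                Arrays.asList("ds0", "ds1"),
                new PreciseShardingValue<>("t_order", "id", 7L));
        System.out.println(target); // ds1
    }
}

The table sharding algorithm follows the same pattern. Note that day_date arrives as a full yyyyMMdd string, so we take its year prefix to match the physical table names t_order2021 and t_order2022:
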
package com.example.test.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.util.Collection;

/**
 * Custom table sharding algorithm
 *
 * @author echo
 * @date 2021/6/10 10:09
 */
@Slf4j
public class MyTablePreciseShardingAlgorithm implements PreciseShardingAlgorithm<String> {

    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<String> shardingValue) {
        // actual table nodes
        availableTargetNames.forEach(item -> log.info("actual node table:{}", item));

        log.info("logic table name:{}, route column:{}", shardingValue.getLogicTableName(), shardingValue.getColumnName());

        // precise sharding value
        log.info("column value:{}", shardingValue.getValue());

        // day_date is stored as yyyyMMdd, so match the physical table by its year prefix
        for (String each : availableTargetNames) {
            if (("t_order" + shardingValue.getValue().substring(0, 4)).equals(each)) {
                return each;
            }
        }

        return null;
    }

}
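
And a similar quick check for the table algorithm (same assumptions as above):

package com.example.test.config;

import java.util.Arrays;

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

public class TableAlgorithmCheck {

    public static void main(String[] args) {
        MyTablePreciseShardingAlgorithm algorithm = new MyTablePreciseShardingAlgorithm();
        // day_date = "20210610" -> year 2021, so the expected target is t_order2021
        String target = algorithm.doSharding(
                Arrays.asList("t_order2021", "t_order2022"),
                new PreciseShardingValue<>("t_order", "day_date", "20210610"));
        System.out.println(target); // t_order2021
    }
}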

Test code

Following the pattern of the previous articles, here is some test code:

package com.example.test.controller;

import com.example.test.po.TOrder;
import com.example.test.service.TOrderService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.List;

/**
 * @author echo
 * @date 2021/6/3 16:37
 */
@RestController
@RequestMapping("/order")
public class TOrderController {

    @Autowired
    private TOrderService tOrderService;

    @PostMapping("/save")
    public String save(@RequestBody TOrder tOrder) {
        tOrderService.save(tOrder);
        return "success";
    }

    @PostMapping("/delete")
    public String delete(@RequestParam(value = "id") Long id) {
        tOrderService.delete(id);
        return "success";
    }

    @PostMapping("/update")
    public int update(@RequestBody TOrder tOrder) {
        return tOrderService.update(tOrder);
    }

    @GetMapping("/getList")
    public List<TOrder> getList() {
        return tOrderService.getList();
    }

}

package com.example.test.service;

import com.example.test.po.TOrder;

import java.util.List;

public interface TOrderService {

    void save(TOrder tOrder);

    void delete(Long id);

    int update(TOrder tOrder);

    List<TOrder> getList();
}

package com.example.test.service;

import com.example.test.dao.TOrderDao;
import com.example.test.po.TOrder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.List;

@Service
public class TOrderServiceImpl implements TOrderService {

    @Autowired
    private TOrderDao tOrderDao;

    @Override
    public void save(TOrder tOrder) {
        tOrderDao.insert(tOrder);
    }

    @Override
    public void delete(Long id) {
        tOrderDao.delete(id);
    }

    @Override
    public int update(TOrder tOrder) {
        return tOrderDao.update(tOrder);
    }

    @Override
    public List<TOrder> getList() {
        return tOrderDao.getList();
    }

}

package com.example.test.dao;

import com.example.test.po.TOrder;

import java.util.List;

public interface TOrderDao {

    void insert(TOrder tOrder);

    List<TOrder> getList();

    void delete(Long id);

    int update(TOrder tOrder);
}

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.test.dao.TOrderDao">
    <resultMap id="BaseResultMap" type="com.example.test.po.TOrder">
        <id column="id" jdbcType="BIGINT" property="id"/>
        <result column="user_id" jdbcType="INTEGER" property="userId"/>
        <result column="order_id" jdbcType="INTEGER" property="orderId"/>
        <result column="cloumn" jdbcType="VARCHAR" property="cloumn"/>
        <result column="day_date" jdbcType="CHAR" property="dayDate"/>
    </resultMap>
    <sql id="Base_Column_List">
        id, user_id, order_id, cloumn, day_date
    </sql>

    <insert id="insert" parameterType="com.example.test.po.TOrder">
        insert into t_order (user_id, order_id, cloumn, day_date) values (#{userId}, #{orderId}, #{cloumn}, #{dayDate})
    </insert>

    <select id="getList" resultMap="BaseResultMap">
        select
        <include refid="Base_Column_List"/>
        from t_order
    </select>

    <delete id="delete" parameterType="java.lang.Long">
        delete from t_order
        where id = #{id,jdbcType=BIGINT}
    </delete>

    <update id="update" parameterType="com.example.test.po.TOrder">
        update t_order
        set
          cloumn = #{cloumn,jdbcType=VARCHAR},
          order_id = #{orderId,jdbcType=INTEGER},
          user_id = #{userId,jdbcType=INTEGER}
        where id = #{id,jdbcType=BIGINT}
    </update>
</mapper>

Once everything is in place, we can test it.

Call the endpoint http://localhost:10080/order/save and you will see that the data ends up in the database and table determined by the rules we defined.
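
For example, the save call can be issued with a simple client like the sketch below. The TOrder setter names are assumptions based on a Lombok @Data POJO with the fields used in the mapper above:

package com.example.test;

import com.example.test.po.TOrder;
import org.springframework.web.client.RestTemplate;

public class SaveOrderDemo {

    public static void main(String[] args) {
        TOrder order = new TOrder();
        order.setUserId(1);           // assumed Lombok-generated setters
        order.setOrderId(100);
        order.setCloumn("demo");
        order.setDayDate("20210610"); // year 2021 -> routed to t_order2021

        // id is omitted: ShardingSphere fills it with a snowflake key,
        // and id % 2 then decides between ds0 and ds1
        new RestTemplate().postForObject(
                "http://localhost:10080/order/save", order, String.class);
    }
}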

Summary

  • Configuration keys differ between versions, so when setting these properties, check the official documentation for the exact version you are using.
  • Different sharding strategies use different configuration keys. The precise sharding algorithm used here must be configured under the standard strategy entries, otherwise the custom classes will not take effect.