1. Obtain the configuration files
2. Install MySQL
3. Create the database and run the SQL script
4. Start Nacos and add a namespace
5. Modify the seata-server configuration files
6. Set up the Nacos configuration center
7. Start seata-server
8. Test
9. seata-storage-service
10. seata-account-service
11. seata-order-service
12. Verification
1. Obtain the configuration files
# Pull the image
docker pull seataio/seata-server:1.4.2
# Open the required ports
# seata-server
firewall-cmd --add-port=8091/tcp --zone=public --permanent
# nacos
firewall-cmd --add-port=8848/tcp --zone=public --permanent
firewall-cmd --add-port=9848/tcp --zone=public --permanent
# mysql
firewall-cmd --add-port=3306/tcp --zone=public --permanent
firewall-cmd --reload
# Run the seata container directly first, then copy its configuration files to the host for the volume mounts used later.
docker run -d \
--name seata-server \
-p 8091:8091 \
seataio/seata-server:1.4.2
docker cp seata-server:/seata-server/resources /mydata/seata/seata-config
# Remove the temporary container
docker rm -f seata-server
# Create the log directory
mkdir -p /mydata/seata/logs
2. Install MySQL (skip this step if MySQL is already installed or you connect to a database on another machine)
# Pull the image
docker pull mysql:8.0
# Create the MySQL config, data and log directories on the host and grant permissions
mkdir -p /mydata/mysql/{data,conf,log}
chmod -R 755 /mydata/mysql/
# Create the MySQL configuration file
vim /mydata/mysql/conf/my.cnf
# Add the following content to the file created above
[client]
# socket = /mydata/mysql/mysqld.sock
# Set the character encoding
default-character-set = utf8mb4
[mysqld]
# pid-file = /var/run/mysqld/mysqld.pid
# socket = /var/run/mysqld/mysqld.sock
# datadir = /var/lib/mysql
# socket = /mydata/mysql/mysqld.sock
# pid-file = /mydata/mysql/mysqld.pid
datadir = /mydata/mysql/data
character_set_server = utf8mb4
collation_server = utf8mb4_bin
# Skip DNS resolution to avoid slow MySQL connections
skip-name-resolve
secure-file-priv= NULL
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
# Custom config should go here
!includedir /etc/mysql/conf.d/
# Create and start the container
docker run \
-v /mydata/mysql/log:/var/log/mysql \
-v /mydata/mysql/data:/var/lib/mysql \
-v /mydata/mysql/conf/my.cnf:/etc/mysql/my.cnf \
-e TZ=Asia/Shanghai \
-e MYSQL_ROOT_PASSWORD=123456 \
-p 3306:3306 \
--restart=unless-stopped \
--name mysql \
-d mysql:8.0
# Enter the container
docker exec -it mysql /bin/bash
mysql -uroot -p
# Switch to the mysql system database
use mysql
# Change the allowed host and password so that root can connect from any host
# Check whether remote access is allowed; if the root user's host is localhost, change it to % to enable remote access
select host,user,plugin from user;
# Change host to %
update user set host='%' where user ='root';
# Change the authentication plugin to mysql_native_password (required with MySQL 8.x, otherwise clients such as Navicat cannot connect properly)
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'new_password';
# Reload the privilege tables
flush privileges;
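A quick check that remote root access now works. This is only a sketch: it assumes a mysql client is installed on the machine you test from and reuses the host IP and password from this guide.
# Should print the server version if remote access and the auth plugin are configured correctly
mysql -h 192.168.23.3 -P 3306 -uroot -p123456 -e "SELECT VERSION();"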
3. Create the seata database and run the following SQL script (a command-line sketch for this step follows the script)
Official mysql.sql link
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
CREATE TABLE IF NOT EXISTS `global_table`
(
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`status` TINYINT NOT NULL,
`application_id` VARCHAR(32),
`transaction_service_group` VARCHAR(32),
`transaction_name` VARCHAR(128),
`timeout` INT,
`begin_time` BIGINT,
`application_data` VARCHAR(2000),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`xid`),
KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
`branch_id` BIGINT NOT NULL,
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`resource_group_id` VARCHAR(32),
`resource_id` VARCHAR(256),
`branch_type` VARCHAR(8),
`status` TINYINT,
`client_id` VARCHAR(64),
`application_data` VARCHAR(2000),
`gmt_create` DATETIME(6),
`gmt_modified` DATETIME(6),
PRIMARY KEY (`branch_id`),
KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(
`row_key` VARCHAR(128) NOT NULL,
`xid` VARCHAR(128),
`transaction_id` BIGINT,
`branch_id` BIGINT NOT NULL,
`resource_id` VARCHAR(256),
`table_name` VARCHAR(32),
`pk` VARCHAR(36),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`row_key`),
KEY `idx_branch_id` (`branch_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
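A command-line sketch of this step, assuming the MySQL container from section 2 and that the script above has been saved on the host as /mydata/seata/mysql.sql (both the file path and running the client inside the container are assumptions):
# Create the seata database used by seata-server
docker exec -i mysql mysql -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS seata DEFAULT CHARACTER SET utf8mb4;"
# Apply the script above to it
docker exec -i mysql mysql -uroot -p123456 seata < /mydata/seata/mysql.sql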
4. Start Nacos and add a namespace
docker run -it \
-e MODE=standalone \
-e JVM_XMS=256m \
-e JVM_XMX=256m \
-e JVM_XMN=256m \
-p 8848:8848 \
-p 9848:9848 \
--name mynacos \
nacos/nacos-server:2.0.3
Open the Nacos console at http://ip:8848/nacos and create the namespace used below (seata-naming), either in the UI or with the API sketch that follows.
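A minimal sketch of creating the namespace through the Nacos console API instead of the web UI. The endpoint, the form parameters and the open (unauthenticated) access are assumptions; note that Seata's registry.conf expects the namespace ID, so the custom ID is set to seata-naming to match the configuration below.
# Create a namespace whose ID matches the "namespace" value used in registry.conf
curl -X POST "http://192.168.23.3:8848/nacos/v1/console/namespaces" \
-d "customNamespaceId=seata-naming" \
-d "namespaceName=seata-naming" \
-d "namespaceDesc=namespace for seata"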
5. Modify the seata-server configuration files
vim /mydata/seata/seata-config/file.conf
# Edit file.conf: change the store mode to db, then configure the database connection
## transaction log store, only used in seata-server
store {
## store mode: file、db、redis
mode = "db"
## rsa decryption public key
publicKey = ""
## file store property
file {
## store location dir
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
maxBranchSessionSize = 16384
# globe session size , if exceeded throws exceptions
maxGlobalSessionSize = 512
# file buffer size , if exceeded allocate new buffer
fileWriteBufferCacheSize = 16384
# when recover batch read size
sessionReloadReadSize = 100
# async, sync
flushDiskMode = async
}
## database store property
db {
## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
datasource = "druid"
## mysql/oracle/postgresql/h2/oceanbase etc.
dbType = "mysql"
driverClassName = "com.mysql.cj.jdbc.Driver"
## if using mysql to store the data, recommend add rewriteBatchedStatements=true in jdbc connection param
url = "jdbc:mysql://192.168.23.3:3306/seata?rewriteBatchedStatements=true&serverTimezone=Asia/Shanghai"
user = "root"
password = "123456"
minConn = 5
maxConn = 100
globalTable = "global_table"
branchTable = "branch_table"
lockTable = "lock_table"
queryLimit = 100
maxWait = 5000
}
## redis store property
redis {
## redis mode: single、sentinel
mode = "single"
## single mode property
single {
host = "127.0.0.1"
port = "6379"
}
## sentinel mode property
sentinel {
masterName = ""
## such as "10.28.235.65:26379,10.28.235.65:26380,10.28.235.65:26381"
sentinelHosts = ""
}
password = ""
database = "0"
minConn = 1
maxConn = 10
maxTotal = 100
queryLimit = 100
}
}
# Edit registry.conf: point the registry and the config center at the namespace created above and the corresponding Nacos IP address
vim /mydata/seata/seata-config/registry.conf
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "nacos"
nacos {
application = "seata-server"
serverAddr = "192.168.23.3:8848"
group = "SEATA_GROUP"
namespace = "seata-naming"
cluster = "default"
username = "nacos"
password = "nacos"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = 0
password = ""
cluster = "default"
timeout = 0
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
sessionTimeout = 6000
connectTimeout = 2000
username = ""
password = ""
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
aclToken = ""
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3
type = "nacos"
nacos {
serverAddr = "192.168.23.3:8848"
namespace = "seata-naming"
group = "SEATA_GROUP"
username = "nacos"
password = "nacos"
dataId = "seataServer.properties"
}
consul {
serverAddr = "127.0.0.1:8500"
aclToken = ""
}
apollo {
appId = "seata-server"
## apolloConfigService will cover apolloMeta
apolloMeta = "http://192.168.23.3:8801"
apolloConfigService = "http://192.168.23.3:8080"
namespace = "application"
apolloAccesskeySecret = ""
cluster = "seata"
}
zk {
serverAddr = "127.0.0.1:2181"
sessionTimeout = 6000
connectTimeout = 2000
username = ""
password = ""
nodePath = "/seata/seata.properties"
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file:/mydata/seata/seata-config/file.conf"
}
}
6. Set up the Nacos configuration center
Since v1.4.2, Seata supports loading all of its configuration from a single Nacos dataId; you only need to add one extra dataId entry.
Create a new configuration in Nacos, here with dataId seataServer.properties. Make sure the namespace and group match the ones in registry.conf. (A sketch of publishing it through the Nacos API follows the config.txt content below.)
Official config.txt link
# Create the config.txt file: change store.mode=file to store.mode=db and point the database settings at your own database;
# service.default.grouplist=xxx;
# Three services: storage-service, account-service and order-service
# service.vgroupMapping.storage-service-group=default
# service.vgroupMapping.order-service-group=default
# service.vgroupMapping.account-service-group=default
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
service.vgroupMapping.storage-service-group=default
service.vgroupMapping.order-service-group=default
service.vgroupMapping.account-service-group=default
service.default.grouplist=192.168.23.3:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
store.mode=db
store.publicKey=
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://192.168.23.3:3306/seata?useUnicode=true&rewriteBatchedStatements=true&serverTimezone=Asia/Shanghai
store.db.user=root
store.db.password=123456
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
store.redis.mode=single
store.redis.single.host=127.0.0.1
store.redis.single.port=6379
store.redis.sentinel.masterName=
store.redis.sentinel.sentinelHosts=
store.redis.maxConn=10
store.redis.minConn=1
store.redis.maxTotal=100
store.redis.database=0
store.redis.password=
store.redis.queryLimit=100
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k
log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
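A sketch of publishing the content above to the Nacos config center from the command line. The open config API and disabled authentication are assumptions; the Nacos console UI, or the nacos-config.sh helper script shipped in the Seata source, works just as well. The tenant parameter is the namespace ID:
# Save the properties above as config.txt, then push them as a single dataId
curl -X POST "http://192.168.23.3:8848/nacos/v1/cs/configs" \
--data-urlencode "dataId=seataServer.properties" \
--data-urlencode "group=SEATA_GROUP" \
--data-urlencode "tenant=seata-naming" \
--data-urlencode "content=$(cat config.txt)"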
7. Start seata-server
# SEATA_IP sets the externally reachable IP address
docker run -it \
--name seata-server \
-p 8091:8091 \
-e SEATA_CONFIG_NAME=file:/seata-server/resources/registry \
-e SEATA_IP=192.168.23.3 \
-e SEATA_PORT=8091 \
-v /mydata/seata/seata-config:/seata-server/resources \
-v /mydata/seata/logs:/root/logs \
seataio/seata-server:1.4.2
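Two quick checks that the server is up and registered (the Nacos instance-list endpoint and its parameters are assumptions; the service list page in the Nacos console shows the same information):
# Follow the server log; it should report listening on 8091 without errors
docker logs -f seata-server
# Ask Nacos whether a seata-server instance is registered in the namespace
curl "http://192.168.23.3:8848/nacos/v1/ns/instance/list?serviceName=seata-server&groupName=SEATA_GROUP&namespaceId=seata-naming"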
8. Test
Architecture
SpringCloud Alibaba + MyBatis + OpenFeign + Nacos
Versions
Logic
The business scenario is a user purchasing a product. The whole flow is backed by three microservices: seata-storage-service (deducts stock), seata-account-service (deducts the account balance) and seata-order-service (creates the order and calls the other two). A sketch of the business databases these services assume follows.
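The mappers and JDBC URLs in the next sections assume three business databases (seata_storage, seata_account, seata_order), one business table each, plus an undo_log table per database for Seata's AT-mode rollback records. The original article does not include this DDL, so the following is a reconstructed sketch based on the entity classes and mapper SQL below; adjust it to your own schema. The seeded rows (product 1 with stock 100, user 1 with a balance of 1000) only exist so the test URL in section 12 has data to work against.
docker exec -i mysql mysql -uroot -p123456 <<'SQL'
CREATE DATABASE IF NOT EXISTS seata_storage DEFAULT CHARACTER SET utf8mb4;
CREATE DATABASE IF NOT EXISTS seata_account DEFAULT CHARACTER SET utf8mb4;
CREATE DATABASE IF NOT EXISTS seata_order DEFAULT CHARACTER SET utf8mb4;

CREATE TABLE IF NOT EXISTS seata_storage.t_storage (
  id BIGINT PRIMARY KEY AUTO_INCREMENT,
  product_id BIGINT,
  total INT,
  used INT,
  residue INT
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
INSERT INTO seata_storage.t_storage (product_id, total, used, residue) VALUES (1, 100, 0, 100);

CREATE TABLE IF NOT EXISTS seata_account.t_account (
  id BIGINT PRIMARY KEY AUTO_INCREMENT,
  user_id BIGINT,
  total DECIMAL(10, 2),
  used DECIMAL(10, 2),
  residue DECIMAL(10, 2)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
INSERT INTO seata_account.t_account (user_id, total, used, residue) VALUES (1, 1000, 0, 1000);

CREATE TABLE IF NOT EXISTS seata_order.t_order (
  id BIGINT PRIMARY KEY AUTO_INCREMENT,
  user_id BIGINT,
  product_id BIGINT,
  `count` INT,
  money DECIMAL(10, 2),
  status INT COMMENT '0: creating, 1: finished'
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
SQL

# Every business database also needs Seata's AT-mode rollback log table (undo_log, as referenced by client.undo.logTable above)
for db in seata_storage seata_account seata_order; do
docker exec -i mysql mysql -uroot -p123456 "$db" <<'SQL'
CREATE TABLE IF NOT EXISTS undo_log (
  branch_id BIGINT NOT NULL COMMENT 'branch transaction id',
  xid VARCHAR(128) NOT NULL COMMENT 'global transaction id',
  context VARCHAR(128) NOT NULL COMMENT 'undo_log context, such as serialization',
  rollback_info LONGBLOB NOT NULL COMMENT 'rollback info',
  log_status INT NOT NULL COMMENT '0: normal status, 1: defense status',
  log_created DATETIME(6) NOT NULL,
  log_modified DATETIME(6) NOT NULL,
  UNIQUE KEY ux_undo_log (xid, branch_id)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
SQL
done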
9. seata-storage-service
Module structure
pom.xml
<dependencies>
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
        <exclusions>
            <exclusion>
                <artifactId>seata-spring-boot-starter</artifactId>
                <groupId>io.seata</groupId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>io.seata</groupId>
        <artifactId>seata-spring-boot-starter</artifactId>
        <version>1.4.2</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-openfeign</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid-spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
    </dependency>
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
        <scope>runtime</scope>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
        <exclusions>
            <exclusion>
                <groupId>org.junit.vintage</groupId>
                <artifactId>junit-vintage-engine</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
</dependencies>
application.yml
# Web port of the service
server:
  port: 9031
# Application name
spring:
  application:
    name: seata-storage-service
  cloud:
    nacos:
      discovery:
        server-addr: 192.168.23.3:8848
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://192.168.23.3:3306/seata_storage?useUnicode=true&serverTimezone=UTC&useSSL=false
    username: root
    password: 123456
seata:
  enabled: true
  application-id: ${spring.application.name}
  # Custom transaction group name
  # service.vgroup_mapping.${your-service-group}=default, where ${your-service-group} is the service group name you define
  # In newer versions the key is vgroupMapping, and a name such as storage-service-group must not be written as storage_service_group
  # It must match the seata-server setting: service.vgroupMapping.storage-service-group=default
  tx-service-group: storage-service-group
  # Map the transaction group to a cluster
  service:
    vgroup-mapping:
      # storage-service-group is the transaction group name, default is the cluster name (same as in registry.conf)
      storage-service-group: default
  registry:
    type: nacos
    nacos:
      application: seata-server
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # cluster name as configured in registry.conf
      cluster: default
  config:
    type: nacos
    nacos:
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # dataId published to the Nacos config center
      data-id: seataServer.properties
logging:
  level:
    io:
      seata: info
Entity class: Storage
@Data
@AllArgsConstructor
@NoArgsConstructor
@Accessors(chain = true)
public class Storage implements Serializable {
private Long id;
private Long productId;
private Integer total;
private Integer used;
private Integer residue;
}
Result wrapper class: CommonResult
@Data
@AllArgsConstructor
@NoArgsConstructor
@Accessors(chain = true)
public class CommonResult<T> {
private Integer code;
private String message;
private T data;
public CommonResult(Integer code, String message) {
this(code, message, null);
}
}
StorageMapper
public interface StorageMapper {
    /**
     * Deduct stock
     * @param productId product id
     * @param count quantity
     */
    @Update("update t_storage set used = used + #{count}, residue = residue - #{count} where product_id = #{productId}")
    void decrease(@Param("productId") Long productId, @Param("count") Integer count);
}
DataSourceProxyConfig
/**
 * Alternatively, Seata's own datasource proxy can be used here
 */
@Configuration
public class DataSourceProxyConfig {
@Bean
@ConfigurationProperties(prefix = "spring.datasource")
public DataSource druidDataSource() {
return new DruidDataSource();
}
@Bean
public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
factoryBean.setDataSource(dataSource);
factoryBean.setMapperLocations(
new PathMatchingResourcePatternResolver().getResources("classpath*:/mapper/*.xml"));
return factoryBean.getObject();
}
}
StorageService
public interface StorageService {
    /**
     * Deduct stock
     * @param productId product id
     * @param count quantity
     */
    void decrease(Long productId, Integer count);
}
StorageServiceImpl
@Service
public class StorageServiceImpl implements StorageService {
    private static final Logger LOGGER = LoggerFactory.getLogger(StorageServiceImpl.class);

    @Resource
    private StorageMapper storageDAO;

    @Override
    public void decrease(Long productId, Integer count) {
        LOGGER.info("-------> storage-service: start deducting stock");
        storageDAO.decrease(productId, count);
        LOGGER.info("-------> storage-service: stock deduction finished");
    }
}
StorageController
@RestController
public class StorageController {
    @Resource
    private StorageService storageService;

    @PostMapping("/storage/decrease")
    public CommonResult decrease(@RequestParam("productId") Long productId, @RequestParam("count") Integer count) {
        storageService.decrease(productId, count);
        return new CommonResult(200, "Stock deducted successfully");
    }
}
Main application class
@EnableFeignClients
@EnableDiscoveryClient
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
@MapperScan("tech.zger.cloud.mapper")
public class Demo14SeataStorageService9031Application {
public static void main(String[] args) {
SpringApplication.run(Demo14SeataStorageService9031Application.class, args);
}
}
10. seata-account-service
Module structure
The dependencies are the same as in seata-storage-service
application.yml
# Web port of the service
server:
  port: 9032
# Application name
spring:
  application:
    name: seata-account-service
  cloud:
    nacos:
      discovery:
        server-addr: 192.168.23.3:8848
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://192.168.23.3:3306/seata_account?useUnicode=true&serverTimezone=UTC&useSSL=false
    username: root
    password: 123456
seata:
  enabled: true
  application-id: ${spring.application.name}
  # Custom transaction group name
  # service.vgroup_mapping.${your-service-group}=default, where ${your-service-group} is the service group name you define
  # In newer versions the key is vgroupMapping, and a name such as account-service-group must not be written as account_service_group
  # It must match the seata-server setting: service.vgroupMapping.account-service-group=default
  tx-service-group: account-service-group
  # Map the transaction group to a cluster
  service:
    vgroup-mapping:
      # account-service-group is the transaction group name, default is the cluster name (same as in registry.conf)
      account-service-group: default
  registry:
    type: nacos
    nacos:
      application: seata-server
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # cluster name as configured in registry.conf
      cluster: default
  config:
    type: nacos
    nacos:
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # dataId published to the Nacos config center
      data-id: seataServer.properties
logging:
  level:
    io:
      seata: info
feign:
  httpclient:
    enabled: true
DataSourceProxyConfig and CommonResult are the same as before
Account
@Data
@AllArgsConstructor
@NoArgsConstructor
@Accessors(chain = true)
public class Account implements Serializable {
private Long id;
private Long userId;
private BigDecimal total;
private BigDecimal used;
private BigDecimal residue;
}
AccountMapper
@Mapper
public interface AccountMapper {
    /**
     * Deduct account balance
     * @param userId user id
     * @param money amount
     */
    @Update("update t_account set residue = residue - #{money}, used = used + #{money} where user_id = #{userId}")
    void decrease(@Param("userId") Long userId, @Param("money") BigDecimal money);
}
AccountService
public interface AccountService {
    /**
     * Deduct account balance
     * @param userId user id
     * @param money amount
     */
    void decrease(Long userId, BigDecimal money);
}
AccountServiceImpl
@Service
public class AccountServiceImpl implements AccountService {
    private static final Logger LOGGER = LoggerFactory.getLogger(AccountServiceImpl.class);

    @Resource
    private AccountMapper accountDAO;

    @Override
    public void decrease(Long userId, BigDecimal money) {
        LOGGER.info("-------> account-service: start deducting account balance");
        accountDAO.decrease(userId, money);
        LOGGER.info("-------> account-service: account balance deduction finished");
    }
}
AccountController
@RestController
public class AccountController {
    @Resource
    private AccountService accountService;

    @PostMapping("/account/decrease")
    public CommonResult decrease(@RequestParam("userId") Long userId, @RequestParam("money") BigDecimal money) {
        accountService.decrease(userId, money);
        return new CommonResult(200, "Account balance deducted successfully!");
    }
}
Main application class
@EnableFeignClients
@EnableDiscoveryClient
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class Demo14SeataAccountService9032Application {
public static void main(String[] args) {
SpringApplication.run(Demo14SeataAccountService9032Application.class, args);
}
}
11. seata-order-service
Module structure
The dependencies are the same as in seata-storage-service
application.yml
# Web port of the service
server:
  port: 9030
# Application name
spring:
  application:
    name: seata-order-service
  cloud:
    nacos:
      discovery:
        server-addr: 192.168.23.3:8848
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://192.168.23.3:3306/seata_order?useUnicode=true&serverTimezone=UTC&useSSL=false
    username: root
    password: 123456
seata:
  enabled: true
  application-id: ${spring.application.name}
  # Custom transaction group name
  # service.vgroup_mapping.${your-service-group}=default, where ${your-service-group} is the service group name you define
  # In newer versions the key is vgroupMapping, and a name such as order-service-group must not be written as order_service_group
  # It must match the seata-server setting: service.vgroupMapping.order-service-group=default
  tx-service-group: order-service-group
  # Map the transaction group to a cluster
  service:
    vgroup-mapping:
      # order-service-group is the transaction group name, default is the cluster name (same as in registry.conf)
      order-service-group: default
  registry:
    type: nacos
    nacos:
      application: seata-server
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # cluster name as configured in registry.conf
      cluster: default
  config:
    type: nacos
    nacos:
      server-addr: 192.168.23.3:8848
      group: SEATA_GROUP
      namespace: seata-naming
      username: nacos
      password: nacos
      # dataId published to the Nacos config center
      data-id: seataServer.properties
logging:
  level:
    io:
      seata: info
DataSourceProxyConfig and CommonResult are the same as before
Order
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Order implements Serializable {
    private Long id;
    private Long userId;
    private Long productId;
    private Integer count;
    private BigDecimal money;

    /**
     * Order status:
     * 0: creating
     * 1: finished
     */
    private Integer status;
}
AccountApi
@FeignClient("seata-account-service")
public interface AccountApi {
/**
* 商品服务调用账户服务
* @param userId 用户id
* @param money 金额
* @return 结果
*/
@PostMapping(value = "/account/decrease")
CommonResult decrease(@RequestParam("userId") Long userId, @RequestParam("money")BigDecimal money);
}
StorageApi
@FeignClient(value = "seata-storage-service")
public interface StorageApi {
    /**
     * Call the storage service
     * @param productId product id
     * @param count quantity
     * @return response result
     */
    @PostMapping(value = "/storage/decrease")
    CommonResult decrease(@RequestParam("productId") Long productId, @RequestParam("count") Integer count);
}
OrderMapper
@Repository
public interface OrderMapper {
    /**
     * Create an order
     * @param order order
     */
    @Insert("insert into `t_order` (user_id,product_id,count,money,status) values (#{userId}, #{productId}, #{count}, #{money}, 0)")
    void create(Order order);

    /**
     * Update the order status from 0 to 1
     * @param userId user id
     * @param status order status
     */
    @Update("update t_order set status = 1 where user_id = #{userId} and status = #{status}")
    void update(@Param("userId") Long userId, @Param("status") Integer status);
}
OrderService
public interface OrderService {
    /**
     * Create an order
     * @param order order
     */
    void create(Order order);
}
OrderServiceImpl
@Slf4j
@Service
public class OrderServiceImpl implements OrderService {
    @Autowired
    private OrderMapper orderMapper;
    @Autowired
    private StorageApi storageApi;
    @Autowired
    private AccountApi accountApi;

    /**
     * Create the order -> call the storage service to deduct stock -> call the account service to deduct the balance -> update the order status
     * In short: place order -> deduct stock -> deduct balance -> update status
     *
     * rollbackFor = Exception.class: roll back on any exception
     */
    @Override
    @GlobalTransactional(name = "fsp-create-order", rollbackFor = Exception.class)
    public void create(Order order) {
        // 1. Create the order
        log.info("-----> creating order");
        orderMapper.create(order);
        // 2. Deduct stock
        log.info("-----> calling Storage to deduct: {}", order.getCount());
        storageApi.decrease(order.getProductId(), order.getCount());
        log.info("-----> Storage call finished");
        // 3. Deduct balance
        log.info("-----> calling Account to deduct: {}", order.getMoney());
        accountApi.decrease(order.getUserId(), order.getMoney());
        log.info("-----> Account call finished");
        // 4. Update status
        log.info("-----> updating order status");
        orderMapper.update(order.getUserId(), 0);
        log.info("-----> order status updated");
    }
}
OrderController
@RestController
public class OrderController {
    @Autowired
    private OrderService orderService;

    @GetMapping("/order/create")
    public CommonResult create(Order order) {
        orderService.create(order);
        return new CommonResult(200, "Order created successfully");
    }
}
Main application class
@EnableFeignClients
@EnableDiscoveryClient
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
@MapperScan(basePackages = "tech.zger.cloud.mapper")
public class Demo14SeataOrderService9030Application {
public static void main(String[] args) {
SpringApplication.run(Demo14SeataOrderService9030Application.class, args);
}
}
12. Verification
Normal case: http://localhost:9030/order/create?userId=1&productId=1&count=10&money=100
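The same request from the command line (a sketch; the expected JSON shape follows from the CommonResult class above):
# Expect something like {"code":200,"message":"Order created successfully","data":null}
curl "http://localhost:9030/order/create?userId=1&productId=1&count=10&money=100"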
Simulate a payment timeout: modify the AccountServiceImpl class in seata-account-service:
@Service
public class AccountServiceImpl implements AccountService {
    private static final Logger LOGGER = LoggerFactory.getLogger(AccountServiceImpl.class);

    @Resource
    private AccountMapper accountDAO;

    @Override
    public void decrease(Long userId, BigDecimal money) {
        LOGGER.info("-------> account-service: start deducting account balance");
        // Simulate a timeout exception so that the global transaction rolls back:
        // pause the thread for a few seconds
        try {
            TimeUnit.SECONDS.sleep(20);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        accountDAO.decrease(userId, money);
        LOGGER.info("-------> account-service: account balance deduction finished");
    }
}
Call the OrderServiceImpl of seata-order-service without global transaction control:
a data consistency problem appears!!
Add Seata global transaction control
seata-order-service
@Slf4j
@Service
public class OrderServiceImpl implements OrderService {
    @Autowired
    private OrderMapper orderMapper;
    @Autowired
    private StorageApi storageApi;
    @Autowired
    private AccountApi accountApi;

    /**
     * Create the order -> call the storage service to deduct stock -> call the account service to deduct the balance -> update the order status
     * In short: place order -> deduct stock -> deduct balance -> update status
     *
     * rollbackFor = Exception.class: roll back on any exception
     */
    @Override
    @GlobalTransactional(name = "fsp-create-order", rollbackFor = Exception.class)
    public void create(Order order) {
        // 1. Create the order
        log.info("-----> creating order");
        orderMapper.create(order);
        // 2. Deduct stock
        log.info("-----> calling Storage to deduct: {}", order.getCount());
        storageApi.decrease(order.getProductId(), order.getCount());
        log.info("-----> Storage call finished");
        // 3. Deduct balance
        log.info("-----> calling Account to deduct: {}", order.getMoney());
        accountApi.decrease(order.getUserId(), order.getMoney());
        log.info("-----> Account call finished");
        // 4. Update status
        log.info("-----> updating order status");
        orderMapper.update(order.getUserId(), 0);
        log.info("-----> order status updated");
    }
}
Run the test again and observe the logs and the database changes; a query sketch follows.
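A sketch of what to look at after each run, using the table names assumed in section 8: on a successful run all three tables change together; in the timeout test without @GlobalTransactional they drift apart, and with @GlobalTransactional the whole call is rolled back.
docker exec -i mysql mysql -uroot -p123456 -e "
SELECT * FROM seata_order.t_order;
SELECT * FROM seata_storage.t_storage;
SELECT * FROM seata_account.t_account;
SELECT COUNT(*) AS pending_undo_logs FROM seata_order.undo_log;"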