feat(master): logistics microservice

Code complete
master
土豆兄弟 9 hours ago
parent 79b4170705
commit 17f4bc0752

@@ -0,0 +1,97 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.example</groupId>
<artifactId>dev-protocol</artifactId>
<version>1.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<artifactId>dev-protocol-springcloud-project-logistics-service</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>jar</packaging>
<properties>
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<!-- Module name and description -->
<name>dev-protocol-springcloud-project-logistics-service</name>
<description>Logistics service</description>
<dependencies>
<!-- Spring Cloud Alibaba Nacos discovery dependency -->
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<!-- zipkin = spring-cloud-starter-sleuth + spring-cloud-sleuth-zipkin-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-zipkin</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.5.0.RELEASE</version>
</dependency>
<!-- Java Persistence API, the ORM specification -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
</dependency>
<!-- SpringCloud Stream + Kafka -->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
<!-- MySQL driver; note that it must match the MySQL server version -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.12</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.example</groupId>
<artifactId>dev-protocol-springcloud-project-service-config</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.example</groupId>
<artifactId>dev-protocol-springcloud-project-service-sdk</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<!--
    The Spring Boot Maven plugin adds Spring Boot support to the Maven build: it can repackage
    the application as an executable jar or war, which is then run in the usual way.
-->
<build>
<finalName>${project.artifactId}</finalName>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

@@ -0,0 +1,21 @@
package org.example;
import org.example.conf.DataSourceProxyAutoConfiguration;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.context.annotation.Import;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
/**
 * <h1>Logistics microservice startup entry point</h1>
 * */
@Import(DataSourceProxyAutoConfiguration.class) // data source configuration for distributed transactions
@EnableJpaAuditing
@EnableDiscoveryClient
@SpringBootApplication
public class LogisticsApplication {
public static void main(String[] args) {
SpringApplication.run(LogisticsApplication.class, args);
}
}
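Note: the imported DataSourceProxyAutoConfiguration lives in the shared dev-protocol-springcloud-project-service-config module and is not part of this commit. A minimal sketch of what such a Seata data-source configuration usually looks like follows; the bean layout below is an assumption for illustration, not the actual shared-module code.

package org.example.conf;

import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

import javax.sql.DataSource;

/**
 * <h1>Sketch: wrap the pooled DataSource with Seata's DataSourceProxy so that local SQL
 * executed through JPA can take part in Seata AT-mode global transactions</h1>
 * */
@Configuration
public class DataSourceProxyAutoConfiguration {

    /** Build the data source from the spring.datasource.* properties in application.yml,
     *  then hand it to Seata's proxy; marked primary so JPA uses the proxied source */
    @Primary
    @Bean
    public DataSource dataSource(DataSourceProperties properties) {
        return new DataSourceProxy(properties.initializeDataSourceBuilder().build());
    }
}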

@@ -0,0 +1,10 @@
package org.example.dao;
import org.example.entity.EcommerceLogistics;
import org.springframework.data.jpa.repository.JpaRepository;
/**
* <h1>EcommerceLogistics Dao</h1>
* */
public interface EcommerceLogisticsDao extends JpaRepository<EcommerceLogistics, Long> {
}

@@ -0,0 +1,70 @@
package org.example.entity;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.commons.lang3.StringUtils;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedDate;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import java.util.Date;
/**
 * <h1>Logistics record entity, mapped to table t_dev_protocol_cloud_logistics</h1>
 * */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Entity
@EntityListeners(AuditingEntityListener.class)
@Table(name = "t_dev_protocol_cloud_logistics")
public class EcommerceLogistics {
/** Auto-increment primary key */
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id", nullable = false)
private Long id;
/** User id */
@Column(name = "user_id", nullable = false)
private Long userId;
/** Order id */
@Column(name = "order_id", nullable = false)
private Long orderId;
/** User address id */
@Column(name = "address_id", nullable = false)
private Long addressId;
/** Extra info (stored as JSON) */
@Column(name = "extra_info", nullable = false)
private String extraInfo;
/** Create time */
@CreatedDate
@Column(name = "create_time", nullable = false)
private Date createTime;
/** Update time */
@LastModifiedDate
@Column(name = "update_time", nullable = false)
private Date updateTime;
public EcommerceLogistics(Long userId, Long orderId, Long addressId, String extraInfo) {
this.userId = userId;
this.orderId = orderId;
this.addressId = addressId;
this.extraInfo = StringUtils.isNotBlank(extraInfo) ? extraInfo : "{}";
}
}

@@ -0,0 +1,46 @@
package org.example.service;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.example.dao.EcommerceLogisticsDao;
import org.example.entity.EcommerceLogistics;
import org.example.order.LogisticsMessage;
import org.example.sink.LogisticsSink;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.messaging.handler.annotation.Payload;
/**
 * <h1>Logistics service: consumes logistics messages from Kafka via Spring Cloud Stream and persists them</h1>
 * */
@Slf4j
@EnableBinding(LogisticsSink.class)
public class LogisticsServiceImpl {
private final EcommerceLogisticsDao logisticsDao;
public LogisticsServiceImpl(EcommerceLogisticsDao logisticsDao) {
this.logisticsDao = logisticsDao;
}
/**
 * <h2>Consume a logistics message from the input channel and save a logistics record</h2>
 * */
@StreamListener(LogisticsSink.INPUT)
public void consumeLogisticsMessage(@Payload Object payload) {
log.info("receive and consume logistics message: [{}]", payload.toString());
LogisticsMessage logisticsMessage = JSON.parseObject(
payload.toString(), LogisticsMessage.class
);
EcommerceLogistics ecommerceLogistics = logisticsDao.save(
new EcommerceLogistics(
logisticsMessage.getUserId(),
logisticsMessage.getOrderId(),
logisticsMessage.getAddressId(),
logisticsMessage.getExtraInfo()
)
);
log.info("consume logistics message success: [{}]", ecommerceLogistics.getId());
}
}
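LogisticsMessage comes from the shared dev-protocol-springcloud-project-service-sdk module (package org.example.order) and is not shown in this diff. Judging by the getters used above, it is a plain message POJO along these lines (a sketch inferred from usage, not the actual SDK class):

package org.example.order;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

/**
 * <h1>Logistics message published by the order service and consumed by the logistics service</h1>
 * */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class LogisticsMessage {

    /** User id */
    private Long userId;

    /** Order id */
    private Long orderId;

    /** User address id */
    private Long addressId;

    /** Extra info (stored as JSON) */
    private String extraInfo;
}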

@@ -0,0 +1,19 @@
package org.example.sink;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.messaging.SubscribableChannel;
/**
 * <h1>Logistics input binding (Sink)</h1>
 * */
public interface LogisticsSink {
/** Input channel name */
String INPUT = "logisticsInput";
/**
 * <h2>Input channel: Sink -> logisticsInput</h2>
 * */
@Input(LogisticsSink.INPUT)
SubscribableChannel logisticsInput();
}
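The producer side of this binding belongs to the order service and is outside this commit. Purely for illustration, a hypothetical output counterpart could look like this (the interface and channel names below are assumptions):

package org.example.source;

import org.springframework.cloud.stream.annotation.Output;
import org.springframework.messaging.MessageChannel;

/**
 * <h1>Hypothetical logistics output binding (Source) on the producer side</h1>
 * */
public interface LogisticsSource {

    /** Output channel name */
    String OUTPUT = "logisticsOutput";

    @Output(LogisticsSource.OUTPUT)
    MessageChannel logisticsOutput();
}

The producer's binding would point at the same destination (spring.cloud.stream.bindings.logisticsOutput.destination: e-commerce-topic) and send the message as a JSON string, which is what consumeLogisticsMessage parses on this side.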

@@ -0,0 +1,77 @@
server:
  port: 8004
  servlet:
    context-path: /dev-protocol-springcloud-project-logistics-service

spring:
  main:
    allow-bean-definition-overriding: true
  application:
    name: dev-protocol-springcloud-project-logistics-service
  cloud:
    stream:
      kafka:
        binder:
          brokers: 127.0.0.1:9092
          auto-create-topics: true
      bindings:
        logisticsInput:
          destination: e-commerce-topic # kafka topic
          content-type: text/plain
    alibaba:
      seata:
        tx-service-group: dev-protocol # Seata global transaction group
    nacos:
      discovery:
        enabled: true # set to false if you do not want Nacos service registration and discovery
        server-addr: 127.0.0.1:8848
        # server-addr: 127.0.0.1:8848,127.0.0.1:8849,127.0.0.1:8850 # Nacos server cluster addresses
        namespace: 1ccc74ae-9398-4dbe-b9d7-4f9addf9f40c
  # sleuth + zipkin + kafka tracing
  kafka:
    bootstrap-servers: 127.0.0.1:9092
    producer:
      retries: 3
    consumer:
      auto-offset-reset: latest
  sleuth:
    sampler:
      probability: 1.0 # sampling rate, 1.0 means 100%, the default is 0.1
  zipkin:
    sender:
      type: kafka # the default is http
    base-url: http://localhost:9411/
  jpa:
    show-sql: true
    hibernate:
      ddl-auto: none
    properties:
      hibernate.show_sql: true
      hibernate.format_sql: true
    open-in-view: false
  datasource:
    # data source
    url: jdbc:mysql://127.0.0.1:3306/imooc_e_commerce?autoReconnect=true&useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=GMT%2B8
    username: root
    password: root
    type: com.zaxxer.hikari.HikariDataSource
    driver-class-name: com.mysql.cj.jdbc.Driver
    # connection pool
    hikari:
      maximum-pool-size: 8
      minimum-idle: 4
      idle-timeout: 30000
      connection-timeout: 30000
      max-lifetime: 45000
      auto-commit: true
      pool-name: devProtocolSpringcloudHikariCP

# expose actuator endpoints
management:
  endpoints:
    web:
      exposure:
        include: '*'
  endpoint:
    health:
      show-details: always
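One way to exercise this configuration locally is to publish a JSON payload straight onto e-commerce-topic, since the spring-kafka dependency and the spring.kafka settings above make a KafkaTemplate available. The sketch below assumes a test-scoped spring-boot-starter-test dependency and the LogisticsMessage constructor shown earlier; the class and method names are illustrative only.

package org.example;

import com.alibaba.fastjson.JSON;
import org.example.order.LogisticsMessage;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.core.KafkaTemplate;

/**
 * <h1>Smoke test sketch: push one logistics message onto the Kafka topic so the
 * logisticsInput binding above consumes and persists it</h1>
 * */
@SpringBootTest
public class LogisticsMessageSmokeTest {

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    @Test
    public void sendLogisticsMessage() {
        // destination must match spring.cloud.stream.bindings.logisticsInput.destination
        kafkaTemplate.send("e-commerce-topic",
                JSON.toJSONString(new LogisticsMessage(1L, 10L, 100L, "{}")));
    }
}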

@@ -0,0 +1,65 @@
## transaction log store, only used in seata-server
store {
## store mode: file、db、redis
mode = "db"
## file store property
file {
## store location dir
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
maxBranchSessionSize = 16384
# globe session size , if exceeded throws exceptions
maxGlobalSessionSize = 512
# file buffer size , if exceeded allocate new buffer
fileWriteBufferCacheSize = 16384
# when recover batch read size
sessionReloadReadSize = 100
# async, sync
flushDiskMode = async
}
## database store property
db {
## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
datasource = "druid"
## mysql/oracle/postgresql/h2/oceanbase etc.
dbType = "mysql"
driverClassName = "com.mysql.jdbc.Driver"
url = "jdbc:mysql://127.0.0.1:3306/seata?autoReconnect=true&useUnicode=true&characterEncoding=utf8&useSSL=false"
user = "root"
password = "root"
minConn = 5
maxConn = 100
globalTable = "global_table"
branchTable = "branch_table"
lockTable = "lock_table"
queryLimit = 100
maxWait = 5000
}
## redis store property
redis {
host = "127.0.0.1"
port = "6379"
password = ""
database = "0"
minConn = 1
maxConn = 10
maxTotal = 100
queryLimit = 100
}
}
service {
vgroupMapping.dev-protocol = "default"  # must match spring.cloud.alibaba.seata.tx-service-group in application.yml
default.grouplist = "127.0.0.1:8091"
}
client {
async.commit.buffer.limit = 10000
lock {
retry.internal = 10
retry.times = 30
}
}

@@ -0,0 +1,17 @@
registry {
# file、nacos、eureka、redis、zk、consul
type = "file"
file {
name = "file.conf"
}
}
config {
type = "file"
file {
name = "file.conf"
}
}

@@ -0,0 +1,11 @@
-- Create the t_dev_protocol_cloud_logistics table
CREATE TABLE IF NOT EXISTS `dev_protocol_springcloud_project`.`t_dev_protocol_cloud_logistics` (
    `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'auto-increment primary key',
    `user_id` bigint(20) NOT NULL DEFAULT 0 COMMENT 'user id',
    `order_id` bigint(20) NOT NULL DEFAULT 0 COMMENT 'order id',
    `address_id` bigint(20) NOT NULL DEFAULT 0 COMMENT 'user address record id',
    `extra_info` varchar(512) NOT NULL COMMENT 'extra info (stored as JSON)',
    `create_time` datetime NOT NULL DEFAULT '0000-01-01 00:00:00' COMMENT 'create time',
    `update_time` datetime NOT NULL DEFAULT '0000-01-01 00:00:00' COMMENT 'update time',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=utf8mb4 COMMENT='logistics table';

@@ -1,4 +1,4 @@
 # Order microservice
-##
+## 12. Order microservice summary

@@ -65,6 +65,7 @@
 <module>dev-protocol-springcloud/dev-protocol-springcloud-hystrix-dashboard</module>
 <module>dev-protocol-springcloud/dev-protocol-springcloud-message-study</module>
 <module>dev-protocol-springcloud/dev-protocol-springcloud-project-order-service</module>
+<module>dev-protocol-springcloud/dev-protocol-springcloud-project-logistics-service</module>
 </modules>
 <properties>
