更新了自动检测删除的逻辑和优化了发送前去重的逻辑

master
土豆兄弟 4 years ago
parent c377c2ca51
commit 491c9d4d76

@ -25,6 +25,7 @@ import org.springframework.boot.web.servlet.server.ServletWebServerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import org.springframework.web.bind.annotation.RestController;
@ -40,6 +41,7 @@ import org.springframework.web.bind.annotation.RestController;
@SpringBootApplication
@EnableTransactionManagement
@EnableJpaAuditing(auditorAwareRef = "auditorAware")
@EnableScheduling
public class AppRun {
public static void main(String[] args) {

@ -126,15 +126,23 @@ public class SendBigDataTask {
}
}
/**
*
* modify by x 2020-12-15
*
*
*
* @see me.zhengjie.StreamTest#testFilter()
*/
if (StringUtils.isNotBlank(resultFilePath)){
List<String> fileLines = Lists.newArrayList();
Set<String> fileLines = new HashSet<>(50_0000);
try {
fileLines = Files.readAllLines(Paths.get(resultFilePath));
fileLines = new HashSet<>(Files.readAllLines(Paths.get(resultFilePath)));
} catch (IOException e) {
log.error("================== {read file error , please check is , file path is : {} } ================================", resultFilePath, e);
}
// 过滤的集合
Set<String> filterCollect = collect.stream().map(Tag::getUid).filter(fileLines::contains).collect(Collectors.toSet());
Set<String> filterCollect = collect.stream().parallel().map(Tag::getUid).filter(fileLines::contains).collect(Collectors.toSet());
if (CollectionUtil.isNotEmpty(filterCollect)){
log.info("================== [Filter collect is ready, collect size is {} ] ================================",filterCollect.size());
collect = collect.stream().filter(one -> !filterCollect.contains(one.getUid())).collect(Collectors.toList());

@ -0,0 +1,122 @@
package me.zhengjie.modules.common.handler;
import lombok.extern.slf4j.Slf4j;
import me.zhengjie.modules.tmpfilerecord.service.TempFileRecordService;
import me.zhengjie.modules.tmpfilerecord.service.dto.TempFileRecordDto;
import me.zhengjie.modules.tmpfilerecord.service.dto.TempFileRecordQueryCriteria;
import me.zhengjie.utils.FileUtil;
import me.zhengjie.utils.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Scheduled task that expires temporary files: records whose remaining
 * storage days reach zero are deleted (files on disk plus DB rows), all
 * other successfully-produced records get their day counter decremented.
 */
@Component
@Slf4j
public class DeleteFileScheduler {

    /** File status flag marking a record whose file was produced successfully. */
    private static final int FILE_SUCCESS_TAG = 1;

    /** Separator used when a single record stores several file paths. */
    private static final String PATH_SPLIT_TAG = ",";

    @Autowired
    private TempFileRecordService tempFileRecordService;

    /**
     * Entry point, intended to run every day at 23:00.
     *
     * NOTE(review): the @Transactional on handleOfDays does NOT take effect
     * when invoked from this method — Spring self-invocation bypasses the
     * proxy — so the transaction is opened here as well, covering the whole
     * classify-then-handle run.
     */
    // @Scheduled(cron = "0 0 23 ? * *")
    @Transactional(rollbackFor = Exception.class)
    public void deleteOrUpdateFilePerDayAtElevenClock() {
        log.info("======== [start run delete/update file scheduler task.] ========");
        // Classify records: key 0 = expired (days <= 1), key 1 = still valid.
        Map<Integer, List<TempFileRecordDto>> resultMap = checkDayOfDBDay();
        // Delete expired records/files, decrement the counter on the rest.
        handleOfDays(resultMap);
        log.info("======== [end run delete/update file scheduler task.] ========");
    }

    /**
     * Applies the per-group action: group 0 is removed (files and DB rows),
     * group 1 has its remaining-days counter decremented and persisted.
     * Kept public/@Transactional for direct external callers; see the note
     * on deleteOrUpdateFilePerDayAtElevenClock about self-invocation.
     *
     * @param resultMap records grouped by the 0/1 expiry flag from checkDayOfDBDay()
     */
    @Transactional(rollbackFor = Exception.class)
    public void handleOfDays(Map<Integer, List<TempFileRecordDto>> resultMap) {
        for (Map.Entry<Integer, List<TempFileRecordDto>> each : resultMap.entrySet()) {
            if (each.getKey() == 0) {
                List<TempFileRecordDto> value = each.getValue();
                // Collect the paths of every expired record and wipe the files.
                List<String> pathCollect = value.stream()
                        .map(TempFileRecordDto::getFilePaths)
                        .collect(Collectors.toList());
                delExpireFiles(pathCollect);
                // Then remove the expired rows themselves.
                tempFileRecordService.deleteAll(
                        value.stream()
                                .map(TempFileRecordDto::getId)
                                .toArray(Integer[]::new)
                );
            }
            if (each.getKey() == 1) {
                // Decrement the remaining storage days of every live record.
                List<TempFileRecordDto> value = each.getValue();
                value.forEach(one -> one.setDays(one.getDays() - 1));
                tempFileRecordService.batchUpdate(value);
            }
        }
    }

    /**
     * Deletes the directory that contains each expired file.
     *
     * @param pathCollect path entries; one entry may hold several
     *                    comma-separated paths, which are assumed to share
     *                    a single parent directory — TODO confirm with the
     *                    file-producing side
     */
    private void delExpireFiles(List<String> pathCollect) {
        pathCollect.forEach(
                each -> {
                    if (each.contains(PATH_SPLIT_TAG)) {
                        // Several files in one directory: derive the directory
                        // from the first path and delete it recursively.
                        String[] split = each.split(PATH_SPLIT_TAG);
                        if (split.length > 0) {
                            String dirPath = StringUtils.substringBeforeLast(split[0], File.separator);
                            if (StringUtils.isNotBlank(dirPath)) {
                                FileUtil.del(dirPath);
                            }
                        }
                    } else {
                        String dirPath = StringUtils.substringBeforeLast(each, File.separator);
                        // Guard against a blank result (path without a separator),
                        // consistent with the multi-file branch above.
                        if (StringUtils.isNotBlank(dirPath)) {
                            FileUtil.del(dirPath);
                        }
                    }
                }
        );
    }

    /**
     * Loads all successfully-produced file records and groups them by expiry:
     * key 0 for records with days <= 1 (expire today), key 1 for the rest.
     *
     * @return records grouped by the 0/1 expiry flag
     */
    private Map<Integer, List<TempFileRecordDto>> checkDayOfDBDay() {
        TempFileRecordQueryCriteria criteria = new TempFileRecordQueryCriteria();
        criteria.setFileStatus(FILE_SUCCESS_TAG);
        List<TempFileRecordDto> records = tempFileRecordService.queryAll(criteria);
        // 0 -> expires now, 1 -> still has days left.
        return records.stream()
                .collect(Collectors.groupingBy(item -> item.getDays() <= 1 ? 0 : 1));
    }
}

@ -26,9 +26,6 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.nio.file.FileSystem;
import java.nio.file.Paths;
import java.util.List;
import java.util.Objects;

@ -89,4 +89,11 @@ public interface TempFileRecordService {
*/
TempFileRecord findByVerificationCode(String verificationCode, Integer fileStatus);
/**
 * Batch-updates the given temp-file records.
 *
 * @param tempFileRecordDtos records to persist, one update per element
 * @return the number of records submitted for update
 */
long batchUpdate(List<TempFileRecordDto> tempFileRecordDtos);
}

@ -15,6 +15,7 @@
*/
package me.zhengjie.modules.tmpfilerecord.service.impl;
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.util.RandomUtil;
import cn.hutool.system.OsInfo;
import cn.hutool.system.SystemUtil;
@ -116,4 +117,17 @@ public class TempFileRecordServiceImpl implements TempFileRecordService {
public TempFileRecord findByVerificationCode(String verificationCode, Integer fileStatus) {
return tempFileRecordRepository.findByVerificationCodeAndFileStatus(verificationCode, fileStatus);
}
/**
 * Batch-updates the given records by copying each DTO onto a fresh entity
 * and delegating to the single-record update. The parameter is renamed from
 * the original "TempFileRecordDto", which shadowed the type name.
 *
 * @param tempFileRecordDtos records to persist
 * @return the number of records submitted (per-record failures roll back via update())
 */
@Override
public long batchUpdate(List<TempFileRecordDto> tempFileRecordDtos) {
    tempFileRecordDtos.forEach(
            each -> {
                TempFileRecord tempFileRecord = new TempFileRecord();
                BeanUtil.copyProperties(each, tempFileRecord);
                update(tempFileRecord);
            }
    );
    return tempFileRecordDtos.size();
}
}

@ -0,0 +1,230 @@
#配置数据源
spring:
shardingsphere:
datasource:
names: eladmin,schema
eladmin:
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: net.sf.log4jdbc.sql.jdbcapi.DriverSpy
url: jdbc:log4jdbc:mysql://118.178.137.129:3306/eladmin?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useSSL=false
username: root
password: root
# 初始连接数
initial-size: 5
# 最小连接数
min-idle: 10
# 最大连接数
max-active: 20
# 获取连接超时时间
max-wait: 5000
# 连接有效性检测时间
time-between-eviction-runs-millis: 60000
# 连接在池中最小生存的时间
min-evictable-idle-time-millis: 300000
# 连接在池中最大生存的时间
max-evictable-idle-time-millis: 900000
test-while-idle: true
test-on-borrow: false
test-on-return: false
# 检测连接是否有效
validation-query: select 1
# 配置监控统计
webStatFilter:
enabled: true
stat-view-servlet:
enabled: true
url-pattern: /druid/*
reset-enable: false
filter:
stat:
enabled: true
# 记录慢SQL
log-slow-sql: true
slow-sql-millis: 1000
merge-sql: true
wall:
config:
multi-statement-allow: true
schema:
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: net.sf.log4jdbc.sql.jdbcapi.DriverSpy
url: jdbc:log4jdbc:mysql://118.178.137.129:3306/information_schema?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useSSL=false
username: root
password: root
# 初始连接数
initial-size: 5
# 最小连接数
min-idle: 10
# 最大连接数
max-active: 20
# 获取连接超时时间
max-wait: 5000
# 连接有效性检测时间
time-between-eviction-runs-millis: 60000
# 连接在池中最小生存的时间
min-evictable-idle-time-millis: 300000
# 连接在池中最大生存的时间
max-evictable-idle-time-millis: 900000
test-while-idle: true
test-on-borrow: false
test-on-return: false
# 检测连接是否有效
validation-query: select 1
# 配置监控统计
webStatFilter:
enabled: true
stat-view-servlet:
enabled: true
url-pattern: /druid/*
reset-enable: false
filter:
stat:
enabled: true
# 记录慢SQL
log-slow-sql: true
slow-sql-millis: 1000
merge-sql: true
wall:
config:
multi-statement-allow: true
sharding:
# 默认数据源
default-data-source-name: eladmin
# 配置分表策略
tables:
dc_tag:
actual-data-nodes: eladmin.dc_tag$->{0..9}
table-strategy:
inline:
sharding-column: task_id
algorithm-expression: dc_tag$->{task_id % 10}
# 登录相关配置
login:
# 登录缓存
cache-enable: true
# 是否限制单用户登录
single: false
# 验证码
login-code:
# 验证码类型配置 查看 LoginProperties 类
code-type: arithmetic
# 登录图形验证码有效时间/分钟
expiration: 2
# 验证码宽度
width: 111
# 验证码高度
heigth: 36
# 内容长度
length: 2
# 字体名称,为空则使用默认字体
font-name:
# 字体大小
font-size: 25
#jwt
jwt:
header: Authorization
# 令牌前缀
token-start-with: Bearer
# 必须使用最少88位的Base64对该令牌进行编码
base64-secret: ZmQ0ZGI5NjQ0MDQwY2I4MjMxY2Y3ZmI3MjdhN2ZmMjNhODViOTg1ZGE0NTBjMGM4NDA5NzYxMjdjOWMwYWRmZTBlZjlhNGY3ZTg4Y2U3YTE1ODVkZDU5Y2Y3OGYwZWE1NzUzNWQ2YjFjZDc0NGMxZWU2MmQ3MjY1NzJmNTE0MzI=
# 令牌过期时间 此处单位/毫秒 默认4小时可在此网站生成 https://www.convertworld.com/zh-hans/time/milliseconds.html
token-validity-in-seconds: 14400000
# 在线用户key
online-key: online-token-
# 验证码
code-key: code-key-
# token 续期检查时间范围默认30分钟单位毫秒在token即将过期的一段时间内用户操作了则给用户的token续期
detect: 1800000
# 续期时间范围默认1小时单位毫秒
renew: 3600000
#是否允许生成代码生产环境设置为false
generator:
enabled: true
#是否开启 swagger-ui
swagger:
enabled: true
# IP 本地解析
ip:
local-parsing: true
# 文件存储路径
file:
mac:
path: ~/file/
avatar: ~/avatar/
linux:
path: /home/eladmin/file/
avatar: /home/eladmin/avatar/
windows:
path: C:\eladmin\file\
avatar: C:\eladmin\avatar\
# 文件大小 /M
maxSize: 100
avatarMaxSize: 5
# 配置请求发送路径
req:
db:
# 设置给大坝回传号码的地址
host: http://api.hzdaba.cn/aibot
url: /api/activity/addclient
# 线程池的相关配置
produce:
task:
thread_pool:
corePoolSize: 2
maxPoolSize: 16
queueCapacity: 3
ThreadNamePrefix: 'ProduceLocalFileTaskExecutor-'
send:
task:
thread_pool:
corePoolSize: 2
maxPoolSize: 16
queueCapacity: 3
ThreadNamePrefix: 'SendLocalFileTaskExecutor-'
merge:
task:
thread_pool:
corePoolSize: 2
maxPoolSize: 16
queueCapacity: 3
ThreadNamePrefix: 'MergeFileTaskExecutor-'
producebigdata:
task:
thread_pool:
corePoolSize: 2
maxPoolSize: 16
queueCapacity: 3
ThreadNamePrefix: 'ProduceBigDataTaskExecutor-'
SendBigData:
task:
thread_pool:
corePoolSize: 2
maxPoolSize: 16
queueCapacity: 3
ThreadNamePrefix: 'SendBigDataTaskExecutor-'
# 增加日志相关的配置
logging:
level:
org.springframework.security:
- debug
- info
org.springframework.web: error
org.hibernate.SQL: debug
org.hibernate.engine.QueryParameters: debug
org.hibernate.engine.query.HQLQueryPlan: debug
org.hibernate.type.descriptor.sql.BasicBinder: trace
tag:
split-table:
sum: 10
remote:
link:
address: 'http://116.62.197.152:8000/api/temp/file/download'
file-base-path-linux: /home/eladmin/file/temp/
file-base-path-windows: C:\eladmin\file\temp\
file-base-path-mac: ~/file/eladmin/temp/

@ -5,7 +5,7 @@ spring:
freemarker:
check-template-location: false
profiles:
active: dev
active: test
jackson:
time-zone: GMT+8
data:

@ -0,0 +1,51 @@
package me.zhengjie;
import cn.hutool.core.util.RandomUtil;
import org.junit.Test;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
public class StreamTest {

    // 500k-element lookup set and 20M-element base list. Building these in
    // the static initializer is expensive (seconds) but shared by all tests.
    private static Set<String> uids;
    private static List<String> base;

    static {
        uids = new HashSet<>(50_0000);
        uids.add("111111111111111111111111");
        for (int i = 0; i < 500000; i++) {
            uids.add(RandomUtil.randomString(24));
        }
        base = new ArrayList<>(2000_0000);
        base.add("111111111111111111111111");
        for (int i = 0; i < 2000_0000; i++) {
            base.add(RandomUtil.randomString(24));
        }
    }

    /**
     * Benchmark for the pre-send dedup filter:
     * 1. stream the large list in parallel and keep entries present in the set
     * 2. Set::contains is O(1) versus List::contains O(n), hence the swap
     *    kept in the commented-out line below
     */
    @Test
    public void testFilter() {
        // Primitive long instead of boxed Long; System.currentTimeMillis()
        // is the direct equivalent of the original LocalDateTime round-trip.
        long start = System.currentTimeMillis();
        System.out.println(" start , " + start);
        // Set<String> collect = uids.stream().parallel().filter(base::contains).collect(Collectors.toSet());
        Set<String> collect = base.stream().parallel().filter(uids::contains).collect(Collectors.toSet()); // 2119 334
        System.out.println("uids size :" + uids.size() + ", base size: " + base.size() + ", 重复"+ collect.size());
        long end = System.currentTimeMillis();
        System.out.println(" end, "+ end +", cost , " + (end - start));
    }
}

@ -166,4 +166,23 @@ public class TempTest {
System.out.println(StringUtils.countMatches(BASE_URL_CHAR_NUMBER, tag.trim()));
}
@Test
public void testToArray(){
    List<Integer> integers = Arrays.asList(1,2,3,4,5);
    // BUG FIX: integers.toArray() returns Object[]; casting that to
    // Integer[] throws ClassCastException at runtime. Use the typed
    // overload, which allocates a correctly-typed array.
    Integer[] objects = integers.toArray(new Integer[0]);
    System.out.println(Arrays.toString(objects));
}
@Test
public void testDelDir(){
    // Resolve the parent directory of the target file, then delete that
    // directory recursively and print the result.
    String filePath = "C:\\Users\\Administrator\\Desktop\\1\\1.txt";
    String parentDir = StringUtils.substringBeforeLast(filePath, File.separator);
    System.out.println(FileUtil.del(parentDir));
}
}

@ -0,0 +1,17 @@
package me.zhengjie;
import me.zhengjie.modules.common.handler.DeleteFileScheduler;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
/**
 * Manual driver for {@link DeleteFileScheduler}; extends MailTest to reuse
 * its Spring test-context configuration.
 */
public class TestScheduler extends MailTest {

    @Autowired
    private DeleteFileScheduler fileCleanupScheduler;

    @Test
    public void testDel() {
        // Trigger the daily cleanup once, outside its cron schedule.
        fileCleanupScheduler.deleteOrUpdateFilePerDayAtElevenClock();
    }
}
Loading…
Cancel
Save