When it comes to purging old data, teams usually go wrong in one of two ways: either a single sweeping DELETE grinds the database to a halt, or everyone is too nervous to touch the data at all.
-- A seemingly simple delete
DELETE FROM user_operation_log
WHERE create_time < '2023-01-01';
-- What MySQL actually does:
-- 1. Acquire write locks row by row (with no usable index, this
--    effectively locks the whole table)
-- 2. Scan through 10,000,000 rows
-- 3. For every matching row:
--    - write the undo log (for rollback)
--    - write the redo log (for crash recovery)
--    - update every related index
--    - mark the row as deleted
-- 4. Space is only reclaimed after the transaction commits
The resource cost of that one statement:

- Disk I/O: heavy writes to the undo log, redo log, data files, and index files
- CPU: index maintenance, predicate evaluation, transaction bookkeeping
- Memory: Buffer Pool churn and lock-structure maintenance
- Network: a huge volume of changes to replicate to replicas

And the risks that come with it (a pre-flight check follows the list):

- Lock wait timeouts: other queries get blocked
- Replication lag: replicas cannot keep up with the flood of changes
- Disk space: the undo log balloons and can fill the disk
- Overall slowdown: the whole database instance suffers
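Before running a statement like that, it is worth sizing the job. A quick pre-flight sketch, assuming the same table and cutoff as above:

-- How many rows would the delete actually touch?
SELECT COUNT(*)
FROM user_operation_log
WHERE create_time < '2023-01-01';
-- Will the scan use an index at all? (EXPLAIN DELETE works on MySQL 5.6+)
EXPLAIN DELETE FROM user_operation_log
WHERE create_time < '2023-01-01';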
-- Batch deletion with a stored procedure, walking the primary key
DELIMITER $$
CREATE PROCEDURE batch_delete_by_id()
BEGIN
    DECLARE batch_size INT DEFAULT 1000;
    DECLARE max_id BIGINT;
    DECLARE min_id BIGINT;
    DECLARE current_id BIGINT DEFAULT 0;
    -- Find the id range that contains the rows to delete
    SELECT MIN(id), MAX(id) INTO min_id, max_id
    FROM user_operation_log
    WHERE create_time < '2023-01-01';
    -- Start from the first matching id instead of 0, so we don't
    -- spin through empty ranges
    SET current_id = min_id;
    WHILE current_id <= max_id DO
        -- Delete one id range per iteration
        DELETE FROM user_operation_log
        WHERE id BETWEEN current_id AND current_id + batch_size - 1
          AND create_time < '2023-01-01';
        -- Commit to release locks
        COMMIT;
        -- Sleep briefly to give the database room to breathe
        DO SLEEP(0.1);
        -- Advance the cursor
        SET current_id = current_id + batch_size;
        -- Log progress (optional; the assumed table definition follows below)
        INSERT INTO delete_progress_log
        VALUES (NOW(), current_id, batch_size);
    END WHILE;
END$$
DELIMITER ;
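The procedure writes its optional progress rows into a delete_progress_log table that the article never defines; a minimal assumed version of that table, plus the invocation:

-- Minimal progress table assumed by the procedure above
CREATE TABLE IF NOT EXISTS delete_progress_log (
    logged_at  DATETIME,
    current_id BIGINT,
    batch_size INT
);
CALL batch_delete_by_id();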
// Java implementation of time-based batch deletion
@Service
@Slf4j
public class BatchDeleteService {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Batch deletion over a time range, one window at a time.
     */
    public void batchDeleteByTime(String tableName, String timeColumn,
                                  Date startTime, Date endTime,
                                  int batchDays) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(startTime);
        int totalDeleted = 0;
        long startMs = System.currentTimeMillis();
        windows:
        while (calendar.getTime().before(endTime)) {
            Date batchStart = calendar.getTime();
            calendar.add(Calendar.DAY_OF_YEAR, batchDays);
            Date batchEnd = calendar.getTime();
            // Never run past the overall end time
            if (batchEnd.after(endTime)) {
                batchEnd = endTime;
            }
            // Half-open interval so adjacent windows don't overlap on the boundary
            String sql = String.format(
                "DELETE FROM %s WHERE %s >= ? AND %s < ? LIMIT 1000",
                tableName, timeColumn, timeColumn
            );
            // Keep deleting inside this window until it is empty; otherwise
            // a window holding more than 1000 rows would leave data behind
            int deleted;
            do {
                deleted = jdbcTemplate.update(sql, batchStart, batchEnd);
                totalDeleted += deleted;
                log.info("Batch done: {} - {}, deleted {} rows, {} total",
                        batchStart, batchEnd, deleted, totalDeleted);
                if (deleted > 0) {
                    // Throttle to keep pressure off the database
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        break windows;
                    }
                }
                // Report progress roughly every 10,000 rows
                if (totalDeleted > 0 && totalDeleted % 10000 == 0) {
                    logProgress(totalDeleted, startMs);
                }
            } while (deleted > 0);
        }
        log.info("Deletion finished! {} rows removed in {} seconds",
                totalDeleted, (System.currentTimeMillis() - startMs) / 1000);
    }

    private void logProgress(int totalDeleted, long startMs) {
        long costMs = System.currentTimeMillis() - startMs;
        double recordsPerSecond = totalDeleted * 1000.0 / costMs;
        log.info("Progress: {} rows, rate: {}/s, elapsed: {}s",
                totalDeleted, String.format("%.2f", recordsPerSecond), costMs / 1000);
    }
}
-- Simple LIMIT-based batch deletion
DELIMITER $$
CREATE PROCEDURE batch_delete_with_limit()
BEGIN
    DECLARE done INT DEFAULT 0;
    DECLARE batch_size INT DEFAULT 1000;
    DECLARE affected INT DEFAULT 0;
    DECLARE total_deleted INT DEFAULT 0;
    WHILE done = 0 DO
        -- Delete up to 1000 rows per round
        -- (needs an index on create_time; see below)
        DELETE FROM user_operation_log
        WHERE create_time < '2023-01-01'
        LIMIT batch_size;
        -- Capture ROW_COUNT() once; calling it again after another
        -- statement would no longer return the delete's row count
        SET affected = ROW_COUNT();
        SET total_deleted = total_deleted + affected;
        IF affected = 0 THEN
            SET done = 1;
        END IF;
        -- Commit to release locks
        COMMIT;
        -- Throttle
        DO SLEEP(0.1);
        -- Report every 10,000 deleted rows
        IF total_deleted % 10000 = 0 THEN
            SELECT CONCAT('Deleted so far: ', total_deleted, ' rows') AS progress;
        END IF;
    END WHILE;
    SELECT CONCAT('Done! Total: ', total_deleted, ' rows') AS result;
END$$
DELIMITER ;
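Each round of that loop re-scans for matching rows, so it lives or dies by an index on the filter column. The article's schema doesn't guarantee one exists, so check first and add it if missing (the index name matches the one used later in the rebuild example):

-- Without this, every DELETE ... LIMIT round is a full table scan
ALTER TABLE user_operation_log ADD INDEX idx_create_time (create_time);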
Batch size:

- Small tables: 1,000-5,000 rows per batch
- Large tables: 100-1,000 rows per batch
- Tune to your actual workload

Sleep interval:

- Business peak hours: sleep 1-2 seconds between batches
- Off-peak hours: sleep 100-500 ms
- Overnight maintenance windows: little or no sleep

While it runs:

- Monitor database load
- Watch replication lag (see the checks below)
- Adjust the parameters dynamically as conditions change
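A couple of quick checks to feed that monitoring loop. A sketch: SHOW REPLICA STATUS is the MySQL 8.0.22+ spelling (older versions use SHOW SLAVE STATUS and Seconds_Behind_Master), and the history-list metric assumes the default innodb_metrics counters are enabled:

-- Replication lag on a replica: look at Seconds_Behind_Source
SHOW REPLICA STATUS;
-- Purge backlog: a steadily growing history list length means the
-- undo log is piling up faster than InnoDB can purge it
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'trx_rseg_history_len';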
-- Step 1: Create the new table with the same structure
-- (note: LIKE copies secondary indexes too; for the fast-load trick in
-- step 3, drop them here right after creation and re-add them later)
CREATE TABLE user_operation_log_new LIKE user_operation_log;
-- Step 2: Copy over the rows to keep
INSERT INTO user_operation_log_new
SELECT * FROM user_operation_log
WHERE create_time >= '2023-01-01';
-- Step 3: Create indexes (building them after the load is faster)
ALTER TABLE user_operation_log_new ADD INDEX idx_create_time (create_time);
ALTER TABLE user_operation_log_new ADD INDEX idx_user_id (user_id);
-- Step 4: Validate the data
SELECT
    (SELECT COUNT(*) FROM user_operation_log_new) AS new_count,
    (SELECT COUNT(*) FROM user_operation_log WHERE create_time >= '2023-01-01') AS expected_count;
-- Step 5: Atomic switch (needs only a very short table lock;
-- rows written since step 2 are handled below)
RENAME TABLE
    user_operation_log TO user_operation_log_old,
    user_operation_log_new TO user_operation_log;
-- Step 6: Drop the old table (immediately, or later once you're confident)
DROP TABLE user_operation_log_old;
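One caveat these steps gloss over: rows written between step 2 and step 5 never make it into the new table. If writes cannot be paused for the whole copy, a catch-up pass just before the rename narrows the gap. A sketch, assuming an auto-increment id; @copy_high_water is a hypothetical marker you record right after step 2:

-- Run right before step 5; repeat until it copies ~0 rows
INSERT INTO user_operation_log_new
SELECT * FROM user_operation_log
WHERE id > @copy_high_water
  AND create_time >= '2023-01-01';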
@Service
@Slf4j
public class TableRebuildService {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Delete data by rebuilding the table.
     */
    public void rebuildTableForDeletion(String sourceTable, String condition) {
        String newTable = sourceTable + "_new";
        String oldTable = sourceTable + "_old";
        try {
            // 1. Create the new table
            log.info("Creating new table: {}", newTable);
            jdbcTemplate.execute("CREATE TABLE " + newTable + " LIKE " + sourceTable);
            // 2. Copy over the rows to keep
            log.info("Copying rows to keep");
            String insertSql = String.format(
                "INSERT INTO %s SELECT * FROM %s WHERE %s",
                newTable, sourceTable, condition
            );
            int keptCount = jdbcTemplate.update(insertSql);
            log.info("Copied {} rows", keptCount);
            // 3. Create indexes (optional; building after the load is faster)
            log.info("Creating indexes");
            createIndexes(newTable);
            // 4. Validate
            log.info("Validating data");
            if (!validateData(sourceTable, newTable, condition)) {
                throw new RuntimeException("Data validation failed");
            }
            // 5. Atomic switch
            log.info("Switching tables");
            switchTables(sourceTable, newTable, oldTable);
            // 6. Drop the old table (immediately or later)
            log.info("Dropping old table");
            dropTableSafely(oldTable);
            log.info("Table rebuild finished!");
        } catch (Exception e) {
            log.error("Table rebuild failed", e);
            // Clean up the temporary table
            cleanupTempTable(newTable);
            throw new RuntimeException("Table rebuild failed", e);
        }
    }

    private void createIndexes(String tableName) {
        // Create whatever indexes the business needs
        String[] indexes = {
            "CREATE INDEX idx_create_time ON " + tableName + "(create_time)",
            "CREATE INDEX idx_user_id ON " + tableName + "(user_id)"
        };
        for (String sql : indexes) {
            jdbcTemplate.execute(sql);
        }
    }

    private boolean validateData(String sourceTable, String newTable, String condition) {
        // Check that the new table holds exactly the rows we meant to keep
        Integer newCount = jdbcTemplate.queryForObject(
            "SELECT COUNT(*) FROM " + newTable, Integer.class);
        Integer expectedCount = jdbcTemplate.queryForObject(
            "SELECT COUNT(*) FROM " + sourceTable + " WHERE " + condition, Integer.class);
        return newCount.equals(expectedCount);
    }

    private void switchTables(String sourceTable, String newTable, String oldTable) {
        // RENAME TABLE swaps both names in one atomic statement
        String sql = String.format(
            "RENAME TABLE %s TO %s, %s TO %s",
            sourceTable, oldTable, newTable, sourceTable
        );
        jdbcTemplate.execute(sql);
    }

    private void dropTableSafely(String tableName) {
        try {
            jdbcTemplate.execute("DROP TABLE " + tableName);
        } catch (Exception e) {
            log.warn("Failed to drop table {}, clean it up manually", tableName, e);
        }
    }

    private void cleanupTempTable(String tableName) {
        try {
            jdbcTemplate.execute("DROP TABLE IF EXISTS " + tableName);
        } catch (Exception e) {
            log.warn("Failed to clean up temp table {}", tableName, e);
        }
    }
}
This approach fits when:

- More than ~50% of the table's rows need deleting
- The business can tolerate a brief write pause (during the rename)
- There is enough disk space to hold the old and new tables side by side (see the check below)
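A quick way to size that space requirement, sketched with information_schema (the figures are InnoDB estimates, not exact byte counts):

SELECT table_name,
       ROUND((data_length + index_length) / 1024 / 1024 / 1024, 2) AS size_gb
FROM information_schema.tables
WHERE table_schema = DATABASE()
  AND table_name = 'user_operation_log';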
-- Inspect the table's partitions
SELECT table_name, partition_name, table_rows
FROM information_schema.partitions
WHERE table_name = 'user_operation_log';
-- Drop whole partitions directly (completes in seconds)
ALTER TABLE user_operation_log DROP PARTITION p202201, p202202;
-- Stored procedure that drops expired partitions (scheduling example below)
DELIMITER $$
CREATE PROCEDURE auto_drop_expired_partitions()
BEGIN
    DECLARE expired_partition VARCHAR(64);
    DECLARE done INT DEFAULT FALSE;
    -- Find partitions to drop (keep the most recent 12 months)
    DECLARE cur CURSOR FOR
        SELECT partition_name
        FROM information_schema.partitions
        WHERE table_name = 'user_operation_log'
          AND partition_name LIKE 'p%'
          AND STR_TO_DATE(REPLACE(partition_name, 'p', ''), '%Y%m') < DATE_SUB(NOW(), INTERVAL 12 MONTH);
    DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;
    OPEN cur;
    read_loop: LOOP
        FETCH cur INTO expired_partition;
        IF done THEN
            LEAVE read_loop;
        END IF;
        -- Drop the expired partition
        SET @sql = CONCAT('ALTER TABLE user_operation_log DROP PARTITION ', expired_partition);
        PREPARE stmt FROM @sql;
        EXECUTE stmt;
        DEALLOCATE PREPARE stmt;
        -- Log the operation
        INSERT INTO partition_clean_log
        VALUES (NOW(), expired_partition, 'DROPPED');
    END LOOP;
    CLOSE cur;
END$$
DELIMITER ;
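To actually run this on a schedule from inside MySQL, one option is the event scheduler. A sketch, assuming event_scheduler is enabled and the account has the EVENT privilege:

SET GLOBAL event_scheduler = ON;
CREATE EVENT IF NOT EXISTS evt_drop_expired_partitions
ON SCHEDULE EVERY 1 DAY
STARTS CURRENT_TIMESTAMP
DO CALL auto_drop_expired_partitions();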
-- Convert a regular table into a partitioned one
-- Step 1: Create the partitioned table
CREATE TABLE user_operation_log_partitioned (
    id BIGINT AUTO_INCREMENT,
    user_id BIGINT,
    operation VARCHAR(100),
    create_time DATETIME,
    PRIMARY KEY (id, create_time) -- the partitioning column must be part of the primary key
) PARTITION BY RANGE (YEAR(create_time)*100 + MONTH(create_time)) (
    PARTITION p202201 VALUES LESS THAN (202202),
    PARTITION p202202 VALUES LESS THAN (202203),
    PARTITION p202203 VALUES LESS THAN (202204),
    PARTITION p202204 VALUES LESS THAN (202205),
    PARTITION pfuture VALUES LESS THAN MAXVALUE
);
-- Step 2: Load the data
INSERT INTO user_operation_log_partitioned
SELECT * FROM user_operation_log;
-- Step 3: Switch tables
RENAME TABLE
    user_operation_log TO user_operation_log_old,
    user_operation_log_partitioned TO user_operation_log;
-- Step 4: Ongoing maintenance: split new partitions out of pfuture
ALTER TABLE user_operation_log REORGANIZE PARTITION pfuture INTO (
    PARTITION p202205 VALUES LESS THAN (202206),
    PARTITION p202206 VALUES LESS THAN (202207),
    PARTITION pfuture VALUES LESS THAN MAXVALUE
);
@Service
@Slf4j
public class PartitionManagerService {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Automated partition maintenance.
     */
    @Scheduled(cron = "0 0 2 * * ?") // run at 2 a.m. every day
    public void autoManagePartitions() {
        log.info("Starting partition maintenance");
        try {
            // 1. Drop expired partitions (keep the last 12 months)
            dropExpiredPartitions();
            // 2. Pre-create future partitions
            createFuturePartitions();
            log.info("Partition maintenance finished");
        } catch (Exception e) {
            log.error("Partition maintenance failed", e);
        }
    }

    private void dropExpiredPartitions() {
        String sql = "SELECT partition_name " +
                "FROM information_schema.partitions " +
                "WHERE table_name = 'user_operation_log' " +
                "AND partition_name LIKE 'p%' " +
                "AND STR_TO_DATE(REPLACE(partition_name, 'p', ''), '%Y%m') < DATE_SUB(NOW(), INTERVAL 12 MONTH)";
        List<String> expiredPartitions = jdbcTemplate.queryForList(sql, String.class);
        for (String partition : expiredPartitions) {
            try {
                jdbcTemplate.execute("ALTER TABLE user_operation_log DROP PARTITION " + partition);
                log.info("Dropped partition: {}", partition);
                // Record the operation
                logPartitionOperation("DROP", partition, "SUCCESS");
            } catch (Exception e) {
                log.error("Failed to drop partition: {}", partition, e);
                logPartitionOperation("DROP", partition, "FAILED: " + e.getMessage());
            }
        }
    }

    private void createFuturePartitions() {
        // Pre-create partitions for the next 3 months
        for (int i = 1; i <= 3; i++) {
            LocalDate futureDate = LocalDate.now().plusMonths(i);
            String partitionName = "p" + futureDate.format(DateTimeFormatter.ofPattern("yyyyMM"));
            // Compute the boundary from the following month, so the
            // December -> January rollover yields 202301, not 202213
            LocalDate nextMonth = futureDate.plusMonths(1);
            int nextPartitionValue = nextMonth.getYear() * 100 + nextMonth.getMonthValue();
            try {
                String sql = String.format(
                    "ALTER TABLE user_operation_log REORGANIZE PARTITION pfuture INTO (" +
                    "PARTITION %s VALUES LESS THAN (%d), " +
                    "PARTITION pfuture VALUES LESS THAN MAXVALUE)",
                    partitionName, nextPartitionValue
                );
                jdbcTemplate.execute(sql);
                log.info("Created partition: {}", partitionName);
                logPartitionOperation("CREATE", partitionName, "SUCCESS");
            } catch (Exception e) {
                log.warn("Failed to create partition (it may already exist): {}", partitionName, e);
            }
        }
    }

    private void logPartitionOperation(String operation, String partition, String status) {
        jdbcTemplate.update(
            "INSERT INTO partition_operation_log(operation, partition_name, status, create_time) VALUES (?, ?, ?, NOW())",
            operation, partition, status
        );
    }
}
Why partition dropping wins:

- Extremely fast: it removes the partition's data files directly
- No business impact: no risk of locking the table
- Easy to operate: the whole thing can be automated
- Query bonus: partition pruning can speed up reads (with a caveat, verified below)
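That last point deserves a check: MySQL only prunes RANGE partitions for range predicates on functions it knows are monotonic (such as TO_DAYS(), YEAR(), UNIX_TIMESTAMP()), and the YEAR(create_time)*100 + MONTH(create_time) expression used above is not on that list, so verify pruning with EXPLAIN before counting on it:

-- The `partitions` column of the plan should list only the months touched;
-- if every partition shows up, consider RANGE (TO_DAYS(create_time)) instead
EXPLAIN SELECT COUNT(*)
FROM user_operation_log
WHERE create_time >= '2022-03-01'
  AND create_time < '2022-04-01';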
@Service
@Slf4j
public class OnlineTableMigrationService {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Online migration-based deletion.
     */
    public void onlineMigrationDelete(String sourceTable, String condition) {
        String newTable = sourceTable + "_new";
        String tempTable = sourceTable + "_temp";
        try {
            // Phase 1: preparation
            log.info("=== Phase 1: preparation ===");
            prepareMigration(sourceTable, newTable, tempTable);
            // Phase 2: double-write
            log.info("=== Phase 2: double-write ===");
            enableDoubleWrite(sourceTable, newTable);
            // Phase 3: sync existing data
            log.info("=== Phase 3: data sync ===");
            syncExistingData(sourceTable, newTable, condition);
            // Phase 4: validation
            log.info("=== Phase 4: validation ===");
            if (!validateDataSync(sourceTable, newTable)) {
                throw new RuntimeException("Data sync validation failed");
            }
            // Phase 5: switch-over
            log.info("=== Phase 5: switch-over ===");
            switchToNewTable(sourceTable, newTable, tempTable);
            // Phase 6: cleanup
            log.info("=== Phase 6: cleanup ===");
            cleanupAfterSwitch(sourceTable, tempTable);
            log.info("Online migration deletion finished!");
        } catch (Exception e) {
            log.error("Online migration failed", e);
            // Roll back the double-write
            disableDoubleWrite();
            throw new RuntimeException("Online migration failed", e);
        }
    }

    private void prepareMigration(String sourceTable, String newTable, String tempTable) {
        // Back up the source table
        jdbcTemplate.execute("CREATE TABLE " + tempTable + " LIKE " + sourceTable);
        jdbcTemplate.execute("INSERT INTO " + tempTable + " SELECT * FROM " + sourceTable);
        // Create the new table
        jdbcTemplate.execute("CREATE TABLE " + newTable + " LIKE " + sourceTable);
    }

    private void enableDoubleWrite(String sourceTable, String newTable) {
        // Double-writing belongs in the application layer;
        // database triggers would also work but hurt performance
        log.info("Configure application double-write: write to both {} and {}", sourceTable, newTable);
        // Wait for the double-write config to take effect
        sleep(5000);
    }

    private void syncExistingData(String sourceTable, String newTable, String condition) {
        log.info("Syncing existing data");
        // Copy the rows matching the keep-condition into the new table
        String syncSql = String.format(
            "INSERT IGNORE INTO %s SELECT * FROM %s WHERE %s",
            newTable, sourceTable, condition
        );
        int syncedCount = jdbcTemplate.update(syncSql);
        log.info("Existing data synced: {} rows", syncedCount);
        // Give the double-write time to catch up with the increment
        log.info("Waiting for incremental data to catch up...");
        sleep(30000); // 30 seconds; tune to your workload
        // Check data consistency
        checkDataConsistency(sourceTable, newTable);
    }

    private void checkDataConsistency(String sourceTable, String newTable) {
        // Compare row counts of business-critical data
        Integer sourceCount = jdbcTemplate.queryForObject(
            "SELECT COUNT(*) FROM " + sourceTable, Integer.class);
        Integer newCount = jdbcTemplate.queryForObject(
            "SELECT COUNT(*) FROM " + newTable, Integer.class);
        log.info("Consistency check: source {} rows, new {} rows", sourceCount, newCount);
        // Add finer-grained checks here as needed
    }

    private boolean validateDataSync(String sourceTable, String newTable) {
        // Validate the sync; put real verification logic here
        log.info("Data sync validation passed");
        return true;
    }

    private void switchToNewTable(String sourceTable, String newTable, String tempTable) {
        // Briefly pause writes (depending on the business, may be unnecessary)
        log.info("Pausing writes for the switch...");
        sleep(5000); // 5-second write pause
        // Atomic switch
        jdbcTemplate.execute("RENAME TABLE " +
            sourceTable + " TO " + sourceTable + "_backup, " +
            newTable + " TO " + sourceTable);
        log.info("Table switch complete");
    }

    private void cleanupAfterSwitch(String sourceTable, String tempTable) {
        // Turn off double-write
        disableDoubleWrite();
        // Keep the backup table around for a while before dropping it
        log.info("Backup table kept: {}_backup", sourceTable);
        jdbcTemplate.execute("DROP TABLE " + tempTable);
        log.info("Temp table dropped: {}", tempTable);
    }

    private void disableDoubleWrite() {
        log.info("Disable the application double-write configuration");
    }

    private void sleep(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
# Install Percona Toolkit
# Ubuntu/Debian:
sudo apt-get install percona-toolkit
# Archive-delete data with pt-archiver
pt-archiver \
  --source h=localhost,D=test,t=user_operation_log \
  --where "create_time < '2023-01-01'" \
  --limit 1000 \
  --commit-each \
  --sleep 0.1 \
  --statistics \
  --progress 10000 \
  --why-quit \
  --dry-run # dry run first; remove this flag once the plan looks right
# The actual delete
pt-archiver \
  --source h=localhost,D=test,t=user_operation_log \
  --where "create_time < '2023-01-01'" \
  --limit 1000 \
  --commit-each \
  --sleep 0.1 \
  --purge
@Component
@Slf4j
public class SmartDeleteTool {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Pick and run the best deletion strategy automatically.
     */
    public void smartDelete(String tableName, String condition) {
        try {
            // 1. Analyze the table
            TableAnalysisResult analysis = analyzeTable(tableName, condition);
            // 2. Choose the best strategy for what we found
            DeleteStrategy strategy = chooseBestStrategy(analysis);
            // 3. Execute it
            executeDelete(strategy, tableName, condition);
        } catch (Exception e) {
            log.error("Smart delete failed", e);
            throw new RuntimeException("Smart delete failed", e);
        }
    }

    private TableAnalysisResult analyzeTable(String tableName, String condition) {
        TableAnalysisResult result = new TableAnalysisResult();
        // Table size
        result.setTotalRows(getTableRowCount(tableName));
        result.setDeleteRows(getDeleteRowCount(tableName, condition));
        result.setDeleteRatio(result.getDeleteRows() * 1.0 / result.getTotalRows());
        // Table structure
        result.setHasPartition(isTablePartitioned(tableName));
        result.setHasPrimaryKey(hasPrimaryKey(tableName));
        result.setIndexCount(getIndexCount(tableName));
        // System load
        result.setSystemLoad(getSystemLoad());
        return result;
    }

    private DeleteStrategy chooseBestStrategy(TableAnalysisResult analysis) {
        if (analysis.isHasPartition() && analysis.getDeleteRatio() > 0.3) {
            return DeleteStrategy.PARTITION_DROP;
        }
        if (analysis.getDeleteRatio() > 0.5) {
            return DeleteStrategy.TABLE_REBUILD;
        }
        if (analysis.getTotalRows() > 10_000_000) {
            return DeleteStrategy.BATCH_DELETE_WITH_PAUSE;
        }
        return DeleteStrategy.BATCH_DELETE;
    }

    private void executeDelete(DeleteStrategy strategy, String tableName, String condition) {
        switch (strategy) {
            case PARTITION_DROP:
                executePartitionDrop(tableName, condition);
                break;
            case TABLE_REBUILD:
                executeTableRebuild(tableName, condition);
                break;
            case BATCH_DELETE_WITH_PAUSE:
                executeBatchDeleteWithPause(tableName, condition);
                break;
            default:
                executeBatchDelete(tableName, condition);
        }
    }

    // Concrete implementations of each strategy...

    private long getTableRowCount(String tableName) {
        // Exact but expensive on huge tables (see the note below)
        String sql = "SELECT COUNT(*) FROM " + tableName;
        return jdbcTemplate.queryForObject(sql, Long.class);
    }

    private long getDeleteRowCount(String tableName, String condition) {
        String sql = "SELECT COUNT(*) FROM " + tableName + " WHERE " + condition;
        return jdbcTemplate.queryForObject(sql, Long.class);
    }

    private boolean isTablePartitioned(String tableName) {
        String sql = "SELECT COUNT(*) FROM information_schema.partitions " +
                "WHERE table_name = ? AND partition_name IS NOT NULL";
        Integer count = jdbcTemplate.queryForObject(sql, Integer.class, tableName);
        return count != null && count > 0;
    }

    // Other analysis helpers...
}
enum DeleteStrategy {
    BATCH_DELETE,             // plain batch delete
    BATCH_DELETE_WITH_PAUSE,  // batch delete with throttling pauses
    TABLE_REBUILD,            // rebuild the table
    PARTITION_DROP,           // drop partitions
    ONLINE_MIGRATION          // online migration
}
@Data
class TableAnalysisResult {
    private long totalRows;
    private long deleteRows;
    private double deleteRatio;
    private boolean hasPartition;
    private boolean hasPrimaryKey;
    private int indexCount;
    private double systemLoad;
}
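One caution about analyzeTable above: SELECT COUNT(*) on a table big enough to need these strategies is itself an expensive scan. For strategy selection, the optimizer's estimate is usually close enough. A sketch; table_rows is an InnoDB estimate that can drift, so run ANALYZE TABLE first for fresher numbers:

SELECT table_rows
FROM information_schema.tables
WHERE table_schema = DATABASE()
  AND table_name = 'user_operation_log';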
| Strategy | Best for | Pros | Cons |
|---|---|---|---|
| Batch delete | Delete ratio < 30% | No downtime needed | May still lock the table |
| Table rebuild | A brief write pause is acceptable | Also defragments the table | Needs extra disk space |
| Partition drop | Partitioned tables | No performance impact | Up-front schema retrofit cost |
| Online migration | Critical business tables | Safe and reliable | Longest cycle |
| pt-archiver | Very large tables | Tunes itself automatically | Depends on an external tool |
Best practices:

- Validate first: rehearse any deletion plan in a test environment
- Back up first: always back up the data before deleting
- Pick the window: run deletions during business off-peak hours
- Watch it live: monitor database health in real time with alert thresholds
- Plan the retreat: have a complete rollback plan ready

Guiding principles:

- Safety first: no deletion is worth losing data over
- Minimal impact: disturb the business as little as possible
- Efficiency: pick the most effective approach for the situation
- Observability: the whole process must be monitorable and controllable

Choosing at a glance:

- Partitioned table: dropping partitions is fastest
- Deleting a little: batch deletion is the steadiest
- Deleting a lot: rebuilding the table is the most efficient
- Cannot stop writes: online migration is the safest

Takeaways:

- Prevention beats cure: manage the data lifecycle and clean up on a schedule
- Design it in: plan the cleanup strategy at the architecture stage
- Know the tools: be fluent with the various deletion tools
- Keep learning: hold a retrospective after every operation