Concept: These are regularized versions of linear regression, used to handle multicollinearity and overfitting.
Principle:
Ridge regression: adds an L2 penalty to the loss function, ∑(y_i - ŷ_i)^2 + λ∑β_j^2, which keeps coefficients from growing too large.
Lasso regression: adds an L1 penalty, ∑(y_i - ŷ_i)^2 + λ∑|β_j|, which can shrink some coefficients exactly to zero and thereby performs feature selection.
Elastic net: combines the L1 and L2 penalties, ∑(y_i - ŷ_i)^2 + λ1∑|β_j| + λ2∑β_j^2.
Idea: regularization constrains model complexity to avoid overfitting; lasso additionally performs feature selection.
Applications: high-dimensional data, feature selection, collinear data.
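In R, all three penalties can be fit with one function by varying the alpha argument of glmnet (alpha = 0 is ridge, alpha = 1 is lasso, intermediate values give the elastic net). A minimal sketch on simulated data; x, y, and the fit_* objects are illustrative stand-ins, independent of the script below:
library(glmnet)
set.seed(1)
x <- matrix(rnorm(100 * 10), nrow = 100) # 100 observations, 10 predictors
y <- x[, 1] - 2 * x[, 2] + rnorm(100)    # only the first two predictors matter
fit_ridge <- glmnet(x, y, alpha = 0)     # L2 penalty: shrinks coefficients, never zeroes them
fit_lasso <- glmnet(x, y, alpha = 1)     # L1 penalty: can set coefficients exactly to zero
fit_enet  <- glmnet(x, y, alpha = 0.5)   # an even mix of L1 and L2
coef(fit_lasso, s = 0.1)                 # at lambda = 0.1, most noise coefficients are zero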
- Data preprocessing: read the Excel data, convert categorical variables to factors, drop rows with missing values, and split 70/30 into training and test sets.
- Model building: fit a baseline linear regression plus ridge, lasso, and elastic net models with glmnet.
- Training: tune lambda by cross-validation, and additionally tune alpha for the elastic net.
- Evaluation: compare R², RMSE, and MAE on both the training and test sets.
- Visualization: plot performance comparisons, predictions, coefficients, overfitting gaps, variable selection counts, and CV curves.
- Saving results: export figures (JPG/PNG/PDF), coefficient tables, a Word report, the R workspace, and the fitted model objects.
Machine learning models for predicting quantitative outcomes fall into four broad families: classical statistical models, tree-based ensembles, kernel methods, and deep learning. Each family captures data patterns through a different mechanism and suits prediction tasks ranging from linear to highly nonlinear relationships.
The code below automates the full workflow from data preparation to saving results, covering preprocessing, model configuration, performance evaluation, and report generation.
# Set the working directory and clean the environment
rm(list = ls())
if (!is.null(dev.list())) dev.off()
setwd("C:/Users/hyy/Desktop/")
if (!dir.exists("Results-Regularization")) dir.create("Results-Regularization")
# Load the required packages
if (!require(pacman)) install.packages("pacman")
pacman::p_load(
readxl, dplyr, ggplot2, car, lmtest, ggpubr, corrplot,
performance, see, effects, sjPlot, report, officer, flextable,
broom, gridExtra, patchwork, glmnet, caret, MASS, plotmo, tidyr,
pROC, MLmetrics, modelr
)
# Set the font family (for Chinese text in plots)
font_family <- "sans"
# Set a random seed for reproducibility
set.seed(123)
# Read the data
data <- read_excel("示例数据.xlsx", sheet = "示例数据")
# Data preprocessing
str(data)
summary(data)
# Convert the categorical variables to factors
data$结局 <- as.factor(data$结局)
data$肥胖程度 <- as.factor(data$肥胖程度)
data$教育水平 <- as.factor(data$教育水平)
data$血型 <- as.factor(data$血型)
data$指标8 <- as.factor(data$指标8)
# Check for missing values
sum(is.na(data))
# If there are missing values, handle them simply by dropping incomplete rows
# (count the rows before removing them, otherwise the message always reports 0)
n_missing_rows <- sum(!complete.cases(data))
if (n_missing_rows > 0) {
  data <- na.omit(data)
  cat("Dropped", n_missing_rows, "rows containing missing values\n")
}
# Split the data: 70% training set, 30% test set
train_index <- createDataPartition(data$指标1, p = 0.7, list = FALSE)
train_data <- data[train_index, ]
test_data <- data[-train_index, ]
cat("训练集样本量:", nrow(train_data), "\n")
cat("测试集样本量:", nrow(test_data), "\n")
# Prepare the model matrices
x_train <- model.matrix(指标1 ~ . - 序号, data = train_data)[, -1]
y_train <- train_data$指标1
x_test <- model.matrix(指标1 ~ . - 序号, data = test_data)[, -1]
y_test <- test_data$指标1
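# Note: model.matrix expands the factor variables into dummy (0/1) columns,
# which is required because glmnet only accepts a purely numeric matrix;
# [, -1] drops the intercept column (glmnet adds its own intercept).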
# Custom performance metric functions (each takes the predictions first, then the actual values)
calculate_r2 <- function(pred, actual) {
1 - sum((actual - pred)^2) / sum((actual - mean(actual))^2)
}
calculate_rmse <- function(pred, actual) {
sqrt(mean((actual - pred)^2))
}
calculate_mae <- function(pred, actual) {
mean(abs(actual - pred))
}
calculate_mse <- function(pred, actual) {
mean((actual - pred)^2)
}
# Containers for all model results
all_models <- list()
performance_comparison <- data.frame()
# 1. Linear regression (baseline model)
cat("Training the linear regression model...\n")
lm_model <- lm(指标1 ~ . - 序号, data = train_data)
lm_train_pred <- predict(lm_model, newdata = train_data)
lm_test_pred <- predict(lm_model, newdata = test_data)
lm_performance <- data.frame(
Model = "Linear Regression",
Train_R2 = calculate_r2(lm_train_pred, train_data$指标1),
Test_R2 = calculate_r2(lm_test_pred, test_data$指标1),
Train_RMSE = calculate_rmse(lm_train_pred, train_data$指标1),
Test_RMSE = calculate_rmse(lm_test_pred, test_data$指标1),
Train_MAE = calculate_mae(lm_train_pred, train_data$指标1),
Test_MAE = calculate_mae(lm_test_pred, test_data$指标1),
Parameters = "None",
Selected_Vars = ncol(x_train)
)
performance_comparison <- rbind(performance_comparison, lm_performance)
all_models$Linear <- lm_model
# 2. Ridge regression
cat("Training the ridge regression model...\n")
cv_ridge <- cv.glmnet(x_train, y_train, alpha = 0, nfolds = 10)
best_lambda_ridge <- cv_ridge$lambda.min
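# Note: lambda.min is the lambda with the smallest cross-validated error;
# cv_ridge$lambda.1se is a common, more conservative alternative (the largest
# lambda whose CV error is within one standard error of the minimum).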
ridge_model <- glmnet(x_train, y_train, alpha = 0, lambda = best_lambda_ridge)
ridge_train_pred <- predict(ridge_model, newx = x_train)
ridge_test_pred <- predict(ridge_model, newx = x_test)
ridge_coef <- as.matrix(coef(ridge_model))
ridge_selected_vars <- sum(ridge_coef[-1] != 0) # exclude the intercept
ridge_performance <- data.frame(
Model = "Ridge Regression",
Train_R2 = calculate_r2(ridge_train_pred, y_train),
Test_R2 = calculate_r2(ridge_test_pred, y_test),
Train_RMSE = calculate_rmse(ridge_train_pred, y_train),
Test_RMSE = calculate_rmse(ridge_test_pred, y_test),
Train_MAE = calculate_mae(ridge_train_pred, y_train),
Test_MAE = calculate_mae(ridge_test_pred, y_test),
Parameters = paste0("lambda=", round(best_lambda_ridge, 6)),
Selected_Vars = ridge_selected_vars
)
performance_comparison <- rbind(performance_comparison, ridge_performance)
all_models$Ridge <- ridge_model
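# 3. Lasso regression
# (alpha = 1; parallels the ridge block above and defines the cv_lasso and
# lasso_* objects used by the comparison and report code further below)
cat("Training the lasso regression model...\n")
cv_lasso <- cv.glmnet(x_train, y_train, alpha = 1, nfolds = 10)
best_lambda_lasso <- cv_lasso$lambda.min
lasso_model <- glmnet(x_train, y_train, alpha = 1, lambda = best_lambda_lasso)
lasso_train_pred <- predict(lasso_model, newx = x_train)
lasso_test_pred <- predict(lasso_model, newx = x_test)
lasso_coef <- as.matrix(coef(lasso_model))
lasso_selected_vars <- sum(lasso_coef[-1] != 0) # exclude the intercept
lasso_performance <- data.frame(
  Model = "Lasso Regression",
  Train_R2 = calculate_r2(lasso_train_pred, y_train),
  Test_R2 = calculate_r2(lasso_test_pred, y_test),
  Train_RMSE = calculate_rmse(lasso_train_pred, y_train),
  Test_RMSE = calculate_rmse(lasso_test_pred, y_test),
  Train_MAE = calculate_mae(lasso_train_pred, y_train),
  Test_MAE = calculate_mae(lasso_test_pred, y_test),
  Parameters = paste0("lambda=", round(best_lambda_lasso, 6)),
  Selected_Vars = lasso_selected_vars
)
performance_comparison <- rbind(performance_comparison, lasso_performance)
all_models$Lasso <- lasso_model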
# 4. Elastic net
cat("Training the elastic net model...\n")
# Try a range of alpha values to find the best mix of L1 and L2
alpha_values <- c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
best_alpha <- 0.5
best_lambda_enet <- cv_ridge$lambda.min # initial value
best_rmse <- Inf
for (alpha_val in alpha_values) {
  cv_model <- cv.glmnet(x_train, y_train, alpha = alpha_val, nfolds = 5) # fewer folds to speed up the search
current_rmse <- min(cv_model$cvm)
if (current_rmse < best_rmse) {
best_rmse <- current_rmse
best_alpha <- alpha_val
best_lambda_enet <- cv_model$lambda.min
}
}
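# Note: each cv.glmnet call above draws its own random folds, so the alpha
# comparison is somewhat noisy. Fixing the fold assignment would make it fair,
# e.g. (sketch using the objects defined above):
#   foldid <- sample(rep(1:5, length.out = nrow(x_train)))
#   cv.glmnet(x_train, y_train, alpha = alpha_val, foldid = foldid)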
cat("最佳alpha:", best_alpha, "最佳lambda:", best_lambda_enet, "\n")
# Fit the final model with the best parameters
enet_model <- glmnet(x_train, y_train, alpha = best_alpha, lambda = best_lambda_enet)
enet_train_pred <- predict(enet_model, newx = x_train)
enet_test_pred <- predict(enet_model, newx = x_test)
enet_coef <- as.matrix(coef(enet_model))
enet_selected_vars <- sum(enet_coef[-1] != 0) # exclude the intercept
enet_performance <- data.frame(
Model = "Elastic Net",
Train_R2 = calculate_r2(enet_train_pred, y_train),
Test_R2 = calculate_r2(enet_test_pred, y_test),
Train_RMSE = calculate_rmse(enet_train_pred, y_train),
Test_RMSE = calculate_rmse(enet_test_pred, y_test),
Train_MAE = calculate_mae(enet_train_pred, y_train),
Test_MAE = calculate_mae(enet_test_pred, y_test),
Parameters = paste0("alpha=", round(best_alpha, 2), ", lambda=", round(best_lambda_enet, 6)),
Selected_Vars = enet_selected_vars
)
performance_comparison <- rbind(performance_comparison, enet_performance)
all_models$ElasticNet <- enet_model
# Save the performance comparison
write.csv(performance_comparison, "Results-Regularization/模型性能比较.csv", row.names = FALSE, fileEncoding = "UTF-8")
# Visualization: plot data is assembled with base data frames (MASS, loaded above, masks dplyr::select)
# 1. Model performance comparison plot
performance_long <- data.frame(
  Model = rep(performance_comparison$Model, 4),
  Metric = c(
    rep("R2", nrow(performance_comparison) * 2),
    rep("RMSE", nrow(performance_comparison) * 2)
  ),
  # one "Training" block and one "Testing" block per metric, matching Value below
  Dataset = rep(rep(c("Training", "Testing"), each = nrow(performance_comparison)), 2),
  Value = c(
    performance_comparison$Train_R2,
    performance_comparison$Test_R2,
    performance_comparison$Train_RMSE,
    performance_comparison$Test_RMSE
  )
)
p_performance_comparison <- ggplot(performance_long, aes(x = Model, y = Value, fill = Dataset)) +
geom_bar(stat = "identity", position = "dodge", alpha = 0.8) +
facet_wrap(~Metric, scales = "free_y") +
labs(title = "Regularization Models Performance Comparison",
x = "Model",
y = "Value") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "bottom")
# 2. Test set prediction comparison
test_predictions <- data.frame(
Actual = y_test,
Linear = as.numeric(lm_test_pred),
Ridge = as.numeric(ridge_test_pred),
Lasso = as.numeric(lasso_test_pred),
ElasticNet = as.numeric(enet_test_pred)
)
test_predictions_long <- test_predictions %>%
pivot_longer(cols = -Actual, names_to = "Model", values_to = "Predicted")
p_test_comparison <- ggplot(test_predictions_long, aes(x = Actual, y = Predicted, color = Model)) +
geom_point(alpha = 0.6) +
geom_abline(intercept = 0, slope = 1, linetype = "dashed", color = "black") +
labs(title = "Test Set Predictions Comparison",
x = "Actual Values",
y = "Predicted Values") +
theme_minimal() +
theme(legend.position = "bottom")
# 3. Coefficient comparison plot
# Extract the coefficients from each model
coef_comparison <- data.frame()
# Linear regression coefficients
lm_coef_df <- data.frame(
Variable = names(coef(lm_model)),
Coefficient = as.numeric(coef(lm_model)),
Model = "Linear"
) %>% filter(Variable != "(Intercept)")
# Ridge regression coefficients
ridge_coef_df <- data.frame(
Variable = rownames(ridge_coef),
Coefficient = as.numeric(ridge_coef),
Model = "Ridge"
) %>% filter(Variable != "(Intercept)")
# Lasso coefficients
lasso_coef_df <- data.frame(
Variable = rownames(lasso_coef),
Coefficient = as.numeric(lasso_coef),
Model = "Lasso"
) %>% filter(Variable != "(Intercept)")
# Elastic net coefficients
enet_coef_df <- data.frame(
Variable = rownames(enet_coef),
Coefficient = as.numeric(enet_coef),
Model = "ElasticNet"
) %>% filter(Variable != "(Intercept)")
# Combine all coefficients
coef_comparison <- rbind(lm_coef_df, ridge_coef_df, lasso_coef_df, enet_coef_df)
# Keep the 10 variables with the largest absolute coefficients for plotting
important_vars <- coef_comparison %>%
group_by(Variable) %>%
summarise(max_coef = max(abs(Coefficient))) %>%
arrange(desc(max_coef)) %>%
head(10) %>%
pull(Variable)
coef_comparison_filtered <- coef_comparison %>%
filter(Variable %in% important_vars)
p_coef_comparison <- ggplot(coef_comparison_filtered, aes(x = Variable, y = Coefficient, fill = Model)) +
geom_bar(stat = "identity", position = "dodge", alpha = 0.8) +
labs(title = "Coefficient Comparison Across Models",
x = "Variables",
y = "Coefficient Value") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "bottom")
# 4. Overfitting analysis plot (training vs test R²)
overfitting_data <- data.frame(
Model = performance_comparison$Model,
Train_R2 = performance_comparison$Train_R2,
Test_R2 = performance_comparison$Test_R2,
Overfitting_Gap = performance_comparison$Train_R2 - performance_comparison$Test_R2
)
# reshape to long format so the training and testing bars dodge properly
p_overfitting <- overfitting_data %>%
  pivot_longer(cols = c(Train_R2, Test_R2), names_to = "Dataset", values_to = "R2") %>%
  ggplot(aes(x = Model, y = R2, fill = Dataset)) +
  geom_bar(stat = "identity", position = "dodge", alpha = 0.7) +
  labs(title = "Overfitting Analysis: Training vs Testing R-squared",
       x = "Model",
       y = "R-squared") +
  scale_fill_manual(values = c("Train_R2" = "blue", "Test_R2" = "red"),
                    labels = c("Train_R2" = "Training R²", "Test_R2" = "Testing R²")) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = "bottom")
# 5. Variable selection results
variable_selection <- data.frame(
Model = c("Linear Regression", "Ridge Regression", "Lasso Regression", "Elastic Net"),
Total_Variables = rep(ncol(x_train), 4),
Selected_Variables = c(
ncol(x_train),
ridge_selected_vars,
lasso_selected_vars,
enet_selected_vars
)
)
p_variable_selection <- ggplot(variable_selection, aes(x = Model, y = Selected_Variables)) +
geom_bar(stat = "identity", fill = "steelblue", alpha = 0.8) +
geom_text(aes(label = Selected_Variables), vjust = -0.5) +
labs(title = "Variable Selection Results",
x = "Model",
y = "Number of Selected Variables") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# 6. Cross-validation curve comparison
# Ridge CV curve
ridge_cv_data <- data.frame(
lambda = cv_ridge$lambda,
cvm = cv_ridge$cvm,
Model = "Ridge"
)
# Lasso CV curve
lasso_cv_data <- data.frame(
lambda = cv_lasso$lambda,
cvm = cv_lasso$cvm,
Model = "Lasso"
)
cv_comparison <- rbind(ridge_cv_data, lasso_cv_data)
p_cv_comparison <- ggplot(cv_comparison, aes(x = lambda, y = cvm, color = Model)) +
  geom_line(linewidth = 1) +
  scale_x_log10() +
  labs(title = "Cross-Validation Curve Comparison",
       x = "Lambda (log scale)",
       y = "Mean Squared Error") +
theme_minimal() +
theme(legend.position = "bottom")
# Save all plots
plots_to_save <- list(
p_performance_comparison,
p_test_comparison,
p_coef_comparison,
p_overfitting,
p_variable_selection,
p_cv_comparison
)
plot_names <- c(
"performance_comparison",
"test_predictions_comparison",
"coefficient_comparison",
"overfitting_analysis",
"variable_selection",
"cv_curve_comparison"
)
# Save in multiple formats
for (i in 1:length(plots_to_save)) {
tryCatch({
# JPG
ggsave(paste0("Results-Regularization/", plot_names[i], ".jpg"),
plots_to_save[[i]], width = 10, height = 8, units = "in", dpi = 300)
# PNG
ggsave(paste0("Results-Regularization/", plot_names[i], ".png"),
plots_to_save[[i]], width = 10, height = 8, units = "in", dpi = 300)
# PDF
ggsave(paste0("Results-Regularization/", plot_names[i], ".pdf"),
plots_to_save[[i]], width = 10, height = 8, units = "in")
cat("成功保存:", plot_names[i], "\n")
}, error = function(e) {
cat("Error saving", plot_names[i], ":", e$message, "\n")
})
}
# Save detailed model coefficients
write.csv(lm_coef_df, "Results-Regularization/线性回归系数.csv", row.names = FALSE, fileEncoding = "UTF-8")
write.csv(ridge_coef_df, "Results-Regularization/岭回归系数.csv", row.names = FALSE, fileEncoding = "UTF-8")
write.csv(lasso_coef_df, "Results-Regularization/Lasso回归系数.csv", row.names = FALSE, fileEncoding = "UTF-8")
write.csv(enet_coef_df, "Results-Regularization/弹性网络系数.csv", row.names = FALSE, fileEncoding = "UTF-8")
# Generate a comprehensive Word report
tryCatch({
doc <- read_docx()
# Title
doc <- doc %>%
body_add_par("正则化回归模型综合分析报告", style = "heading 1") %>%
body_add_par(paste("生成日期:", Sys.Date()), style = "Normal") %>%
body_add_par("", style = "Normal")
# Data overview
doc <- doc %>%
body_add_par("数据概述", style = "heading 2") %>%
body_add_par(paste("总样本量:", nrow(data)), style = "Normal") %>%
body_add_par(paste("训练集样本量:", nrow(train_data)), style = "Normal") %>%
body_add_par(paste("测试集样本量:", nrow(test_data)), style = "Normal") %>%
body_add_par(paste("特征数量:", ncol(x_train)), style = "Normal") %>%
body_add_par("", style = "Normal")
# Model performance comparison
doc <- doc %>%
body_add_par("模型性能比较", style = "heading 2")
perf_ft <- flextable(performance_comparison) %>%
theme_zebra() %>%
autofit()
doc <- doc %>%
body_add_flextable(perf_ft) %>%
body_add_par("", style = "Normal")
# Identify the best model
best_test_r2 <- max(performance_comparison$Test_R2)
best_model <- performance_comparison$Model[which.max(performance_comparison$Test_R2)]
doc <- doc %>%
body_add_par("最佳模型识别", style = "heading 3") %>%
body_add_par(paste("基于测试集R平方,最佳模型为:", best_model), style = "Normal") %>%
body_add_par(paste("测试集R平方:", round(best_test_r2, 4)), style = "Normal") %>%
body_add_par("", style = "Normal")
# Overfitting analysis
doc <- doc %>%
body_add_par("过拟合分析", style = "heading 3") %>%
body_add_par("训练集与测试集性能差异:", style = "Normal")
overfitting_ft <- flextable(overfitting_data) %>%
theme_zebra() %>%
autofit()
doc <- doc %>%
body_add_flextable(overfitting_ft) %>%
body_add_par("", style = "Normal")
# Variable selection summary
doc <- doc %>%
body_add_par("变量选择总结", style = "heading 3")
selection_ft <- flextable(variable_selection) %>%
theme_zebra() %>%
autofit()
doc <- doc %>%
body_add_flextable(selection_ft) %>%
body_add_par("", style = "Normal")
# Visualization results - only add images that actually exist
doc <- doc %>%
body_add_par("可视化结果", style = "heading 2")
# Check that each image file exists before adding it
img_files <- c(
"performance_comparison.jpg",
"test_predictions_comparison.jpg",
"coefficient_comparison.jpg",
"overfitting_analysis.jpg",
"variable_selection.jpg"
)
img_titles <- c(
  "Model performance comparison:",
  "Test set prediction comparison:",
  "Coefficient comparison:",
  "Overfitting analysis:",
  "Variable selection results:"
)
for (i in 1:length(img_files)) {
img_path <- paste0("Results-Regularization/", img_files[i])
if (file.exists(img_path)) {
doc <- doc %>%
body_add_par(img_titles[i], style = "Normal") %>%
body_add_img(img_path, width = 7, height = 6)
} else {
cat("图片文件不存在:", img_path, "\n")
}
}
# Conclusions and recommendations
doc <- doc %>%
body_add_par("结论与建议", style = "heading 2") %>%
body_add_par("基于综合分析,主要发现如下:", style = "Normal")
# Recommendations based on model performance
if (best_model == "Linear Regression") {
doc <- doc %>% body_add_par("- 线性回归表现最佳,数据可能没有严重的多重共线性问题", style = "Normal")
} elseif (best_model == "Ridge Regression") {
doc <- doc %>% body_add_par("- 岭回归表现最佳,数据可能存在多重共线性,L2正则化有效", style = "Normal")
} elseif (best_model == "Lasso Regression") {
doc <- doc %>% body_add_par("- Lasso回归表现最佳,特征选择对模型性能有积极影响", style = "Normal")
} else {
doc <- doc %>% body_add_par("- 弹性网络表现最佳,结合了L1和L2正则化的优势", style = "Normal")
}
# Degree of overfitting
avg_overfitting_gap <- mean(overfitting_data$Overfitting_Gap)
if (avg_overfitting_gap > 0.1) {
  doc <- doc %>% body_add_par("- Clear overfitting is present; stronger regularization is advisable", style = "Normal")
} else if (avg_overfitting_gap < 0.05) {
  doc <- doc %>% body_add_par("- The models generalize well", style = "Normal")
} else {
  doc <- doc %>% body_add_par("- The models generalize reasonably well, with room for improvement", style = "Normal")
}
# Variable selection
if (lasso_selected_vars < ncol(x_train) * 0.5) {
doc <- doc %>% body_add_par("- Lasso成功进行了特征选择,减少了特征数量", style = "Normal")
}
doc <- doc %>%
body_add_par("推荐使用策略:", style = "Normal") %>%
body_add_par(paste("1. 生产环境推荐使用:", best_model, "模型"), style = "Normal") %>%
body_add_par("2. 如需特征选择,可考虑Lasso或弹性网络", style = "Normal") %>%
body_add_par("3. 如数据存在多重共线性,推荐岭回归或弹性网络", style = "Normal") %>%
body_add_par("4. 可进一步尝试其他机器学习算法进行比较", style = "Normal")
# Save the Word document
print(doc, target = "Results-Regularization/正则化回归模型综合分析报告.docx")
cat("Word报告生成成功!\n")
}, error = function(e) {
cat("生成Word报告时出错:", e$message, "\n")
})
# Save the R workspace
save.image("Results-Regularization/正则化回归分析.RData")
# Save the model objects
saveRDS(all_models, "Results-Regularization/all_models.rds")
# Print completion messages
cat("\n=== Regularized regression analysis complete ===\n")
cat("All results have been saved to the Results-Regularization folder.\n")
cat("Contents:\n")
cat("- Training and test results for the four models\n")
cat("- Model performance comparison\n")
cat("- Detailed coefficient analysis\n")
cat("- Multiple visualization figures\n")
cat("- A complete Word analysis report\n")
cat("- The R workspace file and model objects\n\n")
# Print information about the best model
cat("Best model information:\n")
cat("Model:", best_model, "\n")
cat("Test-set R-squared:", round(best_test_r2, 4), "\n")
best_model_selected_vars <- variable_selection$Selected_Variables[variable_selection$Model == best_model]
cat("Number of selected variables:", best_model_selected_vars, "\n")
# Print test-set R-squared for each model
cat("\nTest-set R-squared by model:\n")
for (i in 1:nrow(performance_comparison)) {
cat(performance_comparison$Model[i], ":", round(performance_comparison$Test_R2[i], 4), "\n")
}
# Print performance gaps
cat("\nPerformance gap analysis:\n")
cat("Mean training R-squared:", round(mean(performance_comparison$Train_R2), 4), "\n")
cat("Mean test R-squared:", round(mean(performance_comparison$Test_R2), 4), "\n")
cat("Mean overfitting gap:", round(mean(overfitting_data$Overfitting_Gap), 4), "\n")
# Print model selection advice
cat("\nModel selection advice:\n")
if (best_model == "Elastic Net") {
  cat("The elastic net performed best and is recommended for production use.\n")
  cat("It balances the benefits of feature selection and coefficient shrinkage.\n")
} else if (best_model == "Lasso Regression") {
  cat("Lasso regression performed well and is especially suited to feature selection tasks.\n")
} else if (best_model == "Ridge Regression") {
  cat("Ridge regression performed stably and is well suited to multicollinear data.\n")
} else {
  cat("Linear regression performed adequately, but the regularized models may still offer room for improvement.\n")
}
2. Lasso Regression, Ridge Regression, and Elastic Net
Concept
Regularized linear regression methods that add a penalty term to prevent overfitting.
Principle
- Ridge regression: L2 regularization, $\min \sum(y_i - \hat{y}_i)^2 + \lambda\sum\beta_j^2$
- Lasso regression: L1 regularization, $\min \sum(y_i - \hat{y}_i)^2 + \lambda\sum|\beta_j|$
- Elastic net: combines L1 and L2, $\min \sum(y_i - \hat{y}_i)^2 + \lambda_1\sum|\beta_j| + \lambda_2\sum\beta_j^2$ (see the parameterization note after this list)
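Note on parameterization: software such as glmnet (used in the code above) expresses all three penalties with a single strength $\lambda$ and a mixing weight $\alpha$, penalizing $\lambda\left[\alpha\sum|\beta_j| + \frac{1-\alpha}{2}\sum\beta_j^2\right]$ (glmnet also scales the squared-error term by $1/(2n)$). Matching terms with the $(\lambda_1, \lambda_2)$ form above gives $\lambda_1 = \alpha\lambda$ and $\lambda_2 = \frac{(1-\alpha)\lambda}{2}$, so $\alpha = 1$ recovers the lasso and $\alpha = 0$ recovers ridge regression.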
Idea
Penalizing large coefficients simplifies the model and improves generalization.
Applications
- Feature selection in high-dimensional data
- Handling multicollinearity
- Genomics, text mining