Applicable scenarios:
Prerequisites:
Goal: Record the key metrics before tuning, for later comparison.
# System load and CPU
uptime
# Output: load average: 15.2, 12.8, 10.3 (baseline record)
mpstat -P ALL 1 10 | tee /tmp/cpu-baseline.txt
# Record CPU utilization, %sys, and %soft (soft interrupts)
# Network throughput and connection counts
sar -n DEV 1 10 | tee /tmp/network-baseline.txt
# Record rxpck/s, txpck/s, rxkB/s, txkB/s
ss -s
# Record TCP connection counts (estab/timewait/orphaned)
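If a finer breakdown than ss -s is useful, a one-liner such as the following (a convenience sketch, not part of the original baseline) counts connections per TCP state:
ss -tan | awk 'NR > 1 { state[$1]++ } END { for (s in state) printf "%-12s %d\n", s, state[s] }'
# Prints one line per state (ESTAB, TIME-WAIT, ...) with its count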
# TCP retransmission rate (key metric)
netstat -s | grep -E "retransmit|timeout"
# Record the "segments retransmited" and timeout counters
# Establish a baseline with a wrk load test
wrk -t8 -c1000 -d60s --latency http://192.168.1.10:8080/
# Record QPS and P50/P99 latency. Baseline example:
Baseline before tuning:
- QPS: 12000 req/s
- P99 latency: 850ms
- Retransmission rate: 1.2%
- CPU %sys: 35%
- TCP ESTABLISHED: 8500
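The retransmission-rate figure above can be derived from the kernel's cumulative TCP counters; a minimal sketch (the ratio is cumulative since boot, so sample before and after a test window if you need an interval value):
awk '/^Tcp:/ { n++; if (n == 1) { for (i = 1; i <= NF; i++) idx[$i] = i }
               else printf "Retrans rate: %.2f%%\n", 100 * $(idx["RetransSegs"]) / $(idx["OutSegs"]) }' /proc/net/snmp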
Goal: Optimize the efficiency of TCP connection establishment, data transfer, and connection teardown/recycling.
# /etc/sysctl.d/10-tcp-tuning.conf
# SYN queue size (for handling bursts of new connections)
net.ipv4.tcp_max_syn_backlog = 16384    # default 1024
net.core.somaxconn = 16384              # upper bound on the application's listen() backlog
# SYN cookies (SYN flood protection)
net.ipv4.tcp_syncookies = 1             # 1 = enabled
# SYN-ACK retry count
net.ipv4.tcp_synack_retries = 2         # default 5; lowering it frees half-open connection resources faster
# Apply the configuration immediately
sysctl -p /etc/sysctl.d/10-tcp-tuning.conf
Verify SYN queue overflow:
# Compare before and after the load test
netstat -s | grep -i "SYNs to LISTEN"
# Output: 1234 SYNs to LISTEN sockets dropped (should be close to 0 after tuning)
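Accept-queue (somaxconn) pressure can be checked the same way; a quick sketch, assuming iproute2's nstat is available and the service listens on port 8080:
nstat -az TcpExtListenOverflows TcpExtListenDrops    # accept-queue overflows/drops since boot
ss -lnt sport = :8080    # for LISTEN sockets, Send-Q is the configured backlog, Recv-Q the current queue depth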
# TCP read/write buffers (key performance parameters)
net.ipv4.tcp_rmem = 4096 87380 16777216    # min default max (receive buffer, 16 MB max)
net.ipv4.tcp_wmem = 4096 65536 16777216    # send buffer
# Let TCP auto-tune the receive buffer
net.ipv4.tcp_moderate_rcvbuf = 1
# Global socket buffer caps (in bytes; unlike tcp_mem, these are not in pages)
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.rmem_default = 262144
net.core.wmem_default = 262144
# Apply the settings
sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216"
sysctl -w net.ipv4.tcp_wmem="4096 65536 16777216"
Parameter notes:
- tcp_rmem[2]: maximum receive window; affects download throughput (increase it on high-bandwidth paths)
- tcp_wmem[2]: maximum send window; affects upload throughput
- tcp_moderate_rcvbuf: auto-tunes the window based on RTT and bandwidth
Verify the buffers:
# Inspect the buffers actually used by connections
ss -tm | grep -A1 "ESTAB"
# Output:
# skmem:(r0,rb369280,t0,tb87040,f0,w0,o0,bl0,d0)
# rb = receive buffer, tb = send buffer
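How large the maxima need to be follows from the bandwidth-delay product (BDP); a rough calculation with assumed example values (10 Gbit/s path, 20 ms RTT):
BW_GBPS=10; RTT_MS=20    # assumed link speed and round-trip time
echo "BDP = $(( BW_GBPS * 1000000000 / 8 * RTT_MS / 1000 )) bytes"    # 25,000,000 bytes ≈ 24 MiB
# A single flow on such a path would be capped by the 16 MB maximum above; raise
# tcp_rmem/tcp_wmem max and rmem_max/wmem_max toward 32 MB if that matters.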
# TIME_WAIT reuse and fast recycling
net.ipv4.tcp_tw_reuse = 1          # allow reusing TIME_WAIT sockets for new outbound connections
net.ipv4.tcp_fin_timeout = 15      # FIN_WAIT2 timeout (default 60 s)
# Upper limit on TIME_WAIT buckets
net.ipv4.tcp_max_tw_buckets = 50000    # default 16384
# Limit on orphaned connections (no longer attached to any process)
net.ipv4.tcp_max_orphans = 65536       # default 16384
# Apply the settings
sysctl -w net.ipv4.tcp_tw_reuse=1
sysctl -w net.ipv4.tcp_fin_timeout=15
Note: tcp_tw_recycle was removed in kernel 4.12; use tcp_tw_reuse instead.
Verify TIME_WAIT:
# Monitor the TIME_WAIT count during the load test
watch -n1 'ss -s | grep TIME-WAIT'
# Should drop noticeably after tuning
# List the available congestion control algorithms
sysctl net.ipv4.tcp_available_congestion_control
# Output: cubic reno bbr
# Switch to BBR (requires kernel 4.9+)
modprobe tcp_bbr
echo "tcp_bbr" >> /etc/modules-load.d/bbr.conf
sysctl -w net.ipv4.tcp_congestion_control=bbr
sysctl -w net.core.default_qdisc=fq
# Persist across reboots
cat <<EOF >> /etc/sysctl.d/10-tcp-tuning.conf
net.ipv4.tcp_congestion_control = bbr
net.core.default_qdisc = fq
EOF
# Verify BBR is active
sysctl net.ipv4.tcp_congestion_control
lsmod | grep bbr
BBR vs Cubic: BBR estimates available bandwidth and RTT instead of backing off only on packet loss, so on lossy or long-RTT paths it typically sustains higher throughput and lower queueing delay than Cubic.
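One way to see the difference yourself is to emulate a lossy path and run the same throughput test under both algorithms; a lab-only sketch, assuming iperf3 and tc are installed and 192.168.1.21 runs an iperf3 server:
tc qdisc add dev eth0 root netem delay 50ms loss 1%    # temporarily emulate a lossy WAN path
iperf3 -c 192.168.1.21 -t 30 -C cubic                  # -C selects the congestion control algorithm
iperf3 -c 192.168.1.21 -t 30 -C bbr
tc qdisc del dev eth0 root                             # remove the emulation; the default qdisc (fq) takes over again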
# Local port range (used by outbound/client connections)
net.ipv4.ip_local_port_range = 10000 65000    # default 32768-60999
# Connection-tracking table size (NAT/firewall environments)
net.netfilter.nf_conntrack_max = 1048576      # default 65536
net.netfilter.nf_conntrack_tcp_timeout_established = 600    # default 432000 (5 days)
# ARP cache (neighbor table) thresholds
net.ipv4.neigh.default.gc_thresh1 = 1024
net.ipv4.neigh.default.gc_thresh2 = 4096
net.ipv4.neigh.default.gc_thresh3 = 8192
# Apply the settings
sysctl -p /etc/sysctl.d/10-tcp-tuning.conf
Verify the conntrack table:
# Current usage
cat /proc/sys/net/netfilter/nf_conntrack_count
# Output: 62341
# Upper limit
cat /proc/sys/net/netfilter/nf_conntrack_max
# Output: 1048576
# Watch for overflow (any hits here mean nf_conntrack_max must be raised)
dmesg | grep "nf_conntrack: table full"
# Check the NIC queue count
ethtool -l eth0
# Output:
# Combined: 8    (current)
# Combined: 16   (hardware maximum)
# Increase the queue count (requires hardware support)
ethtool -L eth0 combined 16
# Check how interrupts are distributed
cat /proc/interrupts | grep eth0
# Ideally each queue's interrupt is pinned to a different CPU
# Pin an interrupt to a CPU manually (example: IRQ 125 to CPU 0)
echo 0 > /proc/irq/125/smp_affinity_list    # smp_affinity_list takes CPU numbers, so 0 = CPU 0
echo 1 > /proc/irq/126/smp_affinity_list    # 1 = CPU 1
Automation script (an alternative to irqbalance):
#!/bin/bash
# set-irq-affinity.sh - spread NIC interrupts evenly across CPUs
IFACE=eth0
CPU_COUNT=$(nproc)
i=0
# Walk the interface's IRQs from /proc/interrupts and round-robin them across CPUs
for IRQ in $(grep "$IFACE" /proc/interrupts | awk -F: '{print $1}' | tr -d ' '); do
    CPU=$((i % CPU_COUNT))
    echo "$CPU" > /proc/irq/$IRQ/smp_affinity_list
    echo "IRQ $IRQ -> CPU $CPU"
    i=$((i + 1))
done
# Enable RPS (Receive Packet Steering: software multi-queue for single-queue NICs)
# For cases where NIC queues < CPU cores
# Configure RPS (eth0 as an example)
for i in /sys/class/net/eth0/queues/rx-*/rps_cpus; do
    echo "ffff" > "$i"    # ffff = bitmask covering all 16 CPUs
done
# Enable RFS (Receive Flow Steering: steer flows further, to the CPU running the consuming application)
sysctl -w net.core.rps_sock_flow_entries=32768
for i in /sys/class/net/eth0/queues/rx-*/rps_flow_cnt; do
    echo 2048 > "$i"
done
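Whether RPS actually spreads the load can be read from /proc/net/softnet_stat (one row per CPU, hex counters); a sketch using gawk's strtonum:
# Column 1 = packets processed, column 2 = packets dropped, per CPU; after enabling RPS
# the processed counts should grow on all CPUs, not only the one taking the hardware IRQ.
awk '{ printf "CPU%-3d processed=%d dropped=%d\n", NR - 1, strtonum("0x" $1), strtonum("0x" $2) }' /proc/net/softnet_stat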
Goal: Reduce CPU context switches and lower soft-interrupt (softirq) overhead.
# Check the softirq share
mpstat -P ALL 1 5
# Watch the %soft column; it should normally stay below 10%
# Detailed softirq statistics
cat /proc/softirqs
# The NET_RX row shows per-CPU network-receive softirq counts
# Live monitoring
watch -n1 'cat /proc/softirqs | grep NET_RX'
# Check a process's current CPU affinity
taskset -cp <PID>
# Output: pid 12345's current affinity list: 0-15
# Pin Nginx workers to specific CPUs
# Edit /etc/nginx/nginx.conf
worker_processes 8;
worker_cpu_affinity 00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000;
# Verify the pinning
ps -eLo pid,tid,psr,comm | grep nginx
# The psr column shows which CPU each thread is currently running on
# Dirty-page writeback tuning (reduces I/O latency spikes)
vm.dirty_ratio = 10                    # synchronous writeback kicks in when dirty pages reach 10% of memory
vm.dirty_background_ratio = 5          # background writeback starts at 5%
vm.dirty_writeback_centisecs = 100     # flusher wakes every 1 s (unit: centiseconds)
# Memory allocation policy
vm.overcommit_memory = 1               # allow overcommit (recommended for Redis/MongoDB)
# Swap (disable or minimize it in production)
vm.swappiness = 10                     # default 60; lower it to swap less aggressively
# Apply the settings
cat <<EOF >> /etc/sysctl.d/20-memory.conf
vm.dirty_ratio = 10
vm.dirty_background_ratio = 5
vm.swappiness = 10
EOF
sysctl -p /etc/sysctl.d/20-memory.conf
# System-wide limit
fs.file-max = 2097152
# Per-process limits (/etc/security/limits.conf)
cat <<EOF >> /etc/security/limits.conf
* soft nofile 100000
* hard nofile 100000
root soft nofile 100000
root hard nofile 100000
EOF
# Take effect in the current shell immediately
ulimit -n 100000
# Verify
ulimit -n
cat /proc/sys/fs/file-max
Verify file-descriptor usage:
# System-wide usage
cat /proc/sys/fs/file-nr
# Output: 12345 0 2097152
#         ↑ allocated  ↑ allocated-but-unused  ↑ maximum
# Per-process usage (Nginx as an example)
PID=$(pgrep nginx | head -1)
ls /proc/$PID/fd | wc -l
cat /proc/$PID/limits | grep "open files"
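To see which processes are actually consuming descriptors, a small sketch that ranks them by open-fd count (run as root so every /proc/<pid>/fd is readable):
for pid in /proc/[0-9]*; do
    echo "$(ls "$pid/fd" 2>/dev/null | wc -l) $(basename "$pid") $(cat "$pid/comm" 2>/dev/null)"
done | sort -rn | head -5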
# Check the current I/O scheduler
cat /sys/block/sda/queue/scheduler
# Output: noop [deadline] cfq
# For SSDs, noop (or none on blk-mq kernels, 5.0+) is recommended
echo noop > /sys/block/sda/queue/scheduler
# Persist via udev (/etc/udev/rules.d/60-scheduler.rules)
cat <<EOF > /etc/udev/rules.d/60-scheduler.rules
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="noop"
EOF
# Example /etc/fstab tuning (XFS)
/dev/sda1 /data xfs defaults,noatime,nodiratime,nobarrier 0 0
# Option notes:
# noatime: do not update file access times (less metadata I/O)
# nodiratime: do not update directory access times
# nobarrier: disable write barriers (only for SSD + UPS; note XFS dropped this option in kernel 4.19+)
# Remount to apply
mount -o remount /data
# Verify the mount options
mount | grep /data
# /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
worker_rlimit_nofile 100000;
worker_cpu_affinity auto;
events {
    use epoll;
    worker_connections 10000;
    multi_accept on;
}
http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;
    # Upstream connection pool
    upstream backend {
        server 192.168.1.21:8080;
        keepalive 256;
        keepalive_requests 1000;
        keepalive_timeout 60s;
    }
}
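Note that the upstream keepalive pool is only used when requests are proxied with HTTP/1.1 and no Connection header; a minimal server/location fragment to place inside the http {} block above (listen port assumed to match the wrk target):
    server {
        listen 8080;
        location / {
            proxy_pass http://backend;
            proxy_http_version 1.1;          # keepalive to upstreams requires HTTP/1.1
            proxy_set_header Connection "";  # strip Connection so the upstream connection stays open
        }
    }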
# JVM startup options (memory and GC)
JAVA_OPTS="-Xms8g -Xmx8g \
-XX:+UseG1GC \
-XX:MaxGCPauseMillis=200 \
-XX:+ParallelRefProcEnabled \
-XX:+UnlockExperimentalVMOptions \
-XX:G1NewSizePercent=30 \
-XX:InitiatingHeapOccupancyPercent=35 \
-XX:+DisableExplicitGC \
-Djava.net.preferIPv4Stack=true"
# Connection-pool settings (HikariCP example)
spring.datasource.hikari.maximum-pool-size=200
spring.datasource.hikari.minimum-idle=20
spring.datasource.hikari.connection-timeout=5000
# Re-run the load test with identical parameters
wrk -t8 -c1000 -d60s --latency http://192.168.1.10:8080/
# Expected results after tuning:
# QPS: 38000 req/s (≈3.2x, +217%)
# P99 latency: 420ms (-50%)
# Retransmission rate: 0.3% (-75%)
# CPU %sys: 18% (-48%)
# Consolidate all settings into a single file
cat /etc/sysctl.d/*.conf > /etc/sysctl.conf
# Verify it will take effect after the next reboot
sysctl -p /etc/sysctl.conf
# Check that the relevant services are enabled at boot
systemctl list-unit-files | grep enabled | grep -E "nginx|network"
# Real-time monitoring script
#!/bin/bash
# perf-monitor.sh
while true; do
    echo "=== $(date) ==="
    # TCP connection states
    ss -s | grep TCP
    # Retransmissions
    netstat -s | grep "segments retransmited" | awk '{print "Retrans: " $1}'
    # Soft interrupts (the "all" Average line, %soft column)
    mpstat -P ALL 1 1 | grep Average | grep -w all | awk '{print "CPU Soft IRQ: " $8 "%"}'
    # Network throughput
    sar -n DEV 1 1 | grep eth0 | tail -1 | awk '{print "RX: " $5 " KB/s, TX: " $6 " KB/s"}'
    echo "---"
    sleep 5
done
# Key node-exporter metrics
node_netstat_Tcp_RetransSegs        # TCP retransmitted segments
node_netstat_TcpExt_TCPTimeouts     # TCP timeouts
node_softnet_dropped_total          # packets dropped in softirq processing
node_network_receive_drop_total     # NIC receive drops
Alert rules:
groups:
- name: kernel_network_alerts
  rules:
  - alert: HighTCPRetrans
    expr: rate(node_netstat_Tcp_RetransSegs[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Abnormal TCP retransmission rate (>100/s)"
  - alert: SoftnetDropped
    expr: rate(node_softnet_dropped_total[1m]) > 10
    for: 2m
    labels:
      severity: critical
    annotations:
      summary: "Softnet packet drops (kernel network stack overloaded)"
Theoretical concurrency per host = (local ports × target IPs) / average connection duration (s)
Actual usable concurrency = min(
    theoretical concurrency,
    file-descriptor limit,
    memory limit (≈4 KB per connection),
    conntrack table size
)
Example (16-core / 64 GB server):
- Port range: 10000-65000 (55,000 ports)
- Target backends: 10
- Average connection duration: 1 s
- Theoretical concurrency: (55000 × 10) / 1 = 550,000
- fd limit: 100,000
- Memory limit: 64 GB × 0.5 / 4 KB ≈ 8,388,608
- Recommended setting: 80,000 concurrent connections (keep a 20% headroom)
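The same arithmetic as a small shell sketch (values taken from the example above):
PORTS=55000; TARGETS=10; CONN_SECS=1
FD_LIMIT=100000; MEM_LIMIT=8388608; CONNTRACK_MAX=1048576
THEORETICAL=$(( PORTS * TARGETS / CONN_SECS ))
USABLE=$(printf '%s\n' "$THEORETICAL" "$FD_LIMIT" "$MEM_LIMIT" "$CONNTRACK_MAX" | sort -n | head -1)
echo "theoretical=$THEORETICAL usable=$USABLE recommended=$(( USABLE * 80 / 100 ))"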
# Enable SYN cookies
net.ipv4.tcp_syncookies = 1
# Constrain the SYN queue
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_synack_retries = 2
# Combine with iptables rate limiting
iptables -A INPUT -p tcp --dport 80 --syn -m limit --limit 100/s --limit-burst 200 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 --syn -j DROP
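The limit above is global; a per-source variant with the hashlimit match (a sketch, with illustrative rate and burst values) keeps a single source from exhausting the whole budget, inserted ahead of the global ACCEPT rule:
iptables -I INPUT -p tcp --dport 80 --syn \
    -m hashlimit --hashlimit-name synflood --hashlimit-mode srcip \
    --hashlimit-above 20/second --hashlimit-burst 40 -j DROP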
# Enable reverse-path filtering (anti IP-spoofing)
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
# Disable ICMP redirects
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
Quick troubleshooting reference (diagnostic command → related tuning):
- mpstat -P ALL 1 (high %soft)               →  ethtool -L (increase NIC queues)
- netstat -s | grep retrans                  →  tcp_rmem / tcp_wmem, BBR
- dmesg | grep nf_conntrack                  →  nf_conntrack_max
- ss -s | grep TIME-WAIT                     →  tcp_tw_reuse
- netstat -s | grep "port already in use"    →  ip_local_port_range
- cat /proc/sys/fs/file-nr                   →  ulimit -n 100000
- Keep tcp_moderate_rcvbuf enabled so the kernel auto-tunes the window from RTT and bandwidth
- Enable tcp_tw_reuse, but do not shrink fin_timeout too aggressively (<10 s can cause connection-reuse conflicts)
# /etc/sysctl.d/99-production-tuning.conf
# Complete Linux kernel tuning configuration for production
### TCP stack tuning ###
net.ipv4.tcp_max_syn_backlog = 16384
net.core.somaxconn = 16384
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_max_tw_buckets = 50000
net.ipv4.tcp_max_orphans = 65536
net.ipv4.tcp_congestion_control = bbr
net.core.default_qdisc = fq
### Network layer tuning ###
net.ipv4.ip_local_port_range = 10000 65000
net.netfilter.nf_conntrack_max = 1048576
net.netfilter.nf_conntrack_tcp_timeout_established = 600
net.core.netdev_max_backlog = 8192
net.ipv4.neigh.default.gc_thresh1 = 1024
net.ipv4.neigh.default.gc_thresh2 = 4096
net.ipv4.neigh.default.gc_thresh3 = 8192
### Memory management ###
vm.dirty_ratio = 10
vm.dirty_background_ratio = 5
vm.swappiness = 10
vm.overcommit_memory = 1
### Filesystem ###
fs.file-max = 2097152
### Security hardening ###
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
# Apply the configuration
sysctl -p /etc/sysctl.d/99-production-tuning.conf
#!/bin/bash
# apply-kernel-tuning.sh - apply the kernel tuning in one step
set -euo pipefail
BACKUP_DIR="/root/kernel-tuning-backup-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BACKUP_DIR"
echo "==> Backing up the current configuration"
sysctl -a > "$BACKUP_DIR/sysctl-before.txt"
cp -r /etc/sysctl.d "$BACKUP_DIR/"
echo "==> Applying the tuning configuration"
curl -sO https://example.com/99-production-tuning.conf
mv 99-production-tuning.conf /etc/sysctl.d/
sysctl -p /etc/sysctl.d/99-production-tuning.conf
echo "==> Loading the BBR module"
modprobe tcp_bbr
echo "tcp_bbr" >> /etc/modules-load.d/bbr.conf
echo "==> Verifying key parameters"
sysctl net.ipv4.tcp_congestion_control | grep -q bbr || { echo "BBR is not active"; exit 1; }
sysctl net.ipv4.tcp_tw_reuse | grep -q "= 1" || { echo "tw_reuse is not active"; exit 1; }
echo "==> Tuning complete; backup saved in $BACKUP_DIR"
echo "==> Recommended: reboot and re-verify"
Tested on: 2025-10, RHEL 8.6 / Ubuntu 22.04, kernel 5.15, 16C/64G