当前位置:首页>python>Python环境自动配置脚本

Python环境自动配置脚本

  • 2026-02-24 17:34:30
Python环境自动配置脚本
本文整理了实用的一键安装脚本,涵盖了WSL Ubuntu系统级渲染依赖,Miniforge环境配置,以及Python库安装。 脚本会自动检测系统类型并安装必要的依赖,最后验证安装结果。
#!/bin/bash
set -e  # Stop immediately on the first error

# --- Color definitions ---
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'

echo -e "${BLUE}==============================================${NC}"
echo -e "${BLUE}   Embodied AI Python环境自动配置脚本         ${NC}"
echo -e "${BLUE}==============================================${NC}"

# 0. Refuse to run as root (safety check)
if [ "$EUID" -eq 0 ]; then
    echo -e "${RED}[警告] 请不要以 root 身份运行此脚本!${NC}"
    echo "请使用你创建的普通用户 (如 leo) 运行。"
    exit 1
fi

# 1. Install system-level rendering libraries (required by MuJoCo)
echo -e "${GREEN}[1/4] 安装底层渲染依赖 (需要 sudo 密码)...${NC}"
sudo apt-get update -qq
sudo apt-get install -y build-essential libgl1-mesa-dev libgl1-mesa-glx \
    libglew-dev libosmesa6-dev software-properties-common patchelf \
    git ffmpeg wget > /dev/null
echo ">>> 系统依赖安装完成。"

# 2. Install Miniforge (if it is not installed yet)
if [ ! -d "$HOME/miniforge3" ]; then
    echo -e "${GREEN}[2/4] 正在下载并安装 Miniforge...${NC}"
    wget -q -O Miniforge3.sh "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh"
    bash Miniforge3.sh -b -p "$HOME/miniforge3"
    rm Miniforge3.sh

    # Register conda in .bashrc
    "$HOME/miniforge3/bin/conda" init bash
else
    echo -e "${GREEN}[2/4] Miniforge 已存在,跳过安装。${NC}"
fi

# Activate conda for the remainder of this script only
source "$HOME/miniforge3/bin/activate"

# 3. Create the dedicated embodied-AI environment
echo -e "${GREEN}[3/4] 创建 Conda 环境 (embodied_ai)...${NC}"
# Remove a pre-existing environment first so we start from a clean state
conda env remove -n embodied_ai -y > /dev/null 2>&1 || true
conda create -n embodied_ai python=3.10 -y

# 4. Install the core libraries (PyTorch + Gymnasium)
echo -e "${GREEN}[4/4] 正在安装 PyTorch 和 Gymnasium...${NC}"
# Use the pip that belongs to the environment we just created
"$HOME/miniforge3/envs/embodied_ai/bin/pip" install torch torchvision torchaudio
"$HOME/miniforge3/envs/embodied_ai/bin/pip" install "gymnasium[mujoco]"

# 5. Generate the smoke-test script
cat <<EOF > hello_embodied.py
import gymnasium as gym
import time

print(f">>> [测试启动] 正在加载 Ant-v4 (机械蚂蚁)...")
try:
    # render_mode='human' 会尝试弹出窗口
    env = gym.make("Ant-v4", render_mode="human")
    observation, info = env.reset()

    print(">>> [成功] 环境已初始化!请看屏幕上的 3D 窗口。")
    print(">>> 蚂蚁将随机运动 5 秒...")

    for _ in range(300):
        action = env.action_space.sample()
        env.step(action)
        time.sleep(0.01)

    env.close()
    print(">>> [完成] 测试通过。环境配置完美!")

except Exception as e:
    print(f">>> [错误] 发生异常: {e}")
    print("提示: 如果看不到窗口,请检查 Windows 端是否支持 WSLg (Win11) 或安装了 VcXsrv (Win10)。")
EOF


echo -e "${BLUE}==============================================${NC}"
echo -e "${GREEN}🎉 配置全部完成!${NC}"
echo -e "请依次执行以下命令开始体验:"
echo -e "1. ${GREEN}source ~/.bashrc${NC}"
echo -e "2. ${GREEN}conda activate embodied_ai${NC}"
echo -e "3. ${GREEN}python hello_embodied.py${NC}"
echo -e "${BLUE}==============================================${NC}"

完成以上步骤后,你可以在具身智能环境中运行 hello_embodied.py 测试脚本。 如果一切正常,你应该能看到一个 3D 窗口弹出,显示机械蚂蚁在随机运动。


下面是一个简单的测试脚本,用于验证 PyTorch 是否正确安装并能够使用 GPU:

#!/usr/bin/env python3
"""
PyTorch信息获取与测试脚本
获取PyTorch、CUDA、GPU等详细信息,并进行简单的功能测试
"""


import sys
import platform
import json
import time
import numpy as np
from datetime import datetime
import subprocess
import os


def safe_getattr(obj, attr_name, default=None):
    """Safely read attribute *attr_name* from *obj*.

    Returns *default* when the attribute does not exist or when the
    lookup itself raises (some extension objects raise TypeError from
    their attribute hooks).
    """
    try:
        return getattr(obj, attr_name, default)
    except (AttributeError, TypeError):
        return default

def get_system_info():
    """Collect host and Python-interpreter details via the platform module.

    Returns:
        dict with platform/system/release/version/machine/processor and
        Python version, implementation and compiler strings.
    """
    info = {
        "platform": platform.platform(),
        "system": platform.system(),
        "release": platform.release(),
        "version": platform.version(),
        "machine": platform.machine(),
        "processor": platform.processor(),
        "python_version": platform.python_version(),
        "python_implementation": platform.python_implementation(),
        "python_compiler": platform.python_compiler(),
    }
    return info


def get_nvidia_info():
    """Probe NVIDIA driver and CUDA-toolkit information.

    Best-effort: every probe is wrapped so the function returns a dict
    (with None placeholders) even on machines without an NVIDIA stack.

    Returns:
        dict with driver_version, cuda_version, nvidia_smi_output and,
        when a CUDA install directory is found, cuda_installed_path.
    """
    info = {
        "driver_version": None,
        "cuda_version": None,
        "nvidia_smi_output": None
    }

    try:
        # Capture the raw nvidia-smi report (if the tool exists)
        result = subprocess.run(['nvidia-smi'],
            capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            info["nvidia_smi_output"] = result.stdout[:1000]  # keep only the first 1000 chars
    except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
        pass

    try:
        # Query just the driver version
        result = subprocess.run(['nvidia-smi', '--query-gpu=driver_version', '--format=csv,noheader'],
            capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            info["driver_version"] = result.stdout.strip()
    except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
        pass

    # Look for a CUDA toolkit installation in the usual locations
    cuda_paths = [
        r"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA",  # Windows
        "/usr/local/cuda",  # Linux/macOS
        "/opt/cuda",  # Alternative Linux
    ]

    for path in cuda_paths:
        if os.path.exists(path):
            info["cuda_installed_path"] = path
            # Older toolkits ship a version.txt alongside the install
            version_file = os.path.join(path, "version.txt")
            if os.path.exists(version_file):
                with open(version_file, 'r') as f:
                    info["cuda_version"] = f.read().strip()
            break

    return info


def get_torch_info():
    """Collect PyTorch build/runtime details and per-GPU device properties.

    Returns:
        dict with torch version info, CUDA availability, a list of
        per-device property dicts, and a torch_config section.
    """
    import torch

    info = {
        "torch_version": torch.__version__,
        "torch_cuda_version": torch.version.cuda if hasattr(torch.version, 'cuda') else None,
        "torch_cudnn_version": (torch.backends.cudnn.version()
                                if hasattr(torch.backends.cudnn, 'is_available')
                                and torch.backends.cudnn.is_available()
                                else None),
        "cuda_available": torch.cuda.is_available(),
        "cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
        "devices": []
    }

    if torch.cuda.is_available():
        for i in range(torch.cuda.device_count()):
            device_props = torch.cuda.get_device_properties(i)
            capability = torch.cuda.get_device_capability(i)

            # Read attributes through safe_getattr: device-property names
            # differ between PyTorch versions.
            device_info = {
                "id": i,
                "name": torch.cuda.get_device_name(i),
                "compute_capability": f"{capability[0]}.{capability[1]}",
                "total_memory_gb": device_props.total_memory / (1024**3),
                "multi_processor_count": safe_getattr(device_props, 'multi_processor_count'),

                # Attribute name varies across versions
                "max_threads_per_block": safe_getattr(device_props, 'max_threads_per_block',
                    safe_getattr(device_props, 'max_threads_per_block_dim', None)),

                # Try alternate attribute names
                "max_threads_dim": safe_getattr(device_props, 'max_threads_dim',
                    safe_getattr(device_props, 'max_threads_per_block_dim', None)),

                "max_grid_size": safe_getattr(device_props, 'max_grid_size',
                    safe_getattr(device_props, 'max_grid_dim', None)),

                "shared_memory_per_block_kb": safe_getattr(device_props, 'shared_memory_per_block', 0) / 1024,
                "warp_size": safe_getattr(device_props, 'warp_size', 32),

                # Clock-rate attribute names also vary; resolved below
                "memory_clock_rate_mhz": None,
                "memory_bus_width": safe_getattr(device_props, 'memory_bus_width'),
            }

            # Probe the possible clock-rate attribute names in order
            for attr in ['memory_clock_rate', 'memoryClockRate', 'clock_rate']:
                value = safe_getattr(device_props, attr)
                if value:
                    device_info["memory_clock_rate_mhz"] = value / 1000
                    break

            # Dump every public attribute (useful for debugging)
            device_info["all_attributes"] = {}
            for attr in dir(device_props):
                if not attr.startswith('_'):
                    try:
                        device_info["all_attributes"][attr] = getattr(device_props, attr)
                    except Exception:
                        pass

            info["devices"].append(device_info)

    # PyTorch build/configuration details (each probe is best-effort)
    info["torch_config"] = {}
    try:
        info["torch_config"]["debug"] = torch.is_debug()
    except Exception:
        info["torch_config"]["debug"] = "Unknown"

    try:
        info["torch_config"]["parallel_info"] = torch.__config__.parallel_info()
    except Exception:
        info["torch_config"]["parallel_info"] = "Not available"

    try:
        info["torch_config"]["show_config"] = torch.__config__.show()
    except Exception:
        info["torch_config"]["show_config"] = "Not available"

    return info


def test_basic_torch_operations():
    """Run functional smoke tests: tensor math, GPU ops, autograd, nn module.

    Each section records a result dict with a "success" flag; failures are
    caught per-section so one broken area does not abort the others.

    Returns:
        dict with tensor_operations / gpu_operations / autograd_test /
        neural_network_test sub-dicts.
    """
    import torch

    results = {
        "tensor_operations": {},
        "gpu_operations": {},
        "autograd_test": {},
        "neural_network_test": {}
    }

    # 1. Basic tensor operations
    print("测试基本张量操作...")
    try:
        # Create test tensors
        x = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
        y = torch.tensor([[5, 6], [7, 8]], dtype=torch.float32)

        # Elementwise and matrix operations
        add_result = x + y
        mul_result = x * y
        matmul_result = torch.matmul(x, y)

        results["tensor_operations"] = {
            "addition": add_result.tolist(),
            "multiplication": mul_result.tolist(),
            "matrix_multiplication": matmul_result.tolist(),
            "success": True
        }
        print("✓ 基本张量操作测试通过")
    except Exception as e:
        results["tensor_operations"] = {"success": False, "error": str(e)}
        print(f"✗ 基本张量操作测试失败: {e}")

    # 2. GPU operations (only when CUDA is available)
    if torch.cuda.is_available():
        print("测试GPU操作...")
        try:
            # Move the section-1 tensors to the GPU
            device = torch.device('cuda:0')
            x_gpu = x.to(device)
            y_gpu = y.to(device)

            # Matrix multiplication on the GPU
            gpu_result = torch.matmul(x_gpu, y_gpu)

            # Synchronize to make sure the computation finished
            torch.cuda.synchronize()

            # Time 100 matmuls with CUDA events
            start_event = torch.cuda.Event(enable_timing=True)
            end_event = torch.cuda.Event(enable_timing=True)

            start_event.record()
            for _ in range(100):
                _ = torch.matmul(x_gpu, y_gpu)
            end_event.record()
            torch.cuda.synchronize()

            elapsed_time = start_event.elapsed_time(end_event)

            results["gpu_operations"] = {
                "gpu_matrix_multiplication": gpu_result.cpu().tolist(),
                "gpu_computation_time_ms": elapsed_time,
                "success": True
            }
            print(f"✓ GPU操作测试通过 (100次矩阵乘法耗时: {elapsed_time:.2f}ms)")
        except Exception as e:
            results["gpu_operations"] = {"success": False, "error": str(e)}
            print(f"✗ GPU操作测试失败: {e}")
    else:
        results["gpu_operations"] = {"success": False, "error": "CUDA不可用"}
        print("⚠ GPU操作测试跳过 (CUDA不可用)")

    # 3. Autograd
    print("测试自动微分...")
    try:
        x = torch.tensor([2.0], requires_grad=True)
        y = x ** 3 + 2 * x ** 2 + 3 * x + 4

        y.backward()
        gradient = x.grad.item()

        # Verify against the analytic derivative: 3x² + 4x + 3 at x=2
        expected_gradient = 3 * 2**2 + 4 * 2 + 3

        results["autograd_test"] = {
            "computed_gradient": gradient,
            "expected_gradient": expected_gradient,
            "gradient_match": abs(gradient - expected_gradient) < 1e-6,
            "success": True
        }
        print(f"✓ 自动微分测试通过 (梯度: {gradient:.6f})")
    except Exception as e:
        results["autograd_test"] = {"success": False, "error": str(e)}
        print(f"✗ 自动微分测试失败: {e}")

    # 4. Neural-network module
    print("测试神经网络模块...")
    try:
        # A tiny two-layer network for a forward/backward round trip
        class SimpleNet(torch.nn.Module):
            def __init__(self):
                super(SimpleNet, self).__init__()
                self.fc1 = torch.nn.Linear(10, 5)
                self.relu = torch.nn.ReLU()
                self.fc2 = torch.nn.Linear(5, 1)
                self.sigmoid = torch.nn.Sigmoid()

            def forward(self, x):
                x = self.fc1(x)
                x = self.relu(x)
                x = self.fc2(x)
                x = self.sigmoid(x)
                return x

        # Build the model and random test input
        model = SimpleNet()
        test_input = torch.randn(1, 10)

        # Forward pass
        output = model(test_input)

        # One full optimizer step
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        criterion = torch.nn.BCELoss()

        target = torch.tensor([[0.5]])
        loss = criterion(output, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        results["neural_network_test"] = {
            "model_output": output.item(),
            "loss_value": loss.item(),
            "parameters_count": sum(p.numel() for p in model.parameters()),
            "success": True
        }
        print(f"✓ 神经网络测试通过 (输出: {output.item():.6f}, 损失: {loss.item():.6f})")
    except Exception as e:
        results["neural_network_test"] = {"success": False, "error": str(e)}
        print(f"✗ 神经网络测试失败: {e}")

    return results


def test_performance(matrix_size=2000, conv_shape=(4, 64, 128, 128)):
    """Benchmark CPU (and, when available, GPU) throughput.

    Args:
        matrix_size: edge length of the square matrices for the CPU matmul
            benchmark (default 2000, backward compatible with the original
            hard-coded value).
        conv_shape: (batch, channels, height, width) of the CPU convolution
            benchmark input (default (4, 64, 128, 128)).

    Returns:
        dict with "cpu_performance" and, when CUDA is available,
        "gpu_performance" sub-dicts of timing results.
    """
    import torch

    results = {}

    # --- CPU benchmarks ---
    print("测试CPU性能...")
    cpu_tests = {}

    try:
        # Basic matrix-multiplication benchmark
        size = matrix_size
        a_cpu = torch.randn(size, size)
        b_cpu = torch.randn(size, size)

        start_time = time.time()
        c_cpu = torch.matmul(a_cpu, b_cpu)
        # Clamp to avoid ZeroDivisionError when the run is faster than the timer resolution
        cpu_time = max(time.time() - start_time, 1e-9)

        cpu_tests["matrix_multiplication"] = {
            "matrix_size": size,
            "computation_time_s": cpu_time,
            "flops_estimate": (2 * size**3) / cpu_time / 1e9
        }
        print(f"✓ CPU矩阵乘法: {cpu_time:.3f}s (约 {cpu_tests['matrix_multiplication']['flops_estimate']:.2f} GFLOPS)")

        # Extra CPU benchmark: 2-D convolution
        batch_size, channels, height, width = conv_shape
        kernel_size = 3

        input_cpu = torch.randn(batch_size, channels, height, width)
        kernel_cpu = torch.randn(64, channels, kernel_size, kernel_size)

        start_time = time.time()
        output_cpu = torch.nn.functional.conv2d(input_cpu, kernel_cpu, padding=1)
        conv_time = max(time.time() - start_time, 1e-9)

        cpu_tests["convolution"] = {
            "input_shape": [batch_size, channels, height, width],
            "computation_time_s": conv_time,
            "operations": batch_size * channels * 64 * height * width * kernel_size * kernel_size
        }
        print(f"✓ CPU卷积运算: {conv_time:.3f}s")

        results["cpu_performance"] = cpu_tests

    except Exception as e:
        results["cpu_performance"] = {"error": str(e)}
        print(f"✗ CPU性能测试失败: {e}")

    # --- GPU benchmarks (only when CUDA is available) ---
    if torch.cuda.is_available():
        print("\n增强GPU性能测试...")
        gpu_tests = {}

        try:
            device = torch.device('cuda:0')

            # Benchmark 1: large matrix multiplication
            print("测试1: 大规模矩阵乘法")
            size = 4096  # 4K x 4K matrices
            a_gpu = torch.randn(size, size, device=device)
            b_gpu = torch.randn(size, size, device=device)

            # Warm-up iterations so the timed loop excludes one-off costs
            for _ in range(5):
                _ = torch.matmul(a_gpu, b_gpu)

            torch.cuda.synchronize()
            start_event = torch.cuda.Event(enable_timing=True)
            end_event = torch.cuda.Event(enable_timing=True)

            start_event.record()
            for _ in range(10):
                _ = torch.matmul(a_gpu, b_gpu)
            end_event.record()
            torch.cuda.synchronize()

            gpu_time = start_event.elapsed_time(end_event) / 1000

            gpu_tests["large_matrix_multiplication"] = {
                "matrix_size": size,
                "computation_time_s": gpu_time,
                "flops_estimate": (2 * size**3 * 10) / gpu_time / 1e9,
                # Compare one GPU iteration against the single CPU iteration
                "speedup_vs_cpu": (cpu_tests.get("matrix_multiplication", {}).get("computation_time_s", 0)
                                   / (gpu_time / 10)) if cpu_tests else None
            }
            print(f"  ✓ 4K矩阵乘法: {gpu_time:.3f}s (约 {gpu_tests['large_matrix_multiplication']['flops_estimate']:.2f} GFLOPS)")

            # Benchmark 2: large convolution
            print("测试2: 大规模卷积运算")
            batch_size, channels, height, width = 16, 128, 256, 256
            kernel_size = 3

            input_gpu = torch.randn(batch_size, channels, height, width, device=device)
            kernel_gpu = torch.randn(128, channels, kernel_size, kernel_size, device=device)

            # Warm-up
            for _ in range(5):
                _ = torch.nn.functional.conv2d(input_gpu, kernel_gpu, padding=1)

            torch.cuda.synchronize()
            start_event = torch.cuda.Event(enable_timing=True)
            end_event = torch.cuda.Event(enable_timing=True)

            start_event.record()
            for _ in range(10):
                _ = torch.nn.functional.conv2d(input_gpu, kernel_gpu, padding=1)
            end_event.record()
            torch.cuda.synchronize()

            conv_gpu_time = start_event.elapsed_time(end_event) / 1000

            gpu_tests["large_convolution"] = {
                "input_shape": [batch_size, channels, height, width],
                "computation_time_s": conv_gpu_time,
                "operations": batch_size * channels * 128 * height * width * kernel_size * kernel_size * 10,
                "speedup_vs_cpu": (cpu_tests.get("convolution", {}).get("computation_time_s", 0)
                                   / (conv_gpu_time / 10)) if cpu_tests else None
            }
            print(f"  ✓ 大规模卷积: {conv_gpu_time:.3f}s")

            # Benchmark 3: elementwise tensor operations
            print("测试3: 张量逐元素运算")
            tensor_size = [1024, 1024, 16]

            x_gpu = torch.randn(*tensor_size, device=device)
            y_gpu = torch.randn(*tensor_size, device=device)

            torch.cuda.synchronize()
            start_event = torch.cuda.Event(enable_timing=True)
            end_event = torch.cuda.Event(enable_timing=True)

            start_event.record()
            for _ in range(100):
                _ = x_gpu * y_gpu + torch.sin(x_gpu) - torch.exp(y_gpu)
            end_event.record()
            torch.cuda.synchronize()

            elementwise_time = start_event.elapsed_time(end_event) / 1000

            gpu_tests["elementwise_operations"] = {
                "tensor_size": tensor_size,
                "computation_time_s": elementwise_time,
                # 100 iterations x 4 elementwise ops per element
                "operations_per_second": 100 * 4 * torch.prod(torch.tensor(tensor_size)).item() / elementwise_time / 1e9
            }
            print(f"  ✓ 逐元素运算: {elementwise_time:.3f}s")

            # Benchmark 4: memory bandwidth
            print("测试4: 内存带宽测试")
            memory_size = 1024 * 1024 * 1024  # 1 GB
            data_gpu = torch.randn(memory_size // 4, device=device)  # float32 = 4 bytes

            torch.cuda.synchronize()
            start_event = torch.cuda.Event(enable_timing=True)
            end_event = torch.cuda.Event(enable_timing=True)

            start_event.record()
            for _ in range(10):
                _ = data_gpu * 2.0  # simple memory-bound operation
            end_event.record()
            torch.cuda.synchronize()

            memory_time = start_event.elapsed_time(end_event) / 1000

            gpu_tests["memory_bandwidth"] = {
                "memory_size_gb": memory_size / (1024**3),
                "computation_time_s": memory_time,
                "bandwidth_gbs": (memory_size * 10) / memory_time / (1024**3)
            }
            print(f"  ✓ 内存带宽: {memory_time:.3f}s (约 {gpu_tests['memory_bandwidth']['bandwidth_gbs']:.1f} GB/s)")

            # Benchmark 5: per-device matmul on multi-GPU hosts
            if torch.cuda.device_count() > 1:
                print("测试5: 多GPU并行测试")
                multi_gpu_results = {}

                for i in range(torch.cuda.device_count()):
                    device_i = torch.device(f'cuda:{i}')
                    size = 2048

                    a_multi = torch.randn(size, size, device=device_i)
                    b_multi = torch.randn(size, size, device=device_i)

                    torch.cuda.synchronize(device_i)
                    start_event = torch.cuda.Event(enable_timing=True)
                    end_event = torch.cuda.Event(enable_timing=True)

                    start_event.record()
                    for _ in range(5):
                        _ = torch.matmul(a_multi, b_multi)
                    end_event.record()
                    torch.cuda.synchronize(device_i)

                    multi_time = start_event.elapsed_time(end_event) / 1000

                    multi_gpu_results[f"gpu_{i}"] = {
                        "matrix_size": size,
                        "computation_time_s": multi_time,
                        "flops_estimate": (2 * size**3 * 5) / multi_time / 1e9
                    }
                    print(f"    GPU {i}: {multi_time:.3f}s")

                gpu_tests["multi_gpu"] = multi_gpu_results

            results["gpu_performance"] = gpu_tests

            # Overall speedup summary
            if gpu_tests and cpu_tests:
                avg_gpu_speedup = sum([
                    gpu_tests["large_matrix_multiplication"].get("speedup_vs_cpu", 0),
                    gpu_tests["large_convolution"].get("speedup_vs_cpu", 0)
                ]) / 2
                print(f"\n✓ GPU平均加速比: {avg_gpu_speedup:.1f}x")

        except Exception as e:
            results["gpu_performance"] = {"error": str(e)}
            print(f"✗ GPU性能测试失败: {e}")

    return results


def print_summary(info_dict):
    """Print a human-readable summary of the collected info and test results.

    Args:
        info_dict: the aggregate dict built by main() — expects keys
            system_info, nvidia_info, torch_info, test_results and
            optionally performance_test.
    """
    print("\n" + "="*80)
    print("PyTorch环境摘要")
    print("="*80)

    # System info
    sys_info = info_dict["system_info"]
    print(f"系统: {sys_info['platform']}")
    print(f"Python: {sys_info['python_version']} ({sys_info['python_implementation']})")

    # NVIDIA info
    nvidia_info = info_dict["nvidia_info"]
    if nvidia_info["driver_version"]:
        print(f"NVIDIA驱动: {nvidia_info['driver_version']}")
    if nvidia_info["cuda_version"]:
        print(f"系统CUDA: {nvidia_info['cuda_version']}")

    # PyTorch info
    torch_info = info_dict["torch_info"]
    print(f"PyTorch版本: {torch_info['torch_version']}")
    print(f"PyTorch CUDA版本: {torch_info['torch_cuda_version']}")
    print(f"CUDA可用: {torch_info['cuda_available']}")

    if torch_info["cuda_available"]:
        print(f"GPU数量: {torch_info['cuda_device_count']}")
        for i, device in enumerate(torch_info["devices"]):
            print(f"  GPU {i}: {device['name']}")
            print(f"    计算能力: {device['compute_capability']}")
            print(f"    显存: {device['total_memory_gb']:.2f} GB")

    # Per-section test results
    print("\n测试结果摘要:")
    test_results = info_dict["test_results"]

    for test_name, result in test_results.items():
        if isinstance(result, dict) and "success" in result:
            status = "✓ 通过" if result["success"] else "✗ 失败"
            print(f"  {test_name.replace('_', ' ').title()}: {status}")

    # Performance summary — read the nested structure that
    # test_performance() actually produces.
    if "performance_test" in info_dict:
        perf = info_dict["performance_test"]

        cpu_perf = perf.get("cpu_performance")
        cpu_mm = cpu_perf.get("matrix_multiplication", {}) if isinstance(cpu_perf, dict) else {}
        if "computation_time_s" in cpu_mm:
            print(f"\nCPU性能: {cpu_mm['computation_time_s']:.3f}s")

        gpu_perf = perf.get("gpu_performance")
        gpu_mm = gpu_perf.get("large_matrix_multiplication", {}) if isinstance(gpu_perf, dict) else {}
        if "computation_time_s" in gpu_mm:
            print(f"GPU性能: {gpu_mm['computation_time_s']:.3f}s")
            if gpu_mm.get("speedup_vs_cpu"):
                print(f"GPU加速比: {gpu_mm['speedup_vs_cpu']:.1f}x")


def main():
    """Entry point: collect environment info, run the tests, print a summary."""
    print("正在收集PyTorch环境信息...")
    print("-" * 80)

    # Static environment information (works without PyTorch installed)
    all_info = {
        "timestamp": datetime.now().isoformat(),
        "script_version": "1.0.0",
        "system_info": get_system_info(),
        "nvidia_info": get_nvidia_info(),
    }

    # PyTorch-dependent sections only run when torch is importable
    try:
        import torch
        all_info["torch_info"] = get_torch_info()

        # Functional tests
        print("\n运行PyTorch功能测试...")
        print("-" * 80)
        all_info["test_results"] = test_basic_torch_operations()

        # Performance tests
        print("\n运行性能测试...")
        print("-" * 80)
        all_info["performance_test"] = test_performance()

    except ImportError:
        print("错误: PyTorch未安装!")
        print("请使用以下命令安装PyTorch:")
        print("  pip install torch torchvision torchaudio")
        all_info["torch_info"] = {"error": "PyTorch not installed"}
        all_info["test_results"] = {"error": "PyTorch not installed"}
        all_info["performance_test"] = {"error": "PyTorch not installed"}

    # Final human-readable summary
    print_summary(all_info)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

关注我,一起探索具身智能的无限可能。

最新文章

随机文章

基本 文件 流程 错误 SQL 调试
  1. 请求信息 : 2026-03-01 03:16:17 HTTP/2.0 GET : https://f.mffb.com.cn/a/475382.html
  2. 运行时间 : 0.073217s [ 吞吐率:13.66req/s ] 内存消耗:4,795.82kb 文件加载:140
  3. 缓存信息 : 0 reads,0 writes
  4. 会话信息 : SESSION_ID=0f60d89f0e2d757fb1267033578b4cbb
  1. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/public/index.php ( 0.79 KB )
  2. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/autoload.php ( 0.17 KB )
  3. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/composer/autoload_real.php ( 2.49 KB )
  4. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/composer/platform_check.php ( 0.90 KB )
  5. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/composer/ClassLoader.php ( 14.03 KB )
  6. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/composer/autoload_static.php ( 4.90 KB )
  7. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/helper.php ( 8.34 KB )
  8. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-validate/src/helper.php ( 2.19 KB )
  9. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/helper.php ( 1.47 KB )
  10. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/stubs/load_stubs.php ( 0.16 KB )
  11. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Exception.php ( 1.69 KB )
  12. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-container/src/Facade.php ( 2.71 KB )
  13. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/symfony/deprecation-contracts/function.php ( 0.99 KB )
  14. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/symfony/polyfill-mbstring/bootstrap.php ( 8.26 KB )
  15. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/symfony/polyfill-mbstring/bootstrap80.php ( 9.78 KB )
  16. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/symfony/var-dumper/Resources/functions/dump.php ( 1.49 KB )
  17. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-dumper/src/helper.php ( 0.18 KB )
  18. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/symfony/var-dumper/VarDumper.php ( 4.30 KB )
  19. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/App.php ( 15.30 KB )
  20. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-container/src/Container.php ( 15.76 KB )
  21. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/psr/container/src/ContainerInterface.php ( 1.02 KB )
  22. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/provider.php ( 0.19 KB )
  23. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Http.php ( 6.04 KB )
  24. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/helper/Str.php ( 7.29 KB )
  25. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Env.php ( 4.68 KB )
  26. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/common.php ( 0.03 KB )
  27. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/helper.php ( 18.78 KB )
  28. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Config.php ( 5.54 KB )
  29. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/app.php ( 0.95 KB )
  30. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/cache.php ( 0.78 KB )
  31. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/console.php ( 0.23 KB )
  32. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/cookie.php ( 0.56 KB )
  33. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/database.php ( 2.48 KB )
  34. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/facade/Env.php ( 1.67 KB )
  35. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/filesystem.php ( 0.61 KB )
  36. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/lang.php ( 0.91 KB )
  37. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/log.php ( 1.35 KB )
  38. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/middleware.php ( 0.19 KB )
  39. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/route.php ( 1.89 KB )
  40. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/session.php ( 0.57 KB )
  41. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/trace.php ( 0.34 KB )
  42. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/config/view.php ( 0.82 KB )
  43. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/event.php ( 0.25 KB )
  44. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Event.php ( 7.67 KB )
  45. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/service.php ( 0.13 KB )
  46. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/AppService.php ( 0.26 KB )
  47. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Service.php ( 1.64 KB )
  48. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Lang.php ( 7.35 KB )
  49. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/lang/zh-cn.php ( 13.70 KB )
  50. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/initializer/Error.php ( 3.31 KB )
  51. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/initializer/RegisterService.php ( 1.33 KB )
  52. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/services.php ( 0.14 KB )
  53. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/service/PaginatorService.php ( 1.52 KB )
  54. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/service/ValidateService.php ( 0.99 KB )
  55. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/service/ModelService.php ( 2.04 KB )
  56. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-trace/src/Service.php ( 0.77 KB )
  57. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Middleware.php ( 6.72 KB )
  58. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/initializer/BootService.php ( 0.77 KB )
  59. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/Paginator.php ( 11.86 KB )
  60. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-validate/src/Validate.php ( 63.20 KB )
  61. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/Model.php ( 23.55 KB )
  62. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/Attribute.php ( 21.05 KB )
  63. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/AutoWriteData.php ( 4.21 KB )
  64. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/Conversion.php ( 6.44 KB )
  65. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/DbConnect.php ( 5.16 KB )
  66. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/ModelEvent.php ( 2.33 KB )
  67. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/concern/RelationShip.php ( 28.29 KB )
  68. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/contract/Arrayable.php ( 0.09 KB )
  69. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/contract/Jsonable.php ( 0.13 KB )
  70. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/model/contract/Modelable.php ( 0.09 KB )
  71. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Db.php ( 2.88 KB )
  72. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/DbManager.php ( 8.52 KB )
  73. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Log.php ( 6.28 KB )
  74. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Manager.php ( 3.92 KB )
  75. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/psr/log/src/LoggerTrait.php ( 2.69 KB )
  76. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/psr/log/src/LoggerInterface.php ( 2.71 KB )
  77. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Cache.php ( 4.92 KB )
  78. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/psr/simple-cache/src/CacheInterface.php ( 4.71 KB )
  79. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/helper/Arr.php ( 16.63 KB )
  80. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/cache/driver/File.php ( 7.84 KB )
  81. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/cache/Driver.php ( 9.03 KB )
  82. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/contract/CacheHandlerInterface.php ( 1.99 KB )
  83. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/Request.php ( 0.09 KB )
  84. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Request.php ( 55.78 KB )
  85. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/middleware.php ( 0.25 KB )
  86. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Pipeline.php ( 2.61 KB )
  87. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-trace/src/TraceDebug.php ( 3.40 KB )
  88. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/middleware/SessionInit.php ( 1.94 KB )
  89. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Session.php ( 1.80 KB )
  90. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/session/driver/File.php ( 6.27 KB )
  91. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/contract/SessionHandlerInterface.php ( 0.87 KB )
  92. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/session/Store.php ( 7.12 KB )
  93. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Route.php ( 23.73 KB )
  94. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/RuleName.php ( 5.75 KB )
  95. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/Domain.php ( 2.53 KB )
  96. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/RuleGroup.php ( 22.43 KB )
  97. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/Rule.php ( 26.95 KB )
  98. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/RuleItem.php ( 9.78 KB )
  99. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/route/app.php ( 1.72 KB )
  100. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/facade/Route.php ( 4.70 KB )
  101. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/dispatch/Controller.php ( 4.74 KB )
  102. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/route/Dispatch.php ( 10.44 KB )
  103. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/controller/Index.php ( 4.81 KB )
  104. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/app/BaseController.php ( 2.05 KB )
  105. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/facade/Db.php ( 0.93 KB )
  106. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/connector/Mysql.php ( 5.44 KB )
  107. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/PDOConnection.php ( 52.47 KB )
  108. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/Connection.php ( 8.39 KB )
  109. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/ConnectionInterface.php ( 4.57 KB )
  110. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/builder/Mysql.php ( 16.58 KB )
  111. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/Builder.php ( 24.06 KB )
  112. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/BaseBuilder.php ( 27.50 KB )
  113. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/Query.php ( 15.71 KB )
  114. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/BaseQuery.php ( 45.13 KB )
  115. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/TimeFieldQuery.php ( 7.43 KB )
  116. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/AggregateQuery.php ( 3.26 KB )
  117. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/ModelRelationQuery.php ( 20.07 KB )
  118. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/ParamsBind.php ( 3.66 KB )
  119. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/ResultOperation.php ( 7.01 KB )
  120. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/WhereQuery.php ( 19.37 KB )
  121. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/JoinAndViewQuery.php ( 7.11 KB )
  122. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/TableFieldInfo.php ( 2.63 KB )
  123. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-orm/src/db/concern/Transaction.php ( 2.77 KB )
  124. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/log/driver/File.php ( 5.96 KB )
  125. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/contract/LogHandlerInterface.php ( 0.86 KB )
  126. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/log/Channel.php ( 3.89 KB )
  127. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/event/LogRecord.php ( 1.02 KB )
  128. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-helper/src/Collection.php ( 16.47 KB )
  129. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/facade/View.php ( 1.70 KB )
  130. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/View.php ( 4.39 KB )
  131. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Response.php ( 8.81 KB )
  132. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/response/View.php ( 3.29 KB )
  133. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/Cookie.php ( 6.06 KB )
  134. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-view/src/Think.php ( 8.38 KB )
  135. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/framework/src/think/contract/TemplateHandlerInterface.php ( 1.60 KB )
  136. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-template/src/Template.php ( 46.61 KB )
  137. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-template/src/template/driver/File.php ( 2.41 KB )
  138. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-template/src/template/contract/DriverInterface.php ( 0.86 KB )
  139. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/runtime/temp/067d451b9a0c665040f3f1bdd3293d68.php ( 11.98 KB )
  140. /yingpanguazai/ssd/ssd1/www/f.mffb.com.cn/vendor/topthink/think-trace/src/Html.php ( 4.42 KB )
  1. CONNECT:[ UseTime:0.000644s ] mysql:host=127.0.0.1;port=3306;dbname=f_mffb;charset=utf8mb4
  2. SHOW FULL COLUMNS FROM `fenlei` [ RunTime:0.000744s ]
  3. SELECT * FROM `fenlei` WHERE `fid` = 0 [ RunTime:0.000308s ]
  4. SELECT * FROM `fenlei` WHERE `fid` = 63 [ RunTime:0.000323s ]
  5. SHOW FULL COLUMNS FROM `set` [ RunTime:0.000490s ]
  6. SELECT * FROM `set` [ RunTime:0.000202s ]
  7. SHOW FULL COLUMNS FROM `article` [ RunTime:0.000716s ]
  8. SELECT * FROM `article` WHERE `id` = 475382 LIMIT 1 [ RunTime:0.000539s ]
  9. UPDATE `article` SET `lasttime` = 1772306178 WHERE `id` = 475382 [ RunTime:0.001061s ]
  10. SELECT * FROM `fenlei` WHERE `id` = 66 LIMIT 1 [ RunTime:0.001098s ]
  11. SELECT * FROM `article` WHERE `id` < 475382 ORDER BY `id` DESC LIMIT 1 [ RunTime:0.000394s ]
  12. SELECT * FROM `article` WHERE `id` > 475382 ORDER BY `id` ASC LIMIT 1 [ RunTime:0.000491s ]
  13. SELECT * FROM `article` WHERE `id` < 475382 ORDER BY `id` DESC LIMIT 10 [ RunTime:0.000744s ]
  14. SELECT * FROM `article` WHERE `id` < 475382 ORDER BY `id` DESC LIMIT 10,10 [ RunTime:0.001605s ]
  15. SELECT * FROM `article` WHERE `id` < 475382 ORDER BY `id` DESC LIMIT 20,10 [ RunTime:0.001055s ]
0.074802s