Basics
Requirements
- The computer must have an NVIDIA GPU that supports CUDA
- The NVIDIA GPU driver is already installed (on Windows)
- Mind the GPU's compute capability: my NVIDIA GeForce GTX 1050 Ti only supports compute capability 6.x, so I can only use CUDA 11.x (see the quick check below)
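If you are unsure of your GPU's compute capability, a quick check is sketched below (recent drivers expose a compute_cap query field; on older drivers, look the model up in NVIDIA's CUDA GPUs table instead):
# Print the GPU model and its compute capability (e.g. 6.1 for a GTX 1050 Ti)
nvidia-smi --query-gpu=name,compute_cap --format=csv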
Installing Linux on Windows 10 with WSL 2
Installation
- In Settings → Apps → Apps & features → Programs and Features → Turn Windows features on or off, check Hyper-V and Windows Subsystem for Linux
- Then, from a PowerShell or command prompt window:
# List the Linux distributions available online
wsl.exe --list --online
# Install the one you want
wsl --install -d Ubuntu-24.04
Migrating Linux to the D: drive
# List the installed distribution names:
wsl -l -v
# Create the target folder for the export
D:\ProgramFiles\WSL
# Export the distribution
wsl --export Ubuntu-24.04 D:\ProgramFiles\WSL\ubuntu-export.tar
# Unregister (remove) the original Ubuntu distribution
wsl --unregister Ubuntu-24.04
# Create the target installation directory
D:\ProgramFiles\WSL\Ubuntu-24.04
# Import the distribution into the new location
wsl --import Ubuntu-24.04 D:\ProgramFiles\WSL\Ubuntu-24.04 D:\ProgramFiles\WSL\ubuntu-export.tar --version 2
# Verify the import
wsl -l -v
# Launch the newly imported Ubuntu
wsl -d Ubuntu-24.04
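After the import, the distribution's virtual disk lives under the target directory, and the default login user is typically reset to root (which is why the next section creates a user). A quick sanity check from inside the new WSL shell (paths taken from the commands above):
# The imported distribution's virtual disk should now be on D:
ls -lh /mnt/d/ProgramFiles/WSL/Ubuntu-24.04/ext4.vhdx
# After an import the shell usually comes up as root
whoami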
Adding a new user
# Add a new user
adduser gumc
# The output looks like:
# Adding user `gumc' ...
# Adding new group `gumc' (1000) ...
# Adding new user `gumc' (1000) with group `gumc' ...
# Creating home directory `/home/gumc' ...
# Copying files from `/etc/skel' ...
# New password: <enter your password>
# Retype new password: <enter it again>
# passwd: password updated successfully
# Changing the user information for gumc
# Enter the new value, or press ENTER for the default
# Full Name []: <press Enter to skip>
# Room Number []: <press Enter to skip>
# Work Phone []: <press Enter to skip>
# Home Phone []: <press Enter to skip>
# Other []: <press Enter to skip>
# Is the information correct? [Y/n] Y
# Add the new user to the sudo group (grant admin rights):
usermod -aG sudo gumc
# Set the default login user (the [user] section header is required)
printf '[user]\ndefault=gumc\n' >> /etc/wsl.conf
# Open /etc/wsl.conf with vim and confirm it contains the following:
[user]
default=gumc
# Exit, shut down, and restart
exit
wsl --shutdown
wsl -d Ubuntu-24.04
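After the restart, it is worth confirming that WSL now logs in as the new user and that the sudo group was applied (user name from the example above):
# Should print the new user name instead of root
whoami
# Should list sudo among the user's groups
id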
Creating the data/pack folders
# Note: avoid installing packages on a Windows-mounted directory such as /mnt/d/ProgramFiles/WSL (that will cause permission and performance problems); keep them on the WSL Linux filesystem, e.g. /home/gumc/
sudo mkdir -p /data/pack
# Change the ownership of the folder
sudo chown -R gumc:gumc /data
Installing Conda (Miniconda recommended):
# Go to the download directory
cd /data/pack
# Download the Miniconda installer script:
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
# Run the installer
bash Miniconda3-latest-Linux-x86_64.sh
# Press Enter to view the license, then type yes to accept.
# When asked for the install location, use:
/data/miniconda3
# When the installer asks whether to initialize Miniconda3 by running conda init
# (Do you wish the installer to initialize Miniconda3 by running conda init? [yes|no]), answer yes.
# This updates your shell configuration (.bashrc or .zshrc) so Conda is set up automatically in every new terminal.
# Close the terminal and reopen it
exit
wsl --shutdown
wsl -d Ubuntu-24.04
# Check that Conda is found
which conda
# Basic Conda operations
# List all environments
conda env list
# Remove an environment (sampart3d is the environment name)
conda remove -n sampart3d --all
# Clean caches: index cache, lock files, unused packages, tarballs
conda clean --all -y
# Make sure the NVIDIA driver installed on Windows is up to date and supports WSL2
nvidia-smi
# In the WSL2 terminal, create and activate the Conda environment (pin Python 3.10)
conda create -n sampart3d python=3.10 -y
conda activate sampart3d
# Other operations: to leave the current environment
# conda deactivate
# # Export the environment
# conda env export --from-history --no-builds > environment.yml
# # Update the environment after editing the file
# conda env update --file environment.yml --prune
# Add channels (their order determines priority)
vim /data/miniconda3/.condarc
channels:
  - nvidia        # priority 1
  - conda-forge   # priority 2
  - defaults      # priority 3 (Conda's official default channel)
  - rapidsai      # priority 4
  - pytorch       # priority 5
channel_priority: flexible  # strict, flexible, or disabled (explained later)
# auto_update_conda: true (optional)
# show_channel_urls: true (optional; show channel URLs in solver output)
# If instead you add channels one by one with conda config --add channels (which prepends each new channel),
# the resulting list may look like:
# 0. rapidsai
# 1. nvidia (added last, so highest priority)
# 2. pytorch
# 3. conda-forge
# 4. defaults (usually present by default)
# conda config --show-sources
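The same channel setup can also be done from the command line instead of editing .condarc by hand; a sketch (note that conda config writes to ~/.condarc by default, so add --system if you want it to target /data/miniconda3/.condarc as above):
# --add prepends, so add channels from lowest to highest priority
conda config --add channels pytorch
conda config --add channels rapidsai
conda config --add channels defaults
conda config --add channels conda-forge
conda config --add channels nvidia
conda config --set channel_priority flexible
# Inspect the resulting order and which file each setting came from
conda config --show channels
conda config --show-sources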
Installing CUDA in WSL2
# Make sure you are in the target environment: conda activate sampart3d
# If you previously tried installing cudatoolkit or cudnn in this environment, remove them first for a clean state:
# conda remove -n sampart3d cudatoolkit cuda --force -y
# Check which CUDA packages are present
# conda list | grep -E "cuda|cuml|cudatoolkit|libcumlprims"
# Install CUDA, pinning every CUDA component version explicitly
conda install cudatoolkit=11.8.0 cuda-nvcc=11.8.89 cuda-cudart-dev=11.8.89 cuda-cccl=11.8.89 cuda-cudart=11.8.89 python=3.10 # omit -y so you can confirm the versions before installing
# If unsure of the exact version, look it up with: conda search cuda-cudart-dev=11.8.*
# conda install cudatoolkit=11.8 python=3.10
# Pin the cudatoolkit version: create or append to the pinned file
echo $CONDA_PREFIX
echo "cudatoolkit ==11.8.*" >> $CONDA_PREFIX/conda-meta/pinned
# Installing other packages
# conda install cuda=11.8 cuda-version=11.8
# conda install rapids=23.12 cudf=23.12 cuml=23.12 # omit -y so you can confirm the versions before installing
# When installing other dependencies, declare the CUDA version explicitly so Conda does not silently upgrade it
# conda install rapids=23.12 cudf=23.12 cuml=23.12 cudatoolkit=11.8.0 cuda-nvcc=11.8.89 cuda-cudart-dev=11.8.89 cuda-cccl=11.8.89 cuda-cudart=11.8.89 python=3.10
# CUDA-related alternatives
# conda install rapids=23.12 cudf=23.12 cuml=23.12 python=3.10 cuda-version=11.8 -y
# conda install -c rapidsai -c conda-forge -c nvidia rapids=23.12 python=3.10 'cuda-version>=11.8,<=11.8' 'pytorch=*=*cuda*'
# Verify CUDA inside the Conda environment
nvcc --version
# Check the CUDA development headers
ls $CUDA_HOME/include/cuda/std/cudart*.h
# If the install fails, it is most likely because the system toolchain needs updating (recommended even if nothing fails)
sudo apt update
sudo apt install build-essential -y
# If the gcc version is reported as unsuitable, install gcc 11 and g++ 11 (also recommended even without errors)
conda install -c conda-forge gcc=11 gxx=11 -y
# Verify the compilers
# gcc --version   # should report 11.x.x
# g++ --version   # should report 11.x.x
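An end-to-end check of the toolchain is to compile and run a trivial kernel with the nvcc just installed; this is only a sketch (the /tmp/check_cuda.cu file name is mine, and sm_61 is the arch flag for the GTX 1050 Ti used in this guide, so adjust it to your GPU):
cat > /tmp/check_cuda.cu <<'EOF'
#include <cstdio>
#include <cuda_runtime.h>

// Minimal kernel: one thread prints a line from the GPU
__global__ void hello() { printf("hello from the GPU\n"); }

int main() {
    hello<<<1, 1>>>();
    cudaError_t err = cudaDeviceSynchronize();
    printf("cudaDeviceSynchronize: %s\n", cudaGetErrorString(err));
    return err == cudaSuccess ? 0 : 1;
}
EOF
nvcc -arch=sm_61 /tmp/check_cuda.cu -o /tmp/check_cuda && /tmp/check_cuda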
Installing SAMPart3D
# Download SAMPart3D
cd /data
git clone https://github.com/Pointcept/SAMPart3D.git
cd SAMPart3D
# Make sure you are in the right environment
conda activate sampart3d
# Install the core packages: torch
# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
# conda install pytorch torchvision torchaudio pytorch-cuda=11.8
conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 torch-scatter=2.1.1 cudatoolkit=11.8.0 cuda-nvcc=11.8.89 cuda-cudart-dev=11.8.89 cuda-cccl=11.8.89 cuda-cudart=11.8.89 python=3.10
# [Note: cu118 depends on your CUDA version; check it with nvcc --version]. --index-url makes pip fetch packages from that address instead of the default official PyPI server (pypi.org)
# Verify
python - <<EOF
import torch
import torch_scatter
print(f"PyTorch version: {torch.__version__}")                # should be 2.2.2 or 2.3.0
print(f"torch-scatter version: {torch_scatter.__version__}")  # should be 2.1.2+ or a compatible version
EOF
# If torch-scatter raises errors, reinstall it
pip install torch-scatter -f https://data.pyg.org/whl/torch-2.2.0+cu118.html --force-reinstall --no-cache-dir
# Packages from requirements.txt (a domestic mirror such as Aliyun or Tsinghua is faster)
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt
# Install the module required by PTv3-object
cd libs/pointops
pip install .  # python setup.py install is not recommended
cd ../..
# If you get: OSError: CUDA_HOME environment variable is not set. Please set it to your CUDA install root.
# echo 'export CUDA_HOME=$CONDA_PREFIX' >> ~/.bashrc
# source ~/.bashrc
# echo $CUDA_HOME
# The line below may be needed, but it has to be set again after every restart
export CUDA_HOME=$CONDA_PREFIX:$CONDA_PREFIX/include && echo "Set CUDA_HOME to: $CUDA_HOME"
echo $CUDA_HOME # for convenience, use the "Setting environment variables automatically on Conda environment activation" section at the end instead
# spconv (SparseUNet)
# refer to https://github.com/traveller59/spconv
# pip install spconv-cu124 # choose the version matching your local CUDA
pip install spconv-cu118
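A quick import check for spconv (a sketch; spconv.pytorch is the module path exposed by the spconv 2.x wheels):
python - <<EOF
import spconv.pytorch
print("spconv.pytorch imported OK")
EOF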
Installing Flash-Attention
# Determine whether PyTorch was built with the C++11 ABI; if it prints False, download the cxx11abiFALSE wheel
python - <<EOF
import torch
print("PyTorch built with the C++11 ABI:", torch._C._GLIBCXX_USE_CXX11_ABI)
EOF
# Linux or macOS
# Download from https://github.com/Dao-AILab/flash-attention/releases/download
# Pick the wheel that matches your CUDA, torch, Python and ABI versions
# MAX_JOBS caps the number of parallel build jobs, e.g. 4
MAX_JOBS=4 pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu11torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl --no-build-isolation
# MAX_JOBS=4 pip install flash-attn --no-build-isolation
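To confirm the wheel loads against the installed torch (a sketch; flash_attn is the package's import name), run the check below. As noted later, the GTX 1050 Ti cannot actually run Flash-Attention kernels (enable_flash stays False); this only verifies that the wheel imports cleanly:
python - <<EOF
import flash_attn
print("flash-attn version:", flash_attn.__version__)
EOF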
Installing the acceleration modules
pip install ninja git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
# # If you get a runtime error that libcuda.so cannot be found
# # Run find to locate libcuda.so (it may carry a version suffix such as .1):
# sudo find /usr/lib /lib -name "libcuda.so*"
# sudo mkdir -p /usr/local/cuda/lib64
# sudo ln -s /usr/lib/wsl/lib/libcuda.so.1 /usr/local/cuda/lib64/libcuda.so
# # Verify
# ls -l /usr/local/cuda/lib64/libcuda.so
# # Refresh the linker cache so the system picks up the new link
# sudo ldconfig
# The exports below may be needed, but they have to be set again after every restart
# export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH && echo "Set LD_LIBRARY_PATH to: $LD_LIBRARY_PATH"
# export LD_LIBRARY_PATH=/data/miniconda3/envs/sampart3d/lib/:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/usr/lib/wsl/lib:/usr/local/cuda/lib64:$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
# export LIBRARY_PATH=/usr/lib/wsl/lib:/usr/local/cuda/lib64:$CONDA_PREFIX/lib:$LIBRARY_PATH
# echo $LD_LIBRARY_PATH
# echo $LIBRARY_PATH
# export CPLUS_INCLUDE_PATH=$CONDA_PREFIX/include:$CPLUS_INCLUDE_PATH
# export C_INCLUDE_PATH=$CONDA_PREFIX/include:$C_INCLUDE_PATH
# echo $CPLUS_INCLUDE_PATH
# echo $C_INCLUDE_PATH
# export PATH="$CONDA_PREFIX/bin:$PATH"
# echo $PATH
# Using the GPU-based HDBSCAN clustering algorithm
# refer to https://docs.rapids.ai/install
# pip install --extra-index-url=https://pypi.nvidia.com cudf-cu12==24.6.* cuml-cu12==24.6.*
# Go to https://docs.rapids.ai/install/#selector to generate the install command line
# conda install -c nvidia -c rapidsai -c conda-forge \
#     cudf=25.04 cuml=25.04 python=3.12 "cuda-version>=12.0,<=12.8"
conda install rapids=23.12.* cudf=23.12.* cuml=23.12.* cudatoolkit=11.8.0 cuda-nvcc=11.8.89 cuda-cudart-dev=11.8.89 cuda-cccl=11.8.89 cuda-cudart=11.8.89 python=3.10
# If the install goes wrong, uninstall
# conda uninstall rapids
# conda list | grep -E "rapids|cudf|cuml" # Linux/macOS
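A quick import check for these acceleration packages (a sketch; tinycudann is the import name of the tiny-cuda-nn torch bindings, and cudf/cuml come from RAPIDS):
python - <<EOF
import tinycudann as tcnn
import cudf, cuml
print("tiny-cuda-nn bindings imported OK")
print("cudf:", cudf.__version__, "cuml:", cuml.__version__)
EOF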
SAMPart3D training
Download the pretrained PTv3-object checkpoint
https://huggingface.co/yhyang-myron/SAMPart3D/tree/main (download ptv3-object.pth on Windows and put it under D:/ProgramFiles/WSL/SAMPart3D/ckpt)
Data preprocessing
Use Blender to render multi-view RGB and depth images of the 3D glb mesh
wget https://download.blender.org/release/Blender4.0/blender-4.0.0-linux-x64.tar.xz
tar -xf blender-4.0.0-linux-x64.tar.xz
# rm blender-4.0.0-linux-x64.tar.xz
- Then render the RGB and depth images
# On Windows, create a SAMPart3D folder under D:\ProgramFiles\WSL\ with ckpt, mesh_root and data_root subfolders:
SAMPart3D
|-- ckpt
|   |-- ptv3-object.pth
|-- mesh_root
|   |-- 000001.ZaanseSchans-Bike.glb
|-- data_root
    |-- 000001.ZaanseSchans-Bike
        |-- meta.json
        |-- render_0000.webp
        |-- depth_0000.exr
        ...
# Link the Windows folders into Linux
ln -s "/mnt/d/ProgramFiles/WSL/SAMPart3D/ckpt" /data/SAMPart3D/ckpt
ln -s "/mnt/d/ProgramFiles/WSL/SAMPart3D/mesh_root" /data/SAMPart3D/mesh_root
ln -s "/mnt/d/ProgramFiles/WSL/SAMPart3D/data_root" /data/SAMPart3D/data_root
# Edit the configuration
vim /data/SAMPart3D/configs/sampart3d/sampart3d-trainmlp-render16views.py
# misc custom setting
batch_size = 16  # increase the batch size; adjust to your GPU memory
num_worker = 2   # more data-loading worker processes; tune to your CPU core count
# scheduler settings
epoch = 1000       # reduce the total number of training epochs
eval_epoch = 1000  # reduce the evaluation epochs
# Data sampling settings
data = dict(
    train=dict(
        sample_num=10000,      # fewer sampled points per point cloud
        pixels_per_image=128,  # fewer pixels per image
        batch_size=16,         # larger batch size
    )
)
# Simplified model structure
model = dict(
    backbone = dict(
        enc_depths=(2, 2, 2, 4, 8),                # reduce encoder depths
        enc_channels=(32, 64, 128, 256, 384),      # channel counts can be reduced somewhat
        enc_patch_size=(512, 512, 512, 512, 512),  # smaller patch size
        # enable_flash=True,  # enable Flash Attention acceleration
    )
)
# Optimizer and scheduler settings
# optimizer = dict(type="AdamW", lr=2e-4)  # slightly larger learning rate
# scheduler = dict(
#     type="OneCycleLR",
#     max_lr=[2e-4],
#     pct_start=0.1,
#     div_factor=10.0,
#     final_div_factor=10.0,
# )
# The 1050 Ti does not support Flash-Attention, so this must be set to False
enable_flash=False
# ${PATH_TO_BLENDER} -b -P blender_render_16views.py ${MESH_PATH} ${TYPES} ${OUTPUT_PATH}
/data/pack/blender-4.0.0-linux-x64/blender -b -P /data/SAMPart3D/tools/blender_render_16views.py /data/SAMPart3D/mesh_root/ZaanseSchansBike.glb glb /data/SAMPart3D/data_root/ZaanseSchansBike
# If you get: blender-4.3.2-linux-x64/blender: error while loading shared libraries: libSM.so.6: cannot open shared object file: No such file or directory, install
sudo apt install libsm6
# If you get: Unable to find 'libdecor-0.so'
sudo apt install libdecor-0-0 libdecor-0-plugin-1-cairo
# Blender 4.3.2 switches to BLENDER_EEVEE_NEXT and raises errors such as:
# bpy_struct: item.attr = val: enum "BLENDER_EEVEE" not found in ('BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 'CYCLES')
# AttributeError: 'SceneEEVEE' object has no attribute 'use_ssr'
# AttributeError: 'Mesh' object has no attribute 'use_auto_smooth'
# These issues are hard to work around, so use blender-4.0.0 instead
# Note
# The glb file name must not contain hyphens (-); only letters, digits and underscores (_) are allowed, and it must not start with a digit
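Before rendering, the mesh name can be checked against those rules with a small helper (a hedged sketch; the regex simply encodes the constraints above, and ZaanseSchansBike is the example name used in this guide):
python - <<EOF
import re
name = "ZaanseSchansBike"  # file name without the .glb extension
# Letters, digits and underscores only, and not starting with a digit
if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", name):
    print(f"'{name}' is a valid mesh name")
else:
    print(f"'{name}' is invalid: use only letters, digits and underscores, and do not start with a digit")
EOF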
Training
# Download pretrained PTv3-object https://huggingface.co/yhyang-myron/SAMPart3D/tree/main
# # Put the downloaded ptv3-object.pth into ckpt
# /mnt/d/ProgramFiles/WSL/SAMPart3D/ckpt
# Edit data_root, mesh_root and backbone_weight_path in configs/sampart3d/sampart3d-trainmlp-render16views.py
vim /data/SAMPart3D/configs/sampart3d/sampart3d-trainmlp-render16views.py
# Set them to:
data_root = "/data/SAMPart3D/data_root"
mesh_root = "/data/SAMPart3D/mesh_root"
backbone_weight_path = "/data/SAMPart3D/ckpt/ptv3-object.pth"
# Link the CUDA paths
sudo mkdir -p /usr/local/cuda/
# sudo ln -s $CONDA_PREFIX/lib /usr/local/cuda/lib       # link Conda's lib to cuda/lib
sudo ln -s $CONDA_PREFIX/lib /usr/local/cuda/lib64        # link Conda's lib to cuda/lib64
sudo ln -s $CONDA_PREFIX/include /usr/local/cuda/include  # link Conda's include to cuda/include
sudo ln -s $CONDA_PREFIX/bin /usr/local/cuda/bin          # symlink the bin directory
sudo ln -s $CONDA_PREFIX/nvvm /usr/local/cuda/nvvm        # NVVM libraries; some advanced CUDA features (e.g. JIT compilation) depend on them
# Train
# export CUDA_VISIBLE_DEVICES=0
MAX_JOBS=2 sh /data/SAMPart3D/scripts/train.sh -g 1 -d sampart3d -c sampart3d-trainmlp-render16views -n ZaanseSchansBike -o ZaanseSchansBike
# If a SyntaxError occurs
pip uninstall networkx  # uninstall first to get a clean state
pip install --upgrade networkx
# If ModuleNotFoundError: No module named 'termcolor'
pip install termcolor
# If ModuleNotFoundError: No module named 'google'
conda install protobuf
# If ModuleNotFoundError: No module named 'narwhals'
conda install narwhals
# If OSError: /data/miniconda3/envs/sampart3d/lib/python3.10/site-packages/torch_scatter/_version_cuda.so: undefined symbol: _ZN3c1017RegisterOperatorsD1Ev
python - <<EOF
import torch
print(f"PyTorch version: {torch.__version__}")
print(f"PyTorch CUDA version: {torch.version.cuda}")  # CUDA version PyTorch was built with
print(f"Is CUDA available: {torch.cuda.is_available()}")
# Check which C++ ABI PyTorch was built with
print(torch.__config__.show())  # look for -D_GLIBCXX_USE_CXX11_ABI=0 or =1
EOF
# If OSError: Trainer: SAMPart3DDataset16Views: We couldn't connect to 'https://huggingface.co' to load the files, and couldn't find them in the cached files
# Setting an HF mirror works around the connectivity problem with huggingface.co.
export HF_ENDPOINT=https://hf-mirror.com
# Windows can reach the WSL filesystem through the path below; handy for editing the code with VS Code
\\wsl$\Ubuntu-24.04\data\SAMPart3D
# If you get the warning: Using a slow image processor as use_fast is unset and a slow processor was saved with this model. use_fast=True will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with use_fast=False.
# Change `from transformers import pipeline, SamModel` to
from transformers import pipeline, SamModel, SamImageProcessor
# and change `SAM_model = pipeline(xxx)` to
def prepare_meta_data(self, data_path=None):
    # Explicitly create the image processor and the model
    processor = SamImageProcessor.from_pretrained("facebook/sam-vit-huge", use_fast=True)
    model = SamModel.from_pretrained("facebook/sam-vit-huge")
    # Pass the processor explicitly when creating the pipeline
    SAM_model = pipeline(
        "mask-generation",
        model=model,
        image_processor=processor,
        device=self.device
    )
    # ... rest of the code unchanged ...
# Check whether the CUDA headers can be reached correctly
python - <<EOF
import os
os.environ["CUDA_HOME"] = "/data/miniconda3/envs/sampart3d"  # replace with your path
cassert_path = os.path.join(os.environ["CUDA_HOME"], "include", "cuda", "std", "cassert")
if os.path.exists(cassert_path):
    print(f"Found the CUDA header! Path: {cassert_path}")
else:
    print(f"Path is still invalid, please check: {cassert_path}")
    print("Manually checking whether the file exists: ", end="")
    os.system(f"ls -l {cassert_path}")
EOF
HoloPart
conda activate sampart3d
# Clone into /data so the paths below match
cd /data
git clone https://github.com/VAST-AI-Research/HoloPart.git
cd HoloPart
# Install the dependencies
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt
# On Windows, create a HoloPart folder under D:\ProgramFiles\WSL\ with an input_mesh subfolder
HoloPart
|-- input_mesh
# Link the Windows folder into Linux
ln -s "/mnt/d/ProgramFiles/WSL/HoloPart/input_mesh" /data/HoloPart/input_mesh
# Go into input_mesh
cd /data/HoloPart/input_mesh/
python - <<EOF
python - <<EOF
import trimesh
import numpy as np
import os
print(f"Python script CWD: {os.getcwd()}")
input_mesh_name = "AmsterdamPaleisopdeDam"
mesh_path = f"/data/SAMPart3D/mesh_root/{input_mesh_name}.glb"
mask_path = f"/data/SAMPart3D/exp/sampart3d/{input_mesh_name}/results/last/mesh_0.0.npy"
output_path = f"{input_mesh_name}_output.glb"
print(f"Attempting to load mesh from: {mesh_path}")
print(f"Attempting to load mask from: {mask_path}")
mesh = trimesh.load(mesh_path, force="mesh")
mask_npy = np.load(mask_path)
mesh_parts = []
for part_id in np.unique(mask_npy):
mesh_part = mesh.submesh([mask_npy == part_id], append=True)
mesh_parts.append(mesh_part)
trimesh.Scene(mesh_parts).export(output_path)
EOF
# Decompose the 3D mesh into complete parts (run from the HoloPart repo root)
cd /data/HoloPart
python -m scripts.inference_holopart --mesh-input /data/HoloPart/input_mesh/AmsterdamPaleisopdeDam_output.glb
# If ModuleNotFoundError: No module named 'pymeshlab'
pip install pymeshlab
Setting environment variables automatically on Conda environment activation
# Find the activation script directory of your Conda environment. For an environment named sampart3d it is usually:
/data/miniconda3/envs/sampart3d/etc/conda/activate.d/
# If the directory does not exist, create it:
mkdir -p /data/miniconda3/envs/sampart3d/etc/conda/activate.d/
# Create a .sh script in that directory, e.g. env_vars.sh:
vim /data/miniconda3/envs/sampart3d/etc/conda/activate.d/env_vars.sh
# Add the following content to the file
#!/bin/bash
export OLD_CUDA_HOME="${CUDA_HOME}"
if [ -n "${CUDA_HOME}" ]; then
export CUDA_HOME="${CONDA_PREFIX}:${CUDA_HOME}"
else
export CUDA_HOME="${CONDA_PREFIX}"
fi
echo "Set CUDA_HOME to: $CUDA_HOME"
export OLD_LD_LIBRARY_PATH="${LD_LIBRARY_PATH}"
if [ -n "${LD_LIBRARY_PATH}" ]; then
export LD_LIBRARY_PATH="/usr/lib/wsl/lib:/usr/local/cuda/lib64:${CONDA_PREFIX}/lib:${LD_LIBRARY_PATH}"
else
export LD_LIBRARY_PATH="/usr/lib/wsl/lib:/usr/local/cuda/lib64:${CONDA_PREFIX}/lib"
fi
echo "Set LD_LIBRARY_PATH to: $LD_LIBRARY_PATH"
export OLD_LIBRARY_PATH="${LIBRARY_PATH}"
if [ -n "${LIBRARY_PATH}" ]; then
export LIBRARY_PATH="/usr/lib/wsl/lib:/usr/local/cuda/lib64:${CONDA_PREFIX}/lib:${LIBRARY_PATH}"
else
export LIBRARY_PATH="/usr/lib/wsl/lib:/usr/local/cuda/lib64:${CONDA_PREFIX}/lib"
fi
echo "Set LIBRARY_PATH to: $LIBRARY_PATH"
export OLD_CPLUS_INCLUDE_PATH="${CPLUS_INCLUDE_PATH}"
if [ -n "${CPLUS_INCLUDE_PATH}" ]; then
export CPLUS_INCLUDE_PATH="${CONDA_PREFIX}/include:${CPLUS_INCLUDE_PATH}"
else
export CPLUS_INCLUDE_PATH="${CONDA_PREFIX}/include"
fi
echo "Set CPLUS_INCLUDE_PATH to: $CPLUS_INCLUDE_PATH"
export OLD_C_INCLUDE_PATH="${C_INCLUDE_PATH}"
if [ -n "${C_INCLUDE_PATH}" ]; then
export C_INCLUDE_PATH="${CONDA_PREFIX}/include:${C_INCLUDE_PATH}"
else
export C_INCLUDE_PATH="${CONDA_PREFIX}/include"
fi
echo "Set C_INCLUDE_PATH to: $C_INCLUDE_PATH"
export HF_ENDPOINT=https://hf-mirror.com
# export OLD_PATH="${PATH}"
# if [ -n "${PATH}" ]; then
# export PATH="${CONDA_PREFIX}/bin:${PATH}"
# else
# export PATH="${CONDA_PREFIX}/bin"
# fi
# echo "Set PATH to: $PATH"
# A matching deactivation script is needed to restore the original values
# Create the deactivation script directory:
mkdir -p /data/miniconda3/envs/sampart3d/etc/conda/deactivate.d/
# Create the deactivation script env_vars.sh:
vim /data/miniconda3/envs/sampart3d/etc/conda/deactivate.d/env_vars.sh
# Add the following content:
#!/bin/bash
export CUDA_HOME="${OLD_CUDA_HOME}"
unset OLD_CUDA_HOME
echo "set CUDA_HOME to: $CUDA_HOME"
export LD_LIBRARY_PATH="${OLD_LD_LIBRARY_PATH}"
unset OLD_LD_LIBRARY_PATH
echo "set LD_LIBRARY_PATH to: $LD_LIBRARY_PATH"
export LIBRARY_PATH="${OLD_LIBRARY_PATH}"
unset OLD_LIBRARY_PATH
echo "set LIBRARY_PATH to: $LIBRARY_PATH"
export CPLUS_INCLUDE_PATH="${OLD_CPLUS_INCLUDE_PATH}"
unset OLD_CPLUS_INCLUDE_PATH
echo "set CPLUS_INCLUDE_PATH to: $CPLUS_INCLUDE_PATH"
export C_INCLUDE_PATH="${OLD_C_INCLUDE_PATH}"
unset OLD_C_INCLUDE_PATH
echo "set C_INCLUDE_PATH to: $C_INCLUDE_PATH"
# export PATH="${OLD_PATH}"
# unset OLD_PATH
# echo "set PATH to: $PATH"
# From now on, every time you run conda activate sampart3d these variables (CUDA_HOME, LD_LIBRARY_PATH, CPLUS_INCLUDE_PATH, etc.) are set automatically, and conda deactivate attempts to restore the previous values
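A quick way to confirm the hooks actually run (environment name from above):
# Re-activate the environment and check that the activation hook fired
conda deactivate
conda activate sampart3d
echo $CUDA_HOME
echo $LD_LIBRARY_PATH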