# SPDX-License-Identifier: GPL-2.0-or-later
# SPDX-FileCopyrightText: 2011-2024 Blender Authors
# <pep8 compliant>
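"""Compile steps for the Blender buildbot worker: CMake configure, build and install."""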
import multiprocessing
import os
import platform
import pathlib
from typing import Dict
from pathlib import Path
import worker.blender
import worker.utils
def fetch_ideal_cpu_count(estimate_core_memory_in_mb: int) -> int:
"""Fetch cpu ideal for the building process based on machine info"""
worker.utils.info(f"estimate_core_memory_in_mb={estimate_core_memory_in_mb}")
total_cpu_count = multiprocessing.cpu_count()
worker.utils.info(f"total_cpu_count={total_cpu_count}")
ideal_cpu_count = total_cpu_count
spare_cpu_count = 2
if platform.system().lower() != "darwin":
worker.utils.info(f"In current path {os.getcwd()}")
import psutil
virtual_memory = psutil.virtual_memory()
worker.utils.info(f"virtual_memory={virtual_memory}")
total_memory_in_bytes = virtual_memory.total
worker.utils.info(f"total_memory_in_bytes={total_memory_in_bytes}")
available_memory_in_bytes = virtual_memory.available
worker.utils.info(f"available_memory_in_bytes={available_memory_in_bytes}")
usable_memory_in_bytes = available_memory_in_bytes
worker.utils.info(f"usable_memory_in_bytes={usable_memory_in_bytes}")
        estimate_memory_per_core_in_bytes = estimate_core_memory_in_mb * 1024 * 1024
        worker.utils.info(
            f"estimate_memory_per_core_in_bytes={estimate_memory_per_core_in_bytes}"
        )
        capable_cpu_count = int(
            total_memory_in_bytes / estimate_memory_per_core_in_bytes
        )
worker.utils.info(f"capable_cpu_count={capable_cpu_count}")
min_cpu_count = min(total_cpu_count, capable_cpu_count)
worker.utils.info(f"min_cpu_count={min_cpu_count}")
ideal_cpu_count = (
min_cpu_count if min_cpu_count <= 8 else min_cpu_count - spare_cpu_count
)
worker.utils.info(f"ideal_cpu_count={ideal_cpu_count}")
return ideal_cpu_count
def get_cmake_build_type(builder: worker.blender.CodeBuilder) -> str:
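    """Map the builder's build configuration to a CMake build type."""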
if builder.build_configuration == "debug":
return "Debug"
elif builder.build_configuration == "sanitizer":
# No reliable ASAN on Windows currently.
if builder.platform != "windows":
return "RelWithDebInfo"
else:
return "Release"
else:
return "Release"
def get_cmake_options(builder: worker.blender.CodeBuilder) -> worker.utils.CmdSequence:
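    """Assemble the CMake configure options for this builder's platform and configuration."""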
needs_gtest_compile = not builder.python_module
with_gtests_state = "ON" if needs_gtest_compile else "OFF"
with_gpu_binaries_state = "ON" if builder.needs_gpu_binaries else "OFF"
with_gpu_tests = False
buildbotConfig = builder.pipeline_config()
# This is meant for stable build compilation
config_file_path = "build_files/cmake/config/blender_release.cmake"
platform_config_file_path = None
if builder.platform == "darwin":
platform_config_file_path = "build_files/buildbot/config/blender_macos.cmake"
elif builder.platform == "linux":
platform_config_file_path = "build_files/buildbot/config/blender_linux.cmake"
elif builder.platform == "windows":
platform_config_file_path = "build_files/buildbot/config/blender_windows.cmake"
if platform_config_file_path:
worker.utils.info(
f'Trying platform-specific buildbot configuration "{platform_config_file_path}"'
)
if (Path(builder.blender_dir) / platform_config_file_path).exists():
worker.utils.info(
f'Using platform-specific buildbot configuration "{platform_config_file_path}"'
)
config_file_path = platform_config_file_path
else:
worker.utils.info(f'Using generic buildbot configuration "{config_file_path}"')
# Must be first so that we can override some of the options found in the file
options = ["-C", os.path.join(builder.blender_dir, config_file_path)]
# Optional build as Python module.
if builder.python_module:
bpy_config_file_path = "build_files/cmake/config/bpy_module.cmake"
options += ["-C", os.path.join(builder.blender_dir, bpy_config_file_path)]
options += ["-DWITH_INSTALL_PORTABLE=ON"]
can_enable_oneapi_binaries = True
if builder.service_env_id != "PROD":
# UATEST machines are too slow currently.
worker.utils.info(f'Disabling oneAPI binaries on "{builder.service_env_id}"')
can_enable_oneapi_binaries = False
if builder.patch_id:
        # Not enough system throughput to cover AoT oneAPI binaries for patches.
worker.utils.info("Disabling oneAPI binaries for patch build")
can_enable_oneapi_binaries = False
if builder.track_id == "vexp":
# Only enable AoT oneAPI binaries for main and release branches.
worker.utils.info("Disabling oneAPI binaries for branch build")
can_enable_oneapi_binaries = False
# Add platform specific generator and configs
if builder.platform == "darwin":
if builder.needs_ninja:
options += ["-G", "Ninja"]
else:
options += ["-G", "Unix Makefiles"]
options += [f"-DCMAKE_OSX_ARCHITECTURES:STRING={builder.architecture}"]
elif builder.platform == "linux":
if builder.needs_ninja:
options += ["-G", "Ninja"]
else:
options += ["-G", "Unix Makefiles"]
elif builder.platform == "windows":
if builder.needs_ninja:
# set CC=%LLVM_DIR%\bin\clang-cl
# set CXX=%LLVM_DIR%\bin\clang-cl
# set CFLAGS=-m64 -fmsc-version=1922
# set CXXFLAGS=-m64 -fmsc-version=1922
vc_tools_install_dir = os.environ.get("VCToolsInstallDir")
if not vc_tools_install_dir:
                raise Exception("Missing environment variable VCToolsInstallDir")
vc_tool_install_path = pathlib.PureWindowsPath(vc_tools_install_dir)
if builder.architecture == "arm64":
compiler_file_path = "C:/Program Files/LLVM/bin/clang-cl.exe"
compiler_file_path = "C:/Program Files/LLVM/bin/clang-cl.exe"
linker_file_path = "C:/Program Files/LLVM/bin/lld-link.exe"
else:
vs_tool_install_dir_suffix = "bin/Hostx64/x64"
compiler_file_path = str(
vc_tool_install_path / f"{vs_tool_install_dir_suffix}/cl.exe"
)
linker_file_path = str(
vc_tool_install_path / f"{vs_tool_install_dir_suffix}/link.exe"
)
options += ["-G", "Ninja"]
# -DWITH_WINDOWS_SCCACHE=On
options += [
f"-DCMAKE_C_COMPILER:FILEPATH={compiler_file_path}",
f"-DCMAKE_CXX_COMPILER:FILEPATH={compiler_file_path}",
]
# options += ["-DCMAKE_EXE_LINKER_FLAGS:STRING=/machine:x64"]
options += [f"-DCMAKE_LINKER:FILEPATH={linker_file_path}"]
            # Skip the compiler checks; they do not work in this setup.
options += ["-DCMAKE_C_COMPILER_WORKS=1"]
options += ["-DCMAKE_CXX_COMPILER_WORKS=1"]
else:
if builder.architecture == "arm64":
options += ["-G", "Visual Studio 17 2022", "-A", "arm64"]
else:
options += ["-G", "Visual Studio 16 2019", "-A", "x64"]
# Add configured overrides
    platform_architecture = f"{builder.platform}-{builder.architecture}"
    cmake_overrides: Dict[str, str] = {}
    cmake_overrides.update(buildbotConfig["cmake"]["default"]["overrides"])
    cmake_overrides.update(buildbotConfig["cmake"][platform_architecture]["overrides"])
# Disallow certain options
restricted_key_patterns = [
"POSTINSTALL_SCRIPT",
"OPTIX_",
"CMAKE_OSX_ARCHITECTURES",
"CMAKE_BUILD_TYPE",
"CMAKE_INSTALL_PREFIX",
"WITH_GTESTS",
"CUDA",
"WITH_CYCLES",
"CYCLES_CUDA",
]
for cmake_key in cmake_overrides.keys():
for restricted_key_pattern in restricted_key_patterns:
if restricted_key_pattern in cmake_key:
raise Exception(
f"CMake key [{cmake_key}] cannot be overriden, aborting"
)
for cmake_key, cmake_value in cmake_overrides.items():
options += [f"-D{cmake_key}={cmake_value}"]
cmake_build_type = get_cmake_build_type(builder)
options += [f"-DCMAKE_BUILD_TYPE:STRING={cmake_build_type}"]
if builder.build_configuration == "sanitizer":
# No reliable ASAN on Windows currently.
if builder.platform != "windows":
options += ["-DWITH_COMPILER_ASAN=ON"]
options += ["-DWITH_ASSERT_RELEASE=ON"]
# Avoid buildbot timeouts, see blender/blender#116635.
options += ["-DWITH_UNITY_BUILD=OFF"]
elif builder.build_configuration == "asserts":
options += ["-DWITH_ASSERT_RELEASE=ON"]
options += [f"-DCMAKE_INSTALL_PREFIX={builder.install_dir}"]
options += ["-DWITH_INSTALL_COPYRIGHT=ON"]
options += [f"-DWITH_GTESTS={with_gtests_state}"]
if builder.platform == "windows":
if builder.architecture != "arm64":
# CUDA + HIP + oneAPI on Windows
options += [f"-DWITH_CYCLES_CUDA_BINARIES={with_gpu_binaries_state}"]
options += [f"-DWITH_CYCLES_HIP_BINARIES={with_gpu_binaries_state}"]
if can_enable_oneapi_binaries:
options += [f"-DWITH_CYCLES_ONEAPI_BINARIES={with_gpu_binaries_state}"]
options += ["-DSYCL_OFFLINE_COMPILER_PARALLEL_JOBS=2"]
else:
options += ["-DWITH_CYCLES_ONEAPI_BINARIES=OFF"]
if "hip" in buildbotConfig:
hip_version = buildbotConfig["hip"]["version"]
else:
hip_version = "5.2.21440"
if "ocloc" in buildbotConfig:
ocloc_version = buildbotConfig["ocloc"]["version"]
else:
ocloc_version = "dev_01"
options += [f"-DHIP_ROOT_DIR=C:/ProgramData/AMD/HIP/hip_sdk_{hip_version}"]
options += ["-DHIP_PERL_DIR=C:/ProgramData/AMD/HIP/strawberry/perl/bin"]
options += [
f"-DOCLOC_INSTALL_DIR=C:/ProgramData/Intel/ocloc/ocloc_{ocloc_version}"
]
elif builder.platform == "linux":
# CUDA on Linux
options += [f"-DWITH_CYCLES_CUDA_BINARIES={with_gpu_binaries_state}"]
options += [f"-DWITH_CYCLES_HIP_BINARIES={with_gpu_binaries_state}"]
if can_enable_oneapi_binaries:
options += [f"-DWITH_CYCLES_ONEAPI_BINARIES={with_gpu_binaries_state}"]
options += ["-DSYCL_OFFLINE_COMPILER_PARALLEL_JOBS=2"]
else:
options += ["-DWITH_CYCLES_ONEAPI_BINARIES=OFF"]
# Directory changed to just /opt/rocm in 6.x
rocm_path = pathlib.Path("/opt/rocm/hip")
if not rocm_path.exists():
rocm_path = pathlib.Path("/opt/rocm")
options += [f"-DHIP_ROOT_DIR:PATH={rocm_path}"]
        # GPU render tests are currently only supported on Linux + NVIDIA.
if builder.needs_gpu_tests:
with_gpu_tests = True
if builder.needs_gpu_binaries:
options += ["-DCYCLES_TEST_DEVICES=CPU;OPTIX"]
elif builder.platform == "darwin":
# Metal on macOS
if builder.architecture == "arm64":
if builder.needs_gpu_tests:
with_gpu_tests = True
options += ["-DCYCLES_TEST_DEVICES=CPU;METAL"]
if with_gpu_tests:
        # Needs X11 or Wayland; fails when using xvfb to emulate X11.
# options += [f"-DWITH_GPU_DRAW_TESTS=ON"]
options += ["-DWITH_GPU_RENDER_TESTS=ON"]
options += ["-DWITH_GPU_RENDER_TESTS_SILENT=OFF"]
options += ["-DWITH_COMPOSITOR_REALTIME_TESTS=ON"]
if "optix" in buildbotConfig:
optix_version = buildbotConfig["optix"]["version"]
if builder.platform == "windows" and builder.architecture != "arm64":
options += [
f"-DOPTIX_ROOT_DIR:PATH=C:/ProgramData/NVIDIA Corporation/OptiX SDK {optix_version}"
]
elif builder.platform == "linux":
optix_base_dir = pathlib.Path.home() / ".devops" / "apps"
options += [
f"-DOPTIX_ROOT_DIR:PATH={optix_base_dir}/NVIDIA-OptiX-SDK-{optix_version}-linux64-x86_64"
]
# Blender 4.3 has switched to pre-compiled HIP-RT libraries.
if "hiprt" in buildbotConfig:
hiprt_version = buildbotConfig["hiprt"]["version"]
if builder.platform == "windows" and builder.architecture != "arm64":
options += [
f"-DHIPRT_ROOT_DIR:PATH=C:/ProgramData/AMD/HIP/hiprtsdk-{hiprt_version}/hiprt{hiprt_version}"
]
elif builder.platform == "linux":
hiprt_base_dir = pathlib.Path.home() / ".devops" / "apps"
options += [
f"-DHIPRT_ROOT_DIR:PATH={hiprt_base_dir}/hiprtsdk-{hiprt_version}/hiprt{hiprt_version}"
]
# Enable option to verify enabled libraries and features did not get disabled.
options += ["-DWITH_STRICT_BUILD_OPTIONS=ON"]
if builder.needs_gpu_binaries:
try:
cuda10_version = buildbotConfig["cuda10"]["version"]
except KeyError:
cuda10_version = buildbotConfig["sdks"]["cuda10"]["version"]
cuda10_folder_version = ".".join(cuda10_version.split(".")[:2])
try:
cuda11_version = buildbotConfig["cuda11"]["version"]
except KeyError:
cuda11_version = buildbotConfig["sdks"]["cuda11"]["version"]
cuda11_folder_version = ".".join(cuda11_version.split(".")[:2])
try:
cuda12_version = buildbotConfig["cuda12"]["version"]
cuda12_folder_version = ".".join(cuda12_version.split(".")[:2])
have_cuda12 = True
except KeyError:
have_cuda12 = False
if builder.platform == "windows" and builder.architecture != "arm64":
# CUDA 10
cuda10_path = pathlib.Path(
f"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v{cuda10_folder_version}"
)
if not cuda10_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda10_path}] for version [{cuda10_version}], aborting"
)
cuda10_file_path = cuda10_path / "bin" / "nvcc.exe"
options += [f"-DCUDA10_TOOLKIT_ROOT_DIR:PATH={cuda10_path}"]
options += [f"-DCUDA10_NVCC_EXECUTABLE:FILEPATH={cuda10_file_path}"]
# CUDA 11
cuda11_path = pathlib.Path(
f"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v{cuda11_folder_version}"
)
if not cuda11_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda11_path}] for version [{cuda11_version}], aborting"
)
cuda11_file_path = cuda11_path / "bin" / "nvcc.exe"
# CUDA 12
if have_cuda12:
cuda12_path = pathlib.Path(
f"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v{cuda12_folder_version}"
)
if not cuda12_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda12_path}] for version [{cuda12_version}], aborting"
)
cuda12_file_path = cuda12_path / "bin" / "nvcc.exe"
options += [f"-DCUDA11_TOOLKIT_ROOT_DIR:PATH={cuda11_path}"]
options += [f"-DCUDA11_NVCC_EXECUTABLE:FILEPATH={cuda11_file_path}"]
options += [f"-DCUDA_TOOLKIT_ROOT_DIR:PATH={cuda12_path}"]
options += [f"-DCUDA_NVCC_EXECUTABLE:FILEPATH={cuda12_file_path}"]
else:
options += [f"-DCUDA_TOOLKIT_ROOT_DIR:PATH={cuda11_path}"]
options += [f"-DCUDA_NVCC_EXECUTABLE:FILEPATH={cuda11_file_path}"]
elif builder.platform == "linux":
# CUDA 10
cuda10_path = pathlib.Path(f"/usr/local/cuda-{cuda10_folder_version}")
if not cuda10_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda10_path}] for version [{cuda10_version}], aborting"
)
cuda10_file_path = cuda10_path / "bin" / "nvcc"
# CUDA 11
cuda11_path = pathlib.Path(f"/usr/local/cuda-{cuda11_folder_version}")
if not cuda11_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda11_path}] for version [{cuda11_version}], aborting"
)
cuda11_file_path = cuda11_path / "bin" / "nvcc"
# CUDA 12
if have_cuda12:
cuda12_path = pathlib.Path(f"/usr/local/cuda-{cuda12_folder_version}")
if not cuda12_path.exists():
raise Exception(
f"Was not able to find CUDA path [{cuda12_path}] for version [{cuda12_version}], aborting"
)
cuda12_file_path = cuda12_path / "bin" / "nvcc"
            # CUDA 10 must be given a compatible host compiler.
            options += [f"-DCUDA10_TOOLKIT_ROOT_DIR:PATH={cuda10_path}"]
            options += [f"-DCUDA10_NVCC_EXECUTABLE:STRING={cuda10_file_path}"]
            if pathlib.Path("/etc/rocky-release").exists():
                # Rocky Linux 8 ships GCC 8 in /usr/bin.
                options += ["-DCUDA_HOST_COMPILER=/usr/bin/gcc"]
            else:
                # Other distributions use the devtoolset-8 host compiler.
                options += [
                    "-DCUDA_HOST_COMPILER=/opt/rh/devtoolset-8/root/usr/bin/gcc"
                ]
# CUDA 11 or 12.
if have_cuda12:
options += [f"-DCUDA11_TOOLKIT_ROOT_DIR:PATH={cuda11_path}"]
options += [f"-DCUDA11_NVCC_EXECUTABLE:STRING={cuda11_file_path}"]
options += [f"-DCUDA_TOOLKIT_ROOT_DIR:PATH={cuda12_path}"]
options += [f"-DCUDA_NVCC_EXECUTABLE:FILEPATH={cuda12_file_path}"]
else:
options += [f"-DCUDA_TOOLKIT_ROOT_DIR:PATH={cuda11_path}"]
options += [f"-DCUDA_NVCC_EXECUTABLE:FILEPATH={cuda11_file_path}"]
else:
worker.utils.info("Skipping gpu compilation as requested")
return options
def clean_directories(builder: worker.blender.CodeBuilder) -> None:
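    """Remove the previous install directory and stale buildinfo files before building."""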
    worker.utils.info(
        f"Cleaning directory [{builder.install_dir}] from the previous run"
    )
worker.utils.remove_dir(builder.install_dir)
os.makedirs(builder.build_dir, exist_ok=True)
worker.utils.info("Remove buildinfo files to re-generate them")
for build_info_file_name in (
"buildinfo.h",
"buildinfo.h.txt",
):
full_path = builder.build_dir / "source" / "creator" / build_info_file_name
if full_path.exists():
worker.utils.info(f"Removing file [{full_path}]")
worker.utils.remove_file(full_path)
def cmake_configure(builder: worker.blender.CodeBuilder) -> None:
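    """Run the CMake configure step with a fresh cache."""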
cmake_cache_file_path = builder.build_dir / "CMakeCache.txt"
if cmake_cache_file_path.exists():
worker.utils.info("Removing CMake cache")
worker.utils.remove_file(cmake_cache_file_path)
worker.utils.info("CMake configure options")
cmake_options = get_cmake_options(builder)
cmd = ["cmake", "-S", builder.blender_dir, "-B", builder.build_dir] + list(
cmake_options
)
builder.call(cmd)
    # HACK: This does not work as expected; since the CMake cache is always updated,
    # we end up recompiling on each compile step (code, GPU and install).
needs_cmake_cache_hack = False
if needs_cmake_cache_hack and pathlib.Path("/usr/lib64/libpthread.a").exists():
        # HACK: The detection of libpthread does not work on CentOS 7.
worker.utils.warning(f"Hacking file [{cmake_cache_file_path}]")
tmp_cmake_cache_file_path = builder.build_dir / "CMakeCache.txt.tmp"
        with open(cmake_cache_file_path) as fin, open(
            tmp_cmake_cache_file_path, "wt"
        ) as fout:
            for line in fin:
                # worker.utils.info(line)
                if (
                    "OpenMP_pthread_LIBRARY:FILEPATH=OpenMP_pthread_LIBRARY-NOTFOUND"
                    in line
                ):
                    worker.utils.warning(
                        "Replacing [OpenMP_pthread_LIBRARY-NOTFOUND] with [/usr/lib64/libpthread.a]"
                    )
                    line = line.replace(
                        "OpenMP_pthread_LIBRARY:FILEPATH=OpenMP_pthread_LIBRARY-NOTFOUND",
                        "OpenMP_pthread_LIBRARY:FILEPATH=/usr/lib64/libpthread.a",
                    )
                fout.write(line)
worker.utils.warning(f"Updating [{cmake_cache_file_path}]")
os.replace(tmp_cmake_cache_file_path, cmake_cache_file_path)
def cmake_build(builder: worker.blender.CodeBuilder, do_install: bool) -> None:
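    """Run the CMake build (and optionally install) step with a memory-aware parallel job count."""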
if builder.track_id in ["vdev", "v430"]:
if builder.platform == "windows":
estimate_gpu_memory_in_mb = 6000
else:
estimate_gpu_memory_in_mb = 4000
else:
estimate_gpu_memory_in_mb = 6000
estimate_core_memory_in_mb = (
estimate_gpu_memory_in_mb if builder.needs_gpu_binaries else 1000
)
ideal_cpu_count = fetch_ideal_cpu_count(estimate_core_memory_in_mb)
    # Enable verbose building to make ninja output more often.
    # It should help with slow build commands like oneAPI, and helps with
    # troubleshooting when the compile-gpu step times out.
needs_verbose = builder.needs_gpu_binaries
build_type = get_cmake_build_type(builder)
cmd = ["cmake", "--build", builder.build_dir, "--config", build_type]
cmd += ["--parallel", f"{ideal_cpu_count}"]
if do_install:
cmd += ["--target", "install"]
if needs_verbose:
cmd += ["--verbose"]
builder.call(cmd)
def compile_code(builder: worker.blender.CodeBuilder) -> None:
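    """Configure and build Blender code without GPU kernel binaries."""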
builder.needs_gpu_binaries = False
builder.setup_build_environment()
clean_directories(builder)
cmake_configure(builder)
cmake_build(builder, False)
def compile_gpu(builder: worker.blender.CodeBuilder) -> None:
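    """Configure and build Cycles GPU kernel binaries; not needed on macOS."""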
if builder.platform == "darwin":
worker.utils.info("Compile GPU not required on macOS")
return
builder.needs_gpu_binaries = True
builder.setup_build_environment()
cmake_configure(builder)
cmake_build(builder, False)
def compile_install(builder: worker.blender.CodeBuilder) -> None:
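    """Configure, build and install Blender into the install directory."""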
builder.setup_build_environment()
cmake_configure(builder)
cmake_build(builder, True)