basilisk-monitoring-worker (v2025.1023.56aa3288)

Published 2025-10-23 19:08:24 -07:00 by CanuteTheGreat

Installation

docker pull git.canutethegreat.com/canutethegreat/basilisk-monitoring-worker:v2025.1023.56aa3288
sha256:bc01edfdc2e0cd4ed2353014c4ef474b9b0a48bb20fcc046c31edb7ea8057a8d

Image layers

ARG RELEASE
ARG LAUNCHPAD_BUILD_ARCH
LABEL org.opencontainers.image.ref.name=ubuntu
LABEL org.opencontainers.image.version=24.04
ADD file:bcebbf0fddcba5b864d5d267b68dd23bcfb01275e6ec7bcab69bf8b56af14804 in /
CMD ["/bin/bash"]
RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends apt-utils build-essential ca-certificates curl libncurses6 libncursesw6 patch wget rsync unzip jq gnupg libtcmalloc-minimal4 && rm -rf /var/lib/apt/lists/* && echo "hsts=0" > /root/.wgetrc # buildkit
ARG CUDA_VERSION=12.8.0.038
ARG CUDA_DRIVER_VERSION=570.86.10
ARG JETPACK_HOST_MOUNTS=
ENV CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 CUDA_CACHE_DISABLE=1 NVIDIA_REQUIRE_JETPACK_HOST_MOUNTS=
RUN |3 CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 JETPACK_HOST_MOUNTS= /bin/sh -c if [ -n "${JETPACK_HOST_MOUNTS}" ]; then echo "/usr/lib/aarch64-linux-gnu/tegra" > /etc/ld.so.conf.d/nvidia-tegra.conf && echo "/usr/lib/aarch64-linux-gnu/tegra-egl" >> /etc/ld.so.conf.d/nvidia-tegra.conf; fi # buildkit
RUN |3 CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 JETPACK_HOST_MOUNTS= /bin/sh -c /nvidia/build-scripts/installCUDA.sh # buildkit
RUN |3 CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 JETPACK_HOST_MOUNTS= /bin/sh -c cp -vprd /nvidia/. / && patch -p0 < /etc/startup_scripts.patch && rm -f /etc/startup_scripts.patch # buildkit
ENV _CUDA_COMPAT_PATH=/usr/local/cuda/compat ENV=/etc/shinit_v2 BASH_ENV=/etc/bash.bashrc SHELL=/bin/bash NVIDIA_REQUIRE_CUDA=cuda>=9.0
LABEL com.nvidia.volumes.needed=nvidia_driver com.nvidia.cuda.version=9.0
ARG NCCL_VERSION=2.25.1
ARG CUBLAS_VERSION=12.8.3.14
ARG CUFFT_VERSION=11.3.3.41
ARG CURAND_VERSION=10.3.9.55
ARG CUSPARSE_VERSION=12.5.7.53
ARG CUSOLVER_VERSION=11.7.2.55
ARG CUTENSOR_VERSION=2.1.0.9
ARG NPP_VERSION=12.3.3.65
ARG NVJPEG_VERSION=12.3.5.57
ARG CUFILE_VERSION=1.13.0.11
ARG NVJITLINK_VERSION=12.8.61
ARG CUDNN_VERSION=9.7.0.66
ARG CUDNN_FRONTEND_VERSION=1.9.0
ARG TRT_VERSION=10.8.0.40
ARG TRTOSS_VERSION=
ARG NSIGHT_SYSTEMS_VERSION=2024.6.2.225
ARG NSIGHT_COMPUTE_VERSION=2025.1.0.14
ARG CUSPARSELT_VERSION=0.6.3.2
ENV NCCL_VERSION=2.25.1 CUBLAS_VERSION=12.8.3.14 CUFFT_VERSION=11.3.3.41 CURAND_VERSION=10.3.9.55 CUSPARSE_VERSION=12.5.7.53 CUSPARSELT_VERSION=0.6.3.2 CUSOLVER_VERSION=11.7.2.55 CUTENSOR_VERSION=2.1.0.9 NPP_VERSION=12.3.3.65 NVJPEG_VERSION=12.3.5.57 CUFILE_VERSION=1.13.0.11 NVJITLINK_VERSION=12.8.61 CUDNN_VERSION=9.7.0.66 CUDNN_FRONTEND_VERSION=1.9.0 TRT_VERSION=10.8.0.40 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.2.225 NSIGHT_COMPUTE_VERSION=2025.1.0.14
RUN |21 CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.25.1 CUBLAS_VERSION=12.8.3.14 CUFFT_VERSION=11.3.3.41 CURAND_VERSION=10.3.9.55 CUSPARSE_VERSION=12.5.7.53 CUSOLVER_VERSION=11.7.2.55 CUTENSOR_VERSION=2.1.0.9 NPP_VERSION=12.3.3.65 NVJPEG_VERSION=12.3.5.57 CUFILE_VERSION=1.13.0.11 NVJITLINK_VERSION=12.8.61 CUDNN_VERSION=9.7.0.66 CUDNN_FRONTEND_VERSION=1.9.0 TRT_VERSION=10.8.0.40 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.2.225 NSIGHT_COMPUTE_VERSION=2025.1.0.14 CUSPARSELT_VERSION=0.6.3.2 /bin/sh -c /nvidia/build-scripts/installLIBS.sh && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -z "${JETPACK_HOST_MOUNTS}" ]; then /nvidia/build-scripts/installNCCL.sh; fi; # buildkit
LABEL com.nvidia.nccl.version=2.25.1 com.nvidia.cublas.version=12.8.3.14 com.nvidia.cufft.version=11.3.3.41 com.nvidia.curand.version=10.3.9.55 com.nvidia.cusparse.version=12.5.7.53 com.nvidia.cusparselt.version=0.6.3.2 com.nvidia.cusolver.version=11.7.2.55 com.nvidia.cutensor.version=2.1.0.9 com.nvidia.npp.version=12.3.3.65 com.nvidia.nvjpeg.version=12.3.5.57 com.nvidia.cudnn.version=9.7.0.66 com.nvidia.tensorrt.version=10.8.0.40 com.nvidia.tensorrtoss.version= com.nvidia.nsightsystems.version=2024.6.2.225 com.nvidia.nsightcompute.version=2025.1.0.14
ARG DALI_VERSION=1.45.0
ARG DALI_BUILD=
ARG DALI_URL_SUFFIX=120
ARG POLYGRAPHY_VERSION=0.49.16
ARG TRANSFORMER_ENGINE_VERSION=1.14
ARG MODEL_OPT_VERSION=0.21.0
ENV DALI_VERSION=1.45.0 DALI_BUILD= DALI_URL_SUFFIX=120 POLYGRAPHY_VERSION=0.49.16 TRANSFORMER_ENGINE_VERSION=1.14 MODEL_OPT_VERSION=0.21.0
ADD docs.tgz / # buildkit
RUN |27 CUDA_VERSION=12.8.0.038 CUDA_DRIVER_VERSION=570.86.10 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.25.1 CUBLAS_VERSION=12.8.3.14 CUFFT_VERSION=11.3.3.41 CURAND_VERSION=10.3.9.55 CUSPARSE_VERSION=12.5.7.53 CUSOLVER_VERSION=11.7.2.55 CUTENSOR_VERSION=2.1.0.9 NPP_VERSION=12.3.3.65 NVJPEG_VERSION=12.3.5.57 CUFILE_VERSION=1.13.0.11 NVJITLINK_VERSION=12.8.61 CUDNN_VERSION=9.7.0.66 CUDNN_FRONTEND_VERSION=1.9.0 TRT_VERSION=10.8.0.40 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.2.225 NSIGHT_COMPUTE_VERSION=2025.1.0.14 CUSPARSELT_VERSION=0.6.3.2 DALI_VERSION=1.45.0 DALI_BUILD= DALI_URL_SUFFIX=120 POLYGRAPHY_VERSION=0.49.16 TRANSFORMER_ENGINE_VERSION=1.14 MODEL_OPT_VERSION=0.21.0 /bin/sh -c echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf # buildkit
ARG _LIBPATH_SUFFIX=
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin LD_LIBRARY_PATH=/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 NVIDIA_VISIBLE_DEVICES=all NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
COPY entrypoint/ /opt/nvidia/ # buildkit
ENV NVIDIA_PRODUCT_NAME=CUDA
ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]
COPY NVIDIA_Deep_Learning_Container_License.pdf /workspace/ # buildkit
RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends build-essential git libglib2.0-0 less libhwloc15 libnl-route-3-200 libnl-3-dev libnl-route-3-dev libnuma-dev libnuma1 libpmi2-0-dev nano numactl openssh-client vim wget && rm -rf /var/lib/apt/lists/* # buildkit
ARG GDRCOPY_VERSION=2.4.1
ARG HPCX_VERSION=2.21
ARG RDMACORE_VERSION=39.0
ARG MOFED_VERSION=5.4-rdmacore39.0
ARG OPENUCX_VERSION=1.18.0
ARG OPENMPI_VERSION=4.1.7
ARG EFA_VERSION=1.34.0
ARG AWS_OFI_NCCL_VERSION=1.12.1
ENV GDRCOPY_VERSION=2.4.1 HPCX_VERSION=2.21 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.18.0 OPENMPI_VERSION=4.1.7 RDMACORE_VERSION=39.0 EFA_VERSION=1.34.0 AWS_OFI_NCCL_VERSION=1.12.1
ARG TARGETARCH=amd64
RUN |9 GDRCOPY_VERSION=2.4.1 HPCX_VERSION=2.21 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.18.0 OPENMPI_VERSION=4.1.7 EFA_VERSION=1.34.0 AWS_OFI_NCCL_VERSION=1.12.1 TARGETARCH=amd64 /bin/sh -c cd /nvidia && ( export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends libibverbs1 libibverbs-dev librdmacm1 librdmacm-dev libibumad3 libibumad-dev ibverbs-utils ibverbs-providers && rm -rf /var/lib/apt/lists/* && rm $(dpkg-query -L libibverbs-dev librdmacm-dev libibumad-dev | grep "\(\.so\|\.a\)$") ) && ( cd opt/gdrcopy/ && dpkg -i libgdrapi_*.deb ) && ( cp -r opt/hpcx /opt/ && cp etc/ld.so.conf.d/hpcx.conf /etc/ld.so.conf.d/ && ln -sf /opt/hpcx/ompi /usr/local/mpi && ln -sf /opt/hpcx/ucx /usr/local/ucx && sed -i 's/^\(hwloc_base_binding_policy\) = core$/\1 = none/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf && sed -i 's/^\(btl = self\)$/#\1/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf ) && ( if [ ! -f /etc/ld.so.conf.d/nvidia-tegra.conf ]; then cd opt/amazon/efa/ && dpkg -i libfabric*.deb && rm /opt/amazon/efa/lib/libfabric.a && echo "/opt/amazon/efa/lib" > /etc/ld.so.conf.d/efa.conf; fi ) && ldconfig # buildkit
ENV OPAL_PREFIX=/opt/hpcx/ompi PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/amazon/efa/bin
ENV OMPI_MCA_coll_hcoll_enable=0
ENV NCCL_NVLS_ENABLE=0
COPY cuda-*.patch /tmp # buildkit
RUN |9 GDRCOPY_VERSION=2.4.1 HPCX_VERSION=2.21 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.18.0 OPENMPI_VERSION=4.1.7 EFA_VERSION=1.34.0 AWS_OFI_NCCL_VERSION=1.12.1 TARGETARCH=amd64 /bin/sh -c export DEVEL=1 BASE=0 && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUDA.sh && /nvidia/build-scripts/installLIBS.sh && if [ ! -f /etc/ld.so.conf.d/nvidia-tegra.conf ]; then /nvidia/build-scripts/installNCCL.sh; fi && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -f "/tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch" ]; then patch -p0 < /tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch; fi && rm -f /tmp/cuda-*.patch # buildkit
ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:
COPY /opt/amazon/aws-ofi-nccl /opt/amazon/aws-ofi-nccl # buildkit
RUN |9 GDRCOPY_VERSION=2.4.1 HPCX_VERSION=2.21 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.18.0 OPENMPI_VERSION=4.1.7 EFA_VERSION=1.34.0 AWS_OFI_NCCL_VERSION=1.12.1 TARGETARCH=amd64 /bin/sh -c if [ ! -f /etc/ld.so.conf.d/nvidia-tegra.conf ]; then echo "/opt/amazon/aws-ofi-nccl/lib" > /etc/ld.so.conf.d/aws-ofi-nccl.conf && ldconfig; fi # buildkit
ENV NVIDIA_PRODUCT_NAME=PyTorch
ARG NVIDIA_PYTORCH_VERSION=25.01
ARG PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae
ARG NVFUSER_BUILD_VERSION=6627725
ENV PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae PYTORCH_VERSION=2.6.0a0+ecf3bae PYTORCH_BUILD_NUMBER=0 NVIDIA_PYTORCH_VERSION=25.01
ENV NVFUSER_BUILD_VERSION=6627725 NVFUSER_VERSION=6627725
LABEL com.nvidia.pytorch.version=2.6.0a0+ecf3bae
ARG TARGETARCH=amd64
ARG PYVER=3.12
ARG PYVER_MAJMIN=312
ENV PIP_BREAK_SYSTEM_PACKAGES=1
ARG L4T=0
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c export PYSFX=`echo "${PYVER}" | cut -c1-1` && export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends python$PYVER-dev python$PYSFX python$PYSFX-dev python$PYSFX-venv python-is-python$PYSFX autoconf automake libatlas-base-dev libgoogle-glog-dev libbz2-dev libc-ares2 libre2-dev libleveldb-dev liblmdb-dev libprotobuf-dev libsnappy-dev libtool nasm protobuf-compiler pkg-config unzip sox libsndfile1 libpng-dev libhdf5-dev gfortran rapidjson-dev ninja-build libedit-dev build-essential patchelf && rm -rf /var/lib/apt/lists/* # buildkit
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c DEBIAN_FRONTEND=noninteractive apt remove -y --force-yes python3-pip && curl -O https://bootstrap.pypa.io/get-pip.py && python get-pip.py && rm get-pip.py # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c pip install --no-cache-dir pip 'setuptools<71' && pip install --no-cache-dir cmake # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/OpenBLAS/0.3.24-$(uname -m)/OpenBLAS-0.3.24-$(uname -m).tar.gz" --output OpenBLAS.tar.gz && tar -xf OpenBLAS.tar.gz -C /usr/local/ && rm OpenBLAS.tar.gz # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c if [ $TARGETARCH = "arm64" ]; then cd /opt && curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/nvpl_slim_24.04/sbsa/nvpl_slim_24.04.tar" --output nvpl_slim_24.04.tar && tar -xf nvpl_slim_24.04.tar && cp -r nvpl_slim_24.04/lib/* /usr/local/lib && cp -r nvpl_slim_24.04/include/* /usr/local/include && rm -rf nvpl_slim_24.04.tar nvpl_slim_24.04 ; fi # buildkit
ENV NVPL_LAPACK_MATH_MODE=PEDANTIC
WORKDIR /opt/pytorch
COPY . . # buildkit
ENV PYTHONIOENCODING=utf-8
ENV LC_ALL=C.UTF-8
ENV PIP_DEFAULT_TIMEOUT=100
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c pip install --no-cache-dir numpy==1.26.4 scipy==1.11.3 "PyYAML>=5.4.1" astunparse typing_extensions cffi spacy==3.7.5 mock tqdm librosa==0.10.1 expecttest==0.1.3 hypothesis==5.35.1 xdoctest==1.0.2 pytest==8.1.1 pytest-xdist pytest-rerunfailures pytest-shard pytest-flakefinder pybind11 Cython "regex>=2020.1.8" protobuf==4.24.4 six==1.16.0 && if [[ $TARGETARCH = "amd64" ]] ; then pip install --no-cache-dir mkl==2021.1.1 mkl-include==2021.1.1 mkl-devel==2021.1.1 ; find /usr/local/lib -maxdepth 1 -type f -regex '.*\/lib\(tbb\|mkl\).*\.so\($\|\.[0-9]*\.[0-9]*\)' -exec rm -v {} + ; fi # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c git config --global url."https://github".insteadOf git://github && pip install --no-cache-dir 'jupyterlab>=4.1.0,<5.0.0a0' notebook tensorboard==2.16.2 jupyterlab_code_formatter python-hostlist # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c PATCHED_FILE=$(python -c "from tensorboard.plugins.core import core_plugin as _; print(_.__file__)") && sed -i 's/^\( *"--bind_all",\)$/\1 default=True,/' "$PATCHED_FILE" && test $(grep '^ *"--bind_all", default=True,$' "$PATCHED_FILE" | wc -l) -eq 1 # buildkit
WORKDIR /opt/pytorch
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c pip install --no-cache-dir /builder/*.whl jupytext black isort && mkdir -p /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ && jupyter lab clean # buildkit
COPY jupyter_config/jupyter_notebook_config.py /usr/local/etc/jupyter/ # buildkit
COPY jupyter_config/manager.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit
COPY jupyter_config/settings.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit
ENV JUPYTER_PORT=8888
ENV TENSORBOARD_PORT=6006
EXPOSE 8888/tcp
EXPOSE 6006/tcp
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c OPENCV_VERSION=4.10.0 && cd / && wget -q -O - https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz | tar -xzf - && cd /opencv-${OPENCV_VERSION} && cmake -GNinja -Bbuild -H. -DWITH_CUDA=OFF -DWITH_1394=OFF -DPYTHON3_PACKAGES_PATH="/usr/local/lib/python${PYVER}/dist-packages" -DBUILD_opencv_cudalegacy=OFF -DBUILD_opencv_stitching=OFF -DWITH_IPP=OFF -DWITH_PROTOBUF=OFF && cmake --build build --target install && cd modules/python/package && pip install --no-cache-dir --disable-pip-version-check -v . && rm -rf /opencv-${OPENCV_VERSION} # buildkit
ENV UCC_CL_BASIC_TLS=^sharp
ENV TORCH_CUDA_ARCH_LIST=7.5 8.0 8.6 9.0 10.0 12.0+PTX
ENV PYTORCH_HOME=/opt/pytorch/pytorch
ENV CUDA_HOME=/usr/local/cuda
ENV TORCH_ALLOW_TF32_CUBLAS_OVERRIDE=1
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c pip install /opt/transfer/torch*.whl && patchelf --set-rpath '/usr/local/lib' /usr/local/lib/python${PYVER}/dist-packages/torch/lib/libtorch_global_deps.so # buildkit
COPY /usr/local/share/cmake/TorchVision/ /usr/local/share/cmake/TorchVision/ # buildkit
COPY /usr/local/include/torchvision/ /usr/local/include/torchvision/ # buildkit
COPY /usr/local/lib64/libtorchvision.so /usr/local/lib/libtorchvision.so.1.0 # buildkit
COPY /usr/local/lib64/libjpeg* /usr/local/lib/ # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c patchelf --set-rpath '$ORIGIN:/usr/local/lib/python3.12/dist-packages/torch/lib:/usr/local/lib/python3.12/dist-packages/torchvision/' /usr/local/lib/libtorchvision.so.1.0 && patchelf --set-soname libtorchvision.so.1 --output /usr/local/lib/libtorchvision.so.1.0 /usr/local/lib/libtorchvision.so.1.0 && ldconfig && pushd /usr/local/lib && ln -s libtorchvision.so.1 /usr/local/lib/libtorchvision.so && popd && patchelf --set-soname libjpeg.so.62 --output /usr/local/lib/libjpeg.so.62 $(readlink -f $(ldd /usr/local/lib/python3.12/dist-packages/torchvision/image.so | grep libjpeg | awk '{print $3}')) # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c cd pytorch && pip install --no-cache-dir -v -r /opt/pytorch/pytorch/requirements.txt # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 /bin/sh -c pip install --no-cache-dir /tmp/dist/*.whl && sed -i "s/return 80 + minor - 1/return 86/g" /usr/local/lib/python${PYVER}/dist-packages/triton/backends/nvidia/compiler.py # buildkit
ARG DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c if [ -z "${DALI_VERSION}" ] ; then echo "Not Installing DALI for L4T Build." ; else export CUDA_VERSION_MAJOR=$(ls /usr/local/cuda/lib64/libcudart.so.*.*.* | cut -d . -f 3) && export DALI_PKG_SUFFIX="cuda${CUDA_VERSION_MAJOR}0" && if [ -z "${DALI_URL_SUFFIX}" ] ; then export DALI_EXTRA_INDEX_URL="${DALI_EXTRA_INDEX_URL}-qa"; fi && pip install --disable-pip-version-check --no-cache-dir --extra-index-url https://developer.download.nvidia.com/compute/redist --extra-index-url "${DALI_EXTRA_INDEX_URL}" --trusted-host sqrl nvidia-dali-${DALI_PKG_SUFFIX}==${DALI_VERSION}; fi # buildkit
ENV COCOAPI_VERSION=2.0+nv0.8.1
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c export COCOAPI_TAG=$(echo ${COCOAPI_VERSION} | sed 's/^.*+n//') && pip install --disable-pip-version-check --no-cache-dir git+https://github.com/nvidia/cocoapi.git@${COCOAPI_TAG}#subdirectory=PythonAPI # buildkit
COPY singularity/ /.singularity.d/ # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c ( cd fuser && pip install -r requirements.txt && python setup.py -version-tag=a0+${NVFUSER_VERSION} install && python setup.py clean && cp $(find /usr/local/lib/python${PYVER}/dist-packages/ -name libnvfuser_codegen.so) /usr/local/lib/python${PYVER}/dist-packages/torch/lib/ ) && ( cd lightning-thunder && python setup.py install && rm -rf build *.egg-info) && ( cd lightning-thunder && mkdir tmp && cd tmp && git clone -b v${CUDNN_FRONTEND_VERSION} --recursive --single-branch https://github.com/NVIDIA/cudnn-frontend.git cudnn_frontend && cd cudnn_frontend && pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . && cd ../../ && rm -rf tmp ) && ( cd pytorch/third_party/onnx && pip uninstall typing -y && CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON" pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . ) # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c pip install --no-cache-dir --disable-pip-version-check tabulate # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing rapids for L4T build." ; else find /rapids -name "*-Linux.tar.gz" -exec tar -C /usr --exclude="*.a" --exclude="bin/xgboost" --strip-components=1 -xvf {} \; && find /rapids -name "*.whl" ! -name "tornado-*" ! -name "Pillow-*" ! -name "certifi-*" ! -name "protobuf-*" -exec pip install --no-cache-dir {} + ; pip install numpy==1.26.4; fi # buildkit
WORKDIR /workspace
COPY NVREADME.md README.md # buildkit
COPY docker-examples docker-examples # buildkit
COPY tutorials tutorials # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c chmod -R a+w . # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c set -x && WHEELS=1 /nvidia/build-scripts/installTRT.sh # buildkit
ENV PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/amazon/efa/bin:/opt/tensorrt/bin
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c pip --version && python -c 'import sys; print(sys.platform)' && pip install --no-cache-dir nvidia-pyindex && pip install --extra-index-url https://urm.nvidia.com/artifactory/api/pypi/sw-tensorrt-pypi/simple --no-cache-dir "polygraphy==${POLYGRAPHY_VERSION}" && pip install --extra-index-url https://pypi.nvidia.com "nvidia-modelopt[torch]==${MODEL_OPT_VERSION}" # buildkit
COPY torch_tensorrt/ /opt/pytorch/torch_tensorrt/ # buildkit
ARG PYVER=3.12
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c pip install --no-cache-dir /opt/pytorch/apex/dist/*.whl # buildkit
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c pip install --no-cache-dir /opt/pytorch/torch_tensorrt/dist/*.whl # buildkit
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.12/dist-packages/torch/lib:/usr/local/lib/python3.12/dist-packages/torch_tensorrt/lib:/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV PATH=/usr/local/lib/python3.12/dist-packages/torch_tensorrt/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/amazon/efa/bin:/opt/tensorrt/bin
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing Transformer Engine in iGPU container until Version variable is set"; else CI_JOB_TOKEN=$(cat /run/secrets/CI_JOB_TOKEN) && echo $CI_JOB_TOKEN && git clone -b release_v${TRANSFORMER_ENGINE_VERSION} --single-branch --recursive https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab-master.nvidia.com/dl/transformerengine/transformerengine.git && env MAX_JOBS=6 pip install --no-cache-dir --no-build-isolation ./transformerengine/3rdparty/flashattn_internal && env NVTE_CUDA_ARCHS="70;80;89;90;100;120" NVTE_BUILD_THREADS_PER_JOB=8 pip install --no-cache-dir --no-build-isolation ./transformerengine && rm -rf transformerengine ; fi # buildkit
ENV TORCH_CUDNN_V8_API_ENABLED=1
ENV CUDA_MODULE_LOADING=LAZY
ENV TORCH_NCCL_USE_COMM_NONBLOCKING=0
RUN |8 NVIDIA_PYTORCH_VERSION=25.01 PYTORCH_BUILD_VERSION=2.6.0a0+ecf3bae NVFUSER_BUILD_VERSION=6627725 TARGETARCH=amd64 PYVER=3.12 PYVER_MAJMIN=312 L4T=0 DALI_EXTRA_INDEX_URL=http://sqrl/nvdl/datasets/dali/pip-dali /bin/sh -c ln -sf ${_CUDA_COMPAT_PATH}/lib.real ${_CUDA_COMPAT_PATH}/lib && echo ${_CUDA_COMPAT_PATH}/lib > /etc/ld.so.conf.d/00-cuda-compat.conf && ldconfig && rm -f ${_CUDA_COMPAT_PATH}/lib # buildkit
COPY entrypoint.d/ /opt/nvidia/entrypoint.d/ # buildkit
ARG NVIDIA_BUILD_ID=134983853
ENV NVIDIA_BUILD_ID=134983853
LABEL com.nvidia.build.id=134983853
ARG NVIDIA_BUILD_REF=3857cfd7a27acea1d4e19243329fcd1dae3b222e
LABEL com.nvidia.build.ref=3857cfd7a27acea1d4e19243329fcd1dae3b222e
WORKDIR /app
RUN /bin/sh -c apt-get update -qq 2>/dev/null || true && apt-get install -y -qq --no-install-recommends ffmpeg tini pciutils clinfo 2>/dev/null || true && rm -rf /var/lib/apt/lists/* 2>/dev/null || true
COPY file:40f49d8278098e1b46f59fd86cf5a19b3f88ed970370bcd6a4f53109e18b1c7e in .
RUN /bin/sh -c pip install --no-cache-dir -r requirements.txt && pip install --no-cache-dir "numpy<2.0" "torch>=2.6.0" ultralytics timm sentencepiece transformers mediapipe opencv-python-headless
COPY dir:c752c1bd103ec1b1c76db9d8eb24d58172e8b70fccdf5730efc73fd80d2414c2 in /app/
RUN /bin/sh -c cp -r /app/monitoring_worker/ ./monitoring_worker/ 2>/dev/null || true
RUN /bin/sh -c python -m compileall -q /app || true
ENV PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=0 PYTHONOPTIMIZE=1
ENV WORKER_ID=
ENV LOG_LEVEL=INFO
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 CMD python -c "import psycopg; psycopg.connect('host=postgres dbname=basilisk user=basilisk password=basilisk_secure_password').close()" || exit 1
ENTRYPOINT ["/usr/bin/tini", "--"]
CMD ["python", "-m", "monitoring_worker.monitoring_worker"]
LABEL com.docker.compose.image.builder=classic

Labels

Key Value
com.docker.compose.image.builder classic
com.nvidia.build.id 134983853
com.nvidia.build.ref 3857cfd7a27acea1d4e19243329fcd1dae3b222e
com.nvidia.cublas.version 12.8.3.14
com.nvidia.cuda.version 9.0
com.nvidia.cudnn.version 9.7.0.66
com.nvidia.cufft.version 11.3.3.41
com.nvidia.curand.version 10.3.9.55
com.nvidia.cusolver.version 11.7.2.55
com.nvidia.cusparse.version 12.5.7.53
com.nvidia.cusparselt.version 0.6.3.2
com.nvidia.cutensor.version 2.1.0.9
com.nvidia.nccl.version 2.25.1
com.nvidia.npp.version 12.3.3.65
com.nvidia.nsightcompute.version 2025.1.0.14
com.nvidia.nsightsystems.version 2024.6.2.225
com.nvidia.nvjpeg.version 12.3.5.57
com.nvidia.pytorch.version 2.6.0a0+ecf3bae
com.nvidia.tensorrt.version 10.8.0.40
com.nvidia.tensorrtoss.version
com.nvidia.volumes.needed nvidia_driver
org.opencontainers.image.ref.name ubuntu
org.opencontainers.image.version 24.04
Details
Container
2025-10-23 19:08:24 -07:00
0
OCI / Docker
linux/amd64
18 GiB
Versions (4) View all
v2025.1025.12385d58 2025-10-25
v2025.1024.b2717bd7 2025-10-24
v2025.1023.56aa3288 2025-10-23
latest 2025-09-09