basilisk-monitoring-worker (sha256:8f65e6de3f746506795bf87ef3681174074bd265685da5adec05dfbea905ddaa)

Published 2025-09-09 09:16:39 -07:00 by CanuteTheGreat

Installation

docker pull git.canutethegreat.com/canutethegreat/basilisk-monitoring-worker@sha256:8f65e6de3f746506795bf87ef3681174074bd265685da5adec05dfbea905ddaa
sha256:8f65e6de3f746506795bf87ef3681174074bd265685da5adec05dfbea905ddaa

Image layers

ARG RELEASE
ARG LAUNCHPAD_BUILD_ARCH
LABEL org.opencontainers.image.ref.name=ubuntu
LABEL org.opencontainers.image.version=22.04
ADD file:ebe009f86035c175ba244badd298a2582914415cf62783d510eab3a311a5d4e1 in /
CMD ["/bin/bash"]
RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends apt-utils build-essential ca-certificates curl libncurses5 libncursesw5 patch wget rsync unzip jq gnupg libtcmalloc-minimal4 && rm -rf /var/lib/apt/lists/* && echo "hsts=0" > /root/.wgetrc # buildkit
ARG CUDA_VERSION=12.6.2.004
ARG CUDA_DRIVER_VERSION=560.35.03
ARG JETPACK_HOST_MOUNTS=
ENV CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 CUDA_CACHE_DISABLE=1 NVIDIA_REQUIRE_JETPACK_HOST_MOUNTS=
RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c if [ -n "${JETPACK_HOST_MOUNTS}" ]; then echo "/usr/lib/aarch64-linux-gnu/tegra" > /etc/ld.so.conf.d/nvidia-tegra.conf && echo "/usr/lib/aarch64-linux-gnu/tegra-egl" >> /etc/ld.so.conf.d/nvidia-tegra.conf; fi # buildkit
RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c /nvidia/build-scripts/installCUDA.sh # buildkit
RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c cp -vprd /nvidia/. / && patch -p0 < /etc/startup_scripts.patch && rm -f /etc/startup_scripts.patch # buildkit
ENV _CUDA_COMPAT_PATH=/usr/local/cuda/compat ENV=/etc/shinit_v2 BASH_ENV=/etc/bash.bashrc SHELL=/bin/bash NVIDIA_REQUIRE_CUDA=cuda>=9.0
LABEL com.nvidia.volumes.needed=nvidia_driver com.nvidia.cuda.version=9.0
ARG NCCL_VERSION=2.22.3
ARG CUBLAS_VERSION=12.6.3.3
ARG CUFFT_VERSION=11.3.0.4
ARG CURAND_VERSION=10.3.7.77
ARG CUSPARSE_VERSION=12.5.4.2
ARG CUSOLVER_VERSION=11.7.1.2
ARG CUTENSOR_VERSION=2.0.2.5
ARG NPP_VERSION=12.3.1.54
ARG NVJPEG_VERSION=12.3.3.54
ARG CUDNN_VERSION=9.5.0.50
ARG CUDNN_FRONTEND_VERSION=1.7.0
ARG TRT_VERSION=10.5.0.18
ARG TRTOSS_VERSION=
ARG NSIGHT_SYSTEMS_VERSION=2024.6.1.90
ARG NSIGHT_COMPUTE_VERSION=2024.3.2.3
ARG CUSPARSELT_VERSION=0.6.2.3
ENV NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSPARSELT_VERSION=0.6.2.3 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3
RUN |19 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3 CUSPARSELT_VERSION=0.6.2.3 /bin/sh -c /nvidia/build-scripts/installLIBS.sh && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -z "${JETPACK_HOST_MOUNTS}" ]; then /nvidia/build-scripts/installNCCL.sh; fi; # buildkit
LABEL com.nvidia.nccl.version=2.22.3 com.nvidia.cublas.version=12.6.3.3 com.nvidia.cufft.version=11.3.0.4 com.nvidia.curand.version=10.3.7.77 com.nvidia.cusparse.version=12.5.4.2 com.nvidia.cusparselt.version=0.6.2.3 com.nvidia.cusolver.version=11.7.1.2 com.nvidia.cutensor.version=2.0.2.5 com.nvidia.npp.version=12.3.1.54 com.nvidia.nvjpeg.version=12.3.3.54 com.nvidia.cudnn.version=9.5.0.50 com.nvidia.tensorrt.version=10.5.0.18 com.nvidia.tensorrtoss.version= com.nvidia.nsightsystems.version=2024.6.1.90 com.nvidia.nsightcompute.version=2024.3.2.3
ARG DALI_VERSION=1.42.0
ARG DALI_BUILD=18507157
ARG POLYGRAPHY_VERSION=0.49.13
ARG TRANSFORMER_ENGINE_VERSION=1.11
ARG MODEL_OPT_VERSION=0.17.0
ENV DALI_VERSION=1.42.0 DALI_BUILD=18507157 POLYGRAPHY_VERSION=0.49.13 TRANSFORMER_ENGINE_VERSION=1.11 MODEL_OPT_VERSION=0.17.0
ADD docs.tgz / # buildkit
RUN |24 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3 CUSPARSELT_VERSION=0.6.2.3 DALI_VERSION=1.42.0 DALI_BUILD=18507157 POLYGRAPHY_VERSION=0.49.13 TRANSFORMER_ENGINE_VERSION=1.11 MODEL_OPT_VERSION=0.17.0 /bin/sh -c echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf # buildkit
ARG _LIBPATH_SUFFIX=
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin LD_LIBRARY_PATH=/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 NVIDIA_VISIBLE_DEVICES=all NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
COPY entrypoint/ /opt/nvidia/ # buildkit
ENV NVIDIA_PRODUCT_NAME=CUDA
ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]
COPY NVIDIA_Deep_Learning_Container_License.pdf /workspace/ # buildkit
RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends build-essential git libglib2.0-0 less libnl-route-3-200 libnl-3-dev libnl-route-3-dev libnuma-dev libnuma1 libpmi2-0-dev nano numactl openssh-client vim wget && rm -rf /var/lib/apt/lists/* # buildkit
ARG GDRCOPY_VERSION=2.3.1-1
ARG HPCX_VERSION=2.20
ARG RDMACORE_VERSION=39.0
ARG MOFED_VERSION=5.4-rdmacore39.0
ARG OPENUCX_VERSION=1.17.0
ARG OPENMPI_VERSION=4.1.7
ENV GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 RDMACORE_VERSION=39.0
ARG TARGETARCH=amd64
RUN |7 GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 TARGETARCH=amd64 /bin/sh -c cd /nvidia && ( export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends libibverbs1 libibverbs-dev librdmacm1 librdmacm-dev libibumad3 libibumad-dev ibverbs-utils ibverbs-providers && rm -rf /var/lib/apt/lists/* && rm $(dpkg-query -L libibverbs-dev librdmacm-dev libibumad-dev | grep "\(\.so\|\.a\)$") ) && ( cd opt/gdrcopy/ && dpkg -i libgdrapi_*.deb ) && ( cp -r opt/hpcx /opt/ && cp etc/ld.so.conf.d/hpcx.conf /etc/ld.so.conf.d/ && ln -sf /opt/hpcx/ompi /usr/local/mpi && ln -sf /opt/hpcx/ucx /usr/local/ucx && sed -i 's/^\(hwloc_base_binding_policy\) = core$/\1 = none/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf && sed -i 's/^\(btl = self\)$/#\1/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf ) && ldconfig # buildkit
ENV OPAL_PREFIX=/opt/hpcx/ompi PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin
ENV OMPI_MCA_coll_hcoll_enable=0
COPY cuda-*.patch /tmp # buildkit
RUN |7 GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 TARGETARCH=amd64 /bin/sh -c export DEVEL=1 BASE=0 && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUDA.sh && /nvidia/build-scripts/installLIBS.sh && if [ ! -f /etc/ld.so.conf.d/nvidia-tegra.conf ]; then /nvidia/build-scripts/installNCCL.sh; fi && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -f "/tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch" ]; then patch -p0 < /tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch; fi && rm -f /tmp/cuda-*.patch # buildkit
ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:
ARG NVIDIA_BUILD_REF=bb0f0608792391d35d9686e6d86a7cb319bddadc
ARG NVIDIA_BUILD_ID=114391310
ENV NVIDIA_BUILD_ID=114391310
LABEL com.nvidia.build.id=114391310
LABEL com.nvidia.build.ref=bb0f0608792391d35d9686e6d86a7cb319bddadc
ENV NVIDIA_PRODUCT_NAME=PyTorch
ARG NVIDIA_PYTORCH_VERSION=24.10
ARG PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0
ARG NVFUSER_BUILD_VERSION=f669fcf
ENV PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 PYTORCH_VERSION=2.5.0a0+e000cf0 PYTORCH_BUILD_NUMBER=0 NVIDIA_PYTORCH_VERSION=24.10
ENV NVFUSER_BUILD_VERSION=f669fcf NVFUSER_VERSION=f669fcf
LABEL com.nvidia.pytorch.version=2.5.0a0+e000cf0
ARG TARGETARCH=amd64
ARG PYVER=3.10
ARG L4T=0
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c export PYSFX=`echo "$PYVER" | cut -c1-1` && export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends python$PYVER-dev python$PYSFX python$PYSFX-dev python$PYSFX-distutils python-is-python$PYSFX autoconf automake libatlas-base-dev libgoogle-glog-dev libbz2-dev libc-ares2 libre2-9 libleveldb-dev liblmdb-dev libprotobuf-dev libsnappy-dev libtool nasm protobuf-compiler pkg-config unzip sox libsndfile1 libpng-dev libhdf5-103 libhdf5-dev gfortran rapidjson-dev ninja-build libedit-dev build-essential patchelf && rm -rf /var/lib/apt/lists/* # buildkit
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c curl -O https://bootstrap.pypa.io/get-pip.py && python get-pip.py && rm get-pip.py # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir pip 'setuptools<71' && pip install --no-cache-dir cmake # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/OpenBLAS/0.3.24-$(uname -m)/OpenBLAS-0.3.24-$(uname -m).tar.gz" --output OpenBLAS.tar.gz && tar -xf OpenBLAS.tar.gz -C /usr/local/ && rm OpenBLAS.tar.gz # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ $TARGETARCH = "arm64" ]; then cd /opt && curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/nvpl_slim_24.04/sbsa/nvpl_slim_24.04.tar" --output nvpl_slim_24.04.tar && tar -xf nvpl_slim_24.04.tar && cp -r nvpl_slim_24.04/lib/* /usr/local/lib && cp -r nvpl_slim_24.04/include/* /usr/local/include && rm -rf nvpl_slim_24.04.tar nvpl_slim_24.04 ; fi # buildkit
ENV NVPL_LAPACK_MATH_MODE=PEDANTIC
WORKDIR /opt/pytorch
COPY . . # buildkit
ENV PYTHONIOENCODING=utf-8
ENV LC_ALL=C.UTF-8
ENV PIP_DEFAULT_TIMEOUT=100
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir numpy==1.24.4 scipy==1.11.3 "PyYAML>=5.4.1" astunparse typing_extensions cffi spacy==3.7.5 mock tqdm librosa==0.10.1 expecttest==0.1.3 hypothesis==5.35.1 xdoctest==1.0.2 pytest==8.1.1 pytest-xdist pytest-rerunfailures pytest-shard pytest-flakefinder pybind11 Cython "regex>=2020.1.8" protobuf==4.24.4 && if [[ $TARGETARCH = "amd64" ]] ; then pip install --no-cache-dir mkl==2021.1.1 mkl-include==2021.1.1 mkl-devel==2021.1.1 ; find /usr/local/lib -maxdepth 1 -type f -regex '.*\/lib\(tbb\|mkl\).*\.so\($\|\.[0-9]*\.[0-9]*\)' -exec rm -v {} + ; fi # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c git config --global url."https://github".insteadOf git://github && pip install --no-cache-dir 'jupyterlab>=4.1.0,<5.0.0a0' notebook tensorboard==2.16.2 jupyterlab_code_formatter python-hostlist # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c PATCHED_FILE=$(python -c "from tensorboard.plugins.core import core_plugin as _; print(_.__file__)") && sed -i 's/^\( *"--bind_all",\)$/\1 default=True,/' "$PATCHED_FILE" && test $(grep '^ *"--bind_all", default=True,$' "$PATCHED_FILE" | wc -l) -eq 1 # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir jupyterlab-tensorboard-pro jupytext black isort && mkdir -p /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ && jupyter lab clean # buildkit
COPY jupyter_config/jupyter_notebook_config.py /usr/local/etc/jupyter/ # buildkit
COPY jupyter_config/manager.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit
COPY jupyter_config/settings.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit
ENV JUPYTER_PORT=8888
ENV TENSORBOARD_PORT=6006
EXPOSE 8888/tcp
EXPOSE 6006/tcp
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c OPENCV_VERSION=4.7.0 && cd / && wget -q -O - https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz | tar -xzf - && cd /opencv-${OPENCV_VERSION} && cmake -GNinja -Bbuild -H. -DWITH_CUDA=OFF -DWITH_1394=OFF -DPYTHON3_PACKAGES_PATH="/usr/local/lib/python${PYVER}/dist-packages" -DBUILD_opencv_cudalegacy=OFF -DBUILD_opencv_stitching=OFF -DWITH_IPP=OFF -DWITH_PROTOBUF=OFF && cmake --build build --target install && cd modules/python/package && pip install --no-cache-dir --disable-pip-version-check -v . && rm -rf /opencv-${OPENCV_VERSION} # buildkit
ENV UCC_CL_BASIC_TLS=^sharp
ENV TORCH_CUDA_ARCH_LIST=5.2 6.0 6.1 7.0 7.2 7.5 8.0 8.6 8.7 9.0+PTX
ENV PYTORCH_HOME=/opt/pytorch/pytorch
ENV CUDA_HOME=/usr/local/cuda
ENV TORCH_ALLOW_TF32_CUBLAS_OVERRIDE=1
ENV USE_EXPERIMENTAL_CUDNN_V8_API=1
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install /opt/transfer/torch*.whl && patchelf --set-rpath '/usr/local/lib' /usr/local/lib/python3.10/dist-packages/torch/lib/libtorch_global_deps.so # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c cd pytorch && pip install --no-cache-dir -v -r /opt/pytorch/pytorch/requirements.txt # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir /tmp/dist/*.whl # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ -z "${DALI_VERSION}" ] ; then echo "Not Installing DALI for L4T Build." ; else export DALI_PKG_SUFFIX="cuda${CUDA_VERSION%%.*}0" && pip install --disable-pip-version-check --no-cache-dir --extra-index-url https://developer.download.nvidia.com/compute/redist --extra-index-url http://sqrl/dldata/pip-dali${DALI_URL_SUFFIX:-} --trusted-host sqrl nvidia-dali-${DALI_PKG_SUFFIX}==${DALI_VERSION}; fi # buildkit
ENV COCOAPI_VERSION=2.0+nv0.8.0
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c export COCOAPI_TAG=$(echo ${COCOAPI_VERSION} | sed 's/^.*+n//') && pip install --disable-pip-version-check --no-cache-dir git+https://github.com/nvidia/cocoapi.git@${COCOAPI_TAG}#subdirectory=PythonAPI # buildkit
COPY singularity/ /.singularity.d/ # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c ( cd vision && export PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__)") && CFLAGS="-g0" FORCE_CUDA=1 NVCC_APPEND_FLAGS="--threads 8" pip install --no-cache-dir --no-build-isolation --disable-pip-version-check . ) && ( cd vision && cmake -Bbuild -H. -GNinja -DWITH_CUDA=1 -DCMAKE_PREFIX_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` && cmake --build build --target install && rm -rf build ) && ( cd fuser && pip install -r requirements.txt && python setup.py -version-tag=a0+${NVFUSER_VERSION} install && python setup.py clean && cp $(find /usr/local/lib/python3.10/dist-packages/ -name libnvfuser_codegen.so) /usr/local/lib/python3.10/dist-packages/torch/lib/ ) && ( cd lightning-thunder && python setup.py install && rm -rf build) && BUILD_OPTIONS="--cpp_ext --cuda_ext --bnp --xentropy --deprecated_fused_adam --deprecated_fused_lamb --fast_multihead_attn --distributed_lamb --fast_layer_norm --transducer --distributed_adam --fmha --permutation_search --focal_loss --fused_conv_bias_relu --index_mul_2d --cudnn_gbn --group_norm --gpu_direct_storage" && if [ "${L4T}" != "1" ]; then BUILD_OPTIONS="--fast_bottleneck --nccl_p2p --peer_memory --nccl_allocator ${BUILD_OPTIONS}"; fi && ( cd apex && CFLAGS="-g0" NVCC_APPEND_FLAGS="--threads 8" pip install -v --no-build-isolation --no-cache-dir --disable-pip-version-check --config-settings "--build-option=${BUILD_OPTIONS}" . && rm -rf build ) && ( cd lightning-thunder && mkdir tmp && cd tmp && git clone -b v${CUDNN_FRONTEND_VERSION} --recursive --single-branch https://github.com/NVIDIA/cudnn-frontend.git cudnn_frontend && cd cudnn_frontend && pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . && cd ../../ && rm -rf tmp ) && ( cd pytorch/third_party/onnx && pip uninstall typing -y && CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON" pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . ) # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir --disable-pip-version-check tabulate # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing rapids for L4T build." ; else find /rapids -name "*-Linux.tar.gz" -exec tar -C /usr --exclude="*.a" --exclude="bin/xgboost" --strip-components=1 -xvf {} \; && find /rapids -name "*.whl" ! -name "tornado-*" ! -name "Pillow-*" ! -name "certifi-*" ! -name "protobuf-*" -exec pip install --no-cache-dir {} + ; pip install numpy==1.24.4; fi # buildkit
WORKDIR /workspace
COPY NVREADME.md README.md # buildkit
COPY docker-examples docker-examples # buildkit
COPY examples examples # buildkit
COPY tutorials tutorials # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c chmod -R a+w . # buildkit
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c set -x && URL=$(VERIFY=1 /nvidia/build-scripts/installTRT.sh | sed -n "s/^.*\(http.*\)tar.*$/\1/p")tar && FILE=$(wget -O - $URL | sed -n 's/^.*href="\(TensorRT[^"]*\)".*$/\1/p' | egrep -v "internal|safety") && wget -q $URL/$FILE -O - | tar -xz && PY=$(python -c 'import sys; print(str(sys.version_info[0])+str(sys.version_info[1]))') && pip install TensorRT-*/python/tensorrt-*-cp$PY*.whl && mv /usr/src/tensorrt /opt && ln -s /opt/tensorrt /usr/src/tensorrt && rm -r TensorRT-* # buildkit
ENV PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin
RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip --version && python -c 'import sys; print(sys.platform)' && pip install --no-cache-dir nvidia-pyindex && pip install --extra-index-url https://urm.nvidia.com/artifactory/api/pypi/sw-tensorrt-pypi/simple --no-cache-dir polygraphy==0.49.12 && pip install --extra-index-url https://pypi.nvidia.com "nvidia-modelopt[torch]==${MODEL_OPT_VERSION}" # buildkit
COPY torch_tensorrt/ /opt/pytorch/torch_tensorrt/ # buildkit
ARG PYVER=3.10
RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c pip install --no-cache-dir /opt/pytorch/torch_tensorrt/dist/*.whl # buildkit
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.10/dist-packages/torch/lib:/usr/local/lib/python3.10/dist-packages/torch_tensorrt/lib:/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV PATH=/usr/local/lib/python3.10/dist-packages/torch_tensorrt/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin
RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing Flash Attention in iGPU as it is a requirement for Transformer Engine"; else total_mem_gb=$(grep MemTotal /proc/meminfo | awk '{print int($2 / 1024 / 1024)}'); max_jobs=$(( (total_mem_gb - 40) / 6 )); max_jobs=$(( max_jobs < 4 ? 4 : max_jobs )); max_jobs=$(( max_jobs > $(nproc) ? $(nproc) : max_jobs )); echo "Using MAX_JOBS=${max_jobs} to build flash-attn"; env MAX_JOBS=$max_jobs pip install flash-attn==2.4.2; fi # buildkit
RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing Transformer Engine in iGPU container until Version variable is set"; else NVTE_BUILD_THREADS_PER_JOB=8 pip install --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v${TRANSFORMER_ENGINE_VERSION}; fi # buildkit
ENV TORCH_CUDNN_V8_API_ENABLED=1
ENV CUDA_MODULE_LOADING=LAZY
RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c ln -sf ${_CUDA_COMPAT_PATH}/lib.real ${_CUDA_COMPAT_PATH}/lib && echo ${_CUDA_COMPAT_PATH}/lib > /etc/ld.so.conf.d/00-cuda-compat.conf && ldconfig && rm -f ${_CUDA_COMPAT_PATH}/lib # buildkit
COPY entrypoint.d/ /opt/nvidia/entrypoint.d/ # buildkit
ARG NVIDIA_BUILD_ID=114410972
ENV NVIDIA_BUILD_ID=114410972
LABEL com.nvidia.build.id=114410972
ARG NVIDIA_BUILD_REF=3e3c067dd015e6d16d2cf59ac18e9f2e2466b68a
LABEL com.nvidia.build.ref=3e3c067dd015e6d16d2cf59ac18e9f2e2466b68a
WORKDIR /app
RUN /bin/sh -c apt-get update -qq 2>/dev/null || true && apt-get install -y -qq --no-install-recommends ffmpeg 2>/dev/null || true && rm -rf /var/lib/apt/lists/* 2>/dev/null || true # buildkit
COPY requirements.txt . # buildkit
RUN /bin/sh -c pip install --no-cache-dir "numpy<2.0" psycopg2-binary redis httpx pillow opencv-python-headless transformers ultralytics timm sentencepiece psutil && python -c "import cv2; cv2.dnn.DictValue = type('DictValue', (), {})" 2>/dev/null || true # buildkit
COPY . . # buildkit
COPY monitoring_worker/monitoring_worker.py ./monitoring_worker/ # buildkit
COPY monitoring_worker/fix_opencv.py ./monitoring_worker/ # buildkit
RUN /bin/sh -c python monitoring_worker/fix_opencv.py || true # buildkit
RUN /bin/sh -c python -m compileall -f -q /app -x 'venv|env|tests|\.git|model_cache|storage' && echo "✅ Bytecode compilation complete" # buildkit
ENV PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=0 PYTHONOPTIMIZE=1
ENV WORKER_ID=
ENV LOG_LEVEL=INFO
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 CMD python -c "import psycopg2; psycopg2.connect(host='postgres', database='basilisk', user='basilisk', password='basilisk_secure_password').close()" || exit 1
CMD ["python", "-m", "monitoring_worker.monitoring_worker"]

Labels

Key Value
com.docker.compose.project basilisk-api
com.docker.compose.service monitoring-worker-1
com.docker.compose.version 2.39.2
com.nvidia.build.id 114410972
com.nvidia.build.ref 3e3c067dd015e6d16d2cf59ac18e9f2e2466b68a
com.nvidia.cublas.version 12.6.3.3
com.nvidia.cuda.version 9.0
com.nvidia.cudnn.version 9.5.0.50
com.nvidia.cufft.version 11.3.0.4
com.nvidia.curand.version 10.3.7.77
com.nvidia.cusolver.version 11.7.1.2
com.nvidia.cusparse.version 12.5.4.2
com.nvidia.cusparselt.version 0.6.2.3
com.nvidia.cutensor.version 2.0.2.5
com.nvidia.nccl.version 2.22.3
com.nvidia.npp.version 12.3.1.54
com.nvidia.nsightcompute.version 2024.3.2.3
com.nvidia.nsightsystems.version 2024.6.1.90
com.nvidia.nvjpeg.version 12.3.3.54
com.nvidia.pytorch.version 2.5.0a0+e000cf0
com.nvidia.tensorrt.version 10.5.0.18
com.nvidia.tensorrtoss.version
com.nvidia.volumes.needed nvidia_driver
org.opencontainers.image.ref.name ubuntu
org.opencontainers.image.version 22.04
Details
Container
2025-09-09 09:16:39 -07:00
0
OCI / Docker
linux/amd64
10 GiB
Versions (1) View all
latest 2025-09-09