Block a user
docker-memo (24.12.01)
Published 2025-10-06 06:56:23 +00:00 by eric
Installation
docker pull git.ericxliu.me/eric/docker-memo:24.12.01

Digest: sha256:fa6d2de9272494a4d9d272bcd49bcde5db215c0c08706d8e4dcdd5752a68b563

Image Layers
| ARG RELEASE |
| ARG LAUNCHPAD_BUILD_ARCH |
| LABEL org.opencontainers.image.ref.name=ubuntu |
| LABEL org.opencontainers.image.version=22.04 |
| ADD file:ebe009f86035c175ba244badd298a2582914415cf62783d510eab3a311a5d4e1 in / |
| CMD ["/bin/bash"] |
| RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends apt-utils build-essential ca-certificates curl libncurses5 libncursesw5 patch wget rsync unzip jq gnupg libtcmalloc-minimal4 && rm -rf /var/lib/apt/lists/* && echo "hsts=0" > /root/.wgetrc # buildkit |
| ARG CUDA_VERSION=12.6.2.004 |
| ARG CUDA_DRIVER_VERSION=560.35.03 |
| ARG JETPACK_HOST_MOUNTS= |
| ENV CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 CUDA_CACHE_DISABLE=1 NVIDIA_REQUIRE_JETPACK_HOST_MOUNTS= |
| RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c if [ -n "${JETPACK_HOST_MOUNTS}" ]; then echo "/usr/lib/aarch64-linux-gnu/tegra" > /etc/ld.so.conf.d/nvidia-tegra.conf && echo "/usr/lib/aarch64-linux-gnu/tegra-egl" >> /etc/ld.so.conf.d/nvidia-tegra.conf; fi # buildkit |
| RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c /nvidia/build-scripts/installCUDA.sh # buildkit |
| RUN |3 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= /bin/sh -c cp -vprd /nvidia/. / && patch -p0 < /etc/startup_scripts.patch && rm -f /etc/startup_scripts.patch # buildkit |
| ENV _CUDA_COMPAT_PATH=/usr/local/cuda/compat ENV=/etc/shinit_v2 BASH_ENV=/etc/bash.bashrc SHELL=/bin/bash NVIDIA_REQUIRE_CUDA=cuda>=9.0 |
| LABEL com.nvidia.volumes.needed=nvidia_driver com.nvidia.cuda.version=9.0 |
| ARG NCCL_VERSION=2.22.3 |
| ARG CUBLAS_VERSION=12.6.3.3 |
| ARG CUFFT_VERSION=11.3.0.4 |
| ARG CURAND_VERSION=10.3.7.77 |
| ARG CUSPARSE_VERSION=12.5.4.2 |
| ARG CUSOLVER_VERSION=11.7.1.2 |
| ARG CUTENSOR_VERSION=2.0.2.5 |
| ARG NPP_VERSION=12.3.1.54 |
| ARG NVJPEG_VERSION=12.3.3.54 |
| ARG CUDNN_VERSION=9.5.0.50 |
| ARG CUDNN_FRONTEND_VERSION=1.7.0 |
| ARG TRT_VERSION=10.5.0.18 |
| ARG TRTOSS_VERSION= |
| ARG NSIGHT_SYSTEMS_VERSION=2024.6.1.90 |
| ARG NSIGHT_COMPUTE_VERSION=2024.3.2.3 |
| ARG CUSPARSELT_VERSION=0.6.2.3 |
| ENV NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSPARSELT_VERSION=0.6.2.3 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3 |
| RUN |19 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3 CUSPARSELT_VERSION=0.6.2.3 /bin/sh -c /nvidia/build-scripts/installLIBS.sh && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -z "${JETPACK_HOST_MOUNTS}" ]; then /nvidia/build-scripts/installNCCL.sh; fi; # buildkit |
| LABEL com.nvidia.nccl.version=2.22.3 com.nvidia.cublas.version=12.6.3.3 com.nvidia.cufft.version=11.3.0.4 com.nvidia.curand.version=10.3.7.77 com.nvidia.cusparse.version=12.5.4.2 com.nvidia.cusparselt.version=0.6.2.3 com.nvidia.cusolver.version=11.7.1.2 com.nvidia.cutensor.version=2.0.2.5 com.nvidia.npp.version=12.3.1.54 com.nvidia.nvjpeg.version=12.3.3.54 com.nvidia.cudnn.version=9.5.0.50 com.nvidia.tensorrt.version=10.5.0.18 com.nvidia.tensorrtoss.version= com.nvidia.nsightsystems.version=2024.6.1.90 com.nvidia.nsightcompute.version=2024.3.2.3 |
| ARG DALI_VERSION=1.42.0 |
| ARG DALI_BUILD=18507157 |
| ARG POLYGRAPHY_VERSION=0.49.13 |
| ARG TRANSFORMER_ENGINE_VERSION=1.11 |
| ARG MODEL_OPT_VERSION=0.17.0 |
| ENV DALI_VERSION=1.42.0 DALI_BUILD=18507157 POLYGRAPHY_VERSION=0.49.13 TRANSFORMER_ENGINE_VERSION=1.11 MODEL_OPT_VERSION=0.17.0 |
| ADD docs.tgz / # buildkit |
| RUN |24 CUDA_VERSION=12.6.2.004 CUDA_DRIVER_VERSION=560.35.03 JETPACK_HOST_MOUNTS= NCCL_VERSION=2.22.3 CUBLAS_VERSION=12.6.3.3 CUFFT_VERSION=11.3.0.4 CURAND_VERSION=10.3.7.77 CUSPARSE_VERSION=12.5.4.2 CUSOLVER_VERSION=11.7.1.2 CUTENSOR_VERSION=2.0.2.5 NPP_VERSION=12.3.1.54 NVJPEG_VERSION=12.3.3.54 CUDNN_VERSION=9.5.0.50 CUDNN_FRONTEND_VERSION=1.7.0 TRT_VERSION=10.5.0.18 TRTOSS_VERSION= NSIGHT_SYSTEMS_VERSION=2024.6.1.90 NSIGHT_COMPUTE_VERSION=2024.3.2.3 CUSPARSELT_VERSION=0.6.2.3 DALI_VERSION=1.42.0 DALI_BUILD=18507157 POLYGRAPHY_VERSION=0.49.13 TRANSFORMER_ENGINE_VERSION=1.11 MODEL_OPT_VERSION=0.17.0 /bin/sh -c echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf # buildkit |
| ARG _LIBPATH_SUFFIX= |
| ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin LD_LIBRARY_PATH=/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 NVIDIA_VISIBLE_DEVICES=all NVIDIA_DRIVER_CAPABILITIES=compute,utility,video |
| COPY entrypoint/ /opt/nvidia/ # buildkit |
| ENV NVIDIA_PRODUCT_NAME=CUDA |
| ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"] |
| COPY NVIDIA_Deep_Learning_Container_License.pdf /workspace/ # buildkit |
| RUN /bin/sh -c export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends build-essential git libglib2.0-0 less libnl-route-3-200 libnl-3-dev libnl-route-3-dev libnuma-dev libnuma1 libpmi2-0-dev nano numactl openssh-client vim wget && rm -rf /var/lib/apt/lists/* # buildkit |
| ARG GDRCOPY_VERSION=2.3.1-1 |
| ARG HPCX_VERSION=2.20 |
| ARG RDMACORE_VERSION=39.0 |
| ARG MOFED_VERSION=5.4-rdmacore39.0 |
| ARG OPENUCX_VERSION=1.17.0 |
| ARG OPENMPI_VERSION=4.1.7 |
| ENV GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 RDMACORE_VERSION=39.0 |
| ARG TARGETARCH=amd64 |
| RUN |7 GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 TARGETARCH=amd64 /bin/sh -c cd /nvidia && ( export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends libibverbs1 libibverbs-dev librdmacm1 librdmacm-dev libibumad3 libibumad-dev ibverbs-utils ibverbs-providers && rm -rf /var/lib/apt/lists/* && rm $(dpkg-query -L libibverbs-dev librdmacm-dev libibumad-dev | grep "\(\.so\|\.a\)$") ) && ( cd opt/gdrcopy/ && dpkg -i libgdrapi_*.deb ) && ( cp -r opt/hpcx /opt/ && cp etc/ld.so.conf.d/hpcx.conf /etc/ld.so.conf.d/ && ln -sf /opt/hpcx/ompi /usr/local/mpi && ln -sf /opt/hpcx/ucx /usr/local/ucx && sed -i 's/^\(hwloc_base_binding_policy\) = core$/\1 = none/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf && sed -i 's/^\(btl = self\)$/#\1/' /opt/hpcx/ompi/etc/openmpi-mca-params.conf ) && ldconfig # buildkit |
| ENV OPAL_PREFIX=/opt/hpcx/ompi PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin |
| ENV OMPI_MCA_coll_hcoll_enable=0 |
| COPY cuda-*.patch /tmp # buildkit |
| RUN |7 GDRCOPY_VERSION=2.3.1-1 HPCX_VERSION=2.20 RDMACORE_VERSION=39.0 MOFED_VERSION=5.4-rdmacore39.0 OPENUCX_VERSION=1.17.0 OPENMPI_VERSION=4.1.7 TARGETARCH=amd64 /bin/sh -c export DEVEL=1 BASE=0 && /nvidia/build-scripts/installNCU.sh && /nvidia/build-scripts/installCUDA.sh && /nvidia/build-scripts/installLIBS.sh && if [ ! -f /etc/ld.so.conf.d/nvidia-tegra.conf ]; then /nvidia/build-scripts/installNCCL.sh; fi && /nvidia/build-scripts/installCUDNN.sh && /nvidia/build-scripts/installCUTENSOR.sh && /nvidia/build-scripts/installTRT.sh && /nvidia/build-scripts/installNSYS.sh && /nvidia/build-scripts/installCUSPARSELT.sh && if [ -f "/tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch" ]; then patch -p0 < /tmp/cuda-${_CUDA_VERSION_MAJMIN}.patch; fi && rm -f /tmp/cuda-*.patch # buildkit |
| ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs: |
| ARG NVIDIA_BUILD_REF=bb0f0608792391d35d9686e6d86a7cb319bddadc |
| ARG NVIDIA_BUILD_ID=114391310 |
| ENV NVIDIA_BUILD_ID=114391310 |
| LABEL com.nvidia.build.id=114391310 |
| LABEL com.nvidia.build.ref=bb0f0608792391d35d9686e6d86a7cb319bddadc |
| ENV NVIDIA_PRODUCT_NAME=PyTorch |
| ARG NVIDIA_PYTORCH_VERSION=24.10 |
| ARG PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 |
| ARG NVFUSER_BUILD_VERSION=f669fcf |
| ENV PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 PYTORCH_VERSION=2.5.0a0+e000cf0 PYTORCH_BUILD_NUMBER=0 NVIDIA_PYTORCH_VERSION=24.10 |
| ENV NVFUSER_BUILD_VERSION=f669fcf NVFUSER_VERSION=f669fcf |
| LABEL com.nvidia.pytorch.version=2.5.0a0+e000cf0 |
| ARG TARGETARCH=amd64 |
| ARG PYVER=3.10 |
| ARG L4T=0 |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c export PYSFX=`echo "$PYVER" | cut -c1-1` && export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends python$PYVER-dev python$PYSFX python$PYSFX-dev python$PYSFX-distutils python-is-python$PYSFX autoconf automake libatlas-base-dev libgoogle-glog-dev libbz2-dev libc-ares2 libre2-9 libleveldb-dev liblmdb-dev libprotobuf-dev libsnappy-dev libtool nasm protobuf-compiler pkg-config unzip sox libsndfile1 libpng-dev libhdf5-103 libhdf5-dev gfortran rapidjson-dev ninja-build libedit-dev build-essential patchelf && rm -rf /var/lib/apt/lists/* # buildkit |
| ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c curl -O https://bootstrap.pypa.io/get-pip.py && python get-pip.py && rm get-pip.py # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir pip 'setuptools<71' && pip install --no-cache-dir cmake # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/OpenBLAS/0.3.24-$(uname -m)/OpenBLAS-0.3.24-$(uname -m).tar.gz" --output OpenBLAS.tar.gz && tar -xf OpenBLAS.tar.gz -C /usr/local/ && rm OpenBLAS.tar.gz # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ $TARGETARCH = "arm64" ]; then cd /opt && curl "https://gitlab-master.nvidia.com/api/v4/projects/105799/packages/generic/nvpl_slim_24.04/sbsa/nvpl_slim_24.04.tar" --output nvpl_slim_24.04.tar && tar -xf nvpl_slim_24.04.tar && cp -r nvpl_slim_24.04/lib/* /usr/local/lib && cp -r nvpl_slim_24.04/include/* /usr/local/include && rm -rf nvpl_slim_24.04.tar nvpl_slim_24.04 ; fi # buildkit |
| ENV NVPL_LAPACK_MATH_MODE=PEDANTIC |
| WORKDIR /opt/pytorch |
| COPY . . # buildkit |
| ENV PYTHONIOENCODING=utf-8 |
| ENV LC_ALL=C.UTF-8 |
| ENV PIP_DEFAULT_TIMEOUT=100 |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir numpy==1.24.4 scipy==1.11.3 "PyYAML>=5.4.1" astunparse typing_extensions cffi spacy==3.7.5 mock tqdm librosa==0.10.1 expecttest==0.1.3 hypothesis==5.35.1 xdoctest==1.0.2 pytest==8.1.1 pytest-xdist pytest-rerunfailures pytest-shard pytest-flakefinder pybind11 Cython "regex>=2020.1.8" protobuf==4.24.4 && if [[ $TARGETARCH = "amd64" ]] ; then pip install --no-cache-dir mkl==2021.1.1 mkl-include==2021.1.1 mkl-devel==2021.1.1 ; find /usr/local/lib -maxdepth 1 -type f -regex '.*\/lib\(tbb\|mkl\).*\.so\($\|\.[0-9]*\.[0-9]*\)' -exec rm -v {} + ; fi # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c git config --global url."https://github".insteadOf git://github && pip install --no-cache-dir 'jupyterlab>=4.1.0,<5.0.0a0' notebook tensorboard==2.16.2 jupyterlab_code_formatter python-hostlist # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c PATCHED_FILE=$(python -c "from tensorboard.plugins.core import core_plugin as _; print(_.__file__)") && sed -i 's/^\( *"--bind_all",\)$/\1 default=True,/' "$PATCHED_FILE" && test $(grep '^ *"--bind_all", default=True,$' "$PATCHED_FILE" | wc -l) -eq 1 # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir jupyterlab-tensorboard-pro jupytext black isort && mkdir -p /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ && jupyter lab clean # buildkit |
| COPY jupyter_config/jupyter_notebook_config.py /usr/local/etc/jupyter/ # buildkit |
| COPY jupyter_config/manager.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit |
| COPY jupyter_config/settings.jupyterlab-settings /root/.jupyter/lab/user-settings/@jupyterlab/completer-extension/ # buildkit |
| ENV JUPYTER_PORT=8888 |
| ENV TENSORBOARD_PORT=6006 |
| EXPOSE map[8888/tcp:{}] |
| EXPOSE map[6006/tcp:{}] |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c OPENCV_VERSION=4.7.0 && cd / && wget -q -O - https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz | tar -xzf - && cd /opencv-${OPENCV_VERSION} && cmake -GNinja -Bbuild -H. -DWITH_CUDA=OFF -DWITH_1394=OFF -DPYTHON3_PACKAGES_PATH="/usr/local/lib/python${PYVER}/dist-packages" -DBUILD_opencv_cudalegacy=OFF -DBUILD_opencv_stitching=OFF -DWITH_IPP=OFF -DWITH_PROTOBUF=OFF && cmake --build build --target install && cd modules/python/package && pip install --no-cache-dir --disable-pip-version-check -v . && rm -rf /opencv-${OPENCV_VERSION} # buildkit |
| ENV UCC_CL_BASIC_TLS=^sharp |
| ENV TORCH_CUDA_ARCH_LIST=5.2 6.0 6.1 7.0 7.2 7.5 8.0 8.6 8.7 9.0+PTX |
| ENV PYTORCH_HOME=/opt/pytorch/pytorch |
| ENV CUDA_HOME=/usr/local/cuda |
| ENV TORCH_ALLOW_TF32_CUBLAS_OVERRIDE=1 |
| ENV USE_EXPERIMENTAL_CUDNN_V8_API=1 |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install /opt/transfer/torch*.whl && patchelf --set-rpath '/usr/local/lib' /usr/local/lib/python3.10/dist-packages/torch/lib/libtorch_global_deps.so # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c cd pytorch && pip install --no-cache-dir -v -r /opt/pytorch/pytorch/requirements.txt # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir /tmp/dist/*.whl # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ -z "${DALI_VERSION}" ] ; then echo "Not Installing DALI for L4T Build." ; else export DALI_PKG_SUFFIX="cuda${CUDA_VERSION%%.*}0" && pip install --disable-pip-version-check --no-cache-dir --extra-index-url https://developer.download.nvidia.com/compute/redist --extra-index-url http://sqrl/dldata/pip-dali${DALI_URL_SUFFIX:-} --trusted-host sqrl nvidia-dali-${DALI_PKG_SUFFIX}==${DALI_VERSION}; fi # buildkit |
| ENV COCOAPI_VERSION=2.0+nv0.8.0 |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c export COCOAPI_TAG=$(echo ${COCOAPI_VERSION} | sed 's/^.*+n//') && pip install --disable-pip-version-check --no-cache-dir git+https://github.com/nvidia/cocoapi.git@${COCOAPI_TAG}#subdirectory=PythonAPI # buildkit |
| COPY singularity/ /.singularity.d/ # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c ( cd vision && export PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__)") && CFLAGS="-g0" FORCE_CUDA=1 NVCC_APPEND_FLAGS="--threads 8" pip install --no-cache-dir --no-build-isolation --disable-pip-version-check . ) && ( cd vision && cmake -Bbuild -H. -GNinja -DWITH_CUDA=1 -DCMAKE_PREFIX_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` && cmake --build build --target install && rm -rf build ) && ( cd fuser && pip install -r requirements.txt && python setup.py -version-tag=a0+${NVFUSER_VERSION} install && python setup.py clean && cp $(find /usr/local/lib/python3.10/dist-packages/ -name libnvfuser_codegen.so) /usr/local/lib/python3.10/dist-packages/torch/lib/ ) && ( cd lightning-thunder && python setup.py install && rm -rf build) && BUILD_OPTIONS="--cpp_ext --cuda_ext --bnp --xentropy --deprecated_fused_adam --deprecated_fused_lamb --fast_multihead_attn --distributed_lamb --fast_layer_norm --transducer --distributed_adam --fmha --permutation_search --focal_loss --fused_conv_bias_relu --index_mul_2d --cudnn_gbn --group_norm --gpu_direct_storage" && if [ "${L4T}" != "1" ]; then BUILD_OPTIONS="--fast_bottleneck --nccl_p2p --peer_memory --nccl_allocator ${BUILD_OPTIONS}"; fi && ( cd apex && CFLAGS="-g0" NVCC_APPEND_FLAGS="--threads 8" pip install -v --no-build-isolation --no-cache-dir --disable-pip-version-check --config-settings "--build-option=${BUILD_OPTIONS}" . && rm -rf build ) && ( cd lightning-thunder && mkdir tmp && cd tmp && git clone -b v${CUDNN_FRONTEND_VERSION} --recursive --single-branch https://github.com/NVIDIA/cudnn-frontend.git cudnn_frontend && cd cudnn_frontend && pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . 
&& cd ../../ && rm -rf tmp ) && ( cd pytorch/third_party/onnx && pip uninstall typing -y && CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON" pip install --no-build-isolation --no-cache-dir --disable-pip-version-check . ) # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip install --no-cache-dir --disable-pip-version-check tabulate # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing rapids for L4T build." ; else find /rapids -name "*-Linux.tar.gz" -exec tar -C /usr --exclude="*.a" --exclude="bin/xgboost" --strip-components=1 -xvf {} \; && find /rapids -name "*.whl" ! -name "tornado-*" ! -name "Pillow-*" ! -name "certifi-*" ! -name "protobuf-*" -exec pip install --no-cache-dir {} + ; pip install numpy==1.24.4; fi # buildkit |
| WORKDIR /workspace |
| COPY NVREADME.md README.md # buildkit |
| COPY docker-examples docker-examples # buildkit |
| COPY examples examples # buildkit |
| COPY tutorials tutorials # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c chmod -R a+w . # buildkit |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c set -x && URL=$(VERIFY=1 /nvidia/build-scripts/installTRT.sh | sed -n "s/^.*\(http.*\)tar.*$/\1/p")tar && FILE=$(wget -O - $URL | sed -n 's/^.*href="\(TensorRT[^"]*\)".*$/\1/p' | egrep -v "internal|safety") && wget -q $URL/$FILE -O - | tar -xz && PY=$(python -c 'import sys; print(str(sys.version_info[0])+str(sys.version_info[1]))') && pip install TensorRT-*/python/tensorrt-*-cp$PY*.whl && mv /usr/src/tensorrt /opt && ln -s /opt/tensorrt /usr/src/tensorrt && rm -r TensorRT-* # buildkit |
| ENV PATH=/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin |
| RUN |6 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 /bin/sh -c pip --version && python -c 'import sys; print(sys.platform)' && pip install --no-cache-dir nvidia-pyindex && pip install --extra-index-url https://urm.nvidia.com/artifactory/api/pypi/sw-tensorrt-pypi/simple --no-cache-dir polygraphy==0.49.12 && pip install --extra-index-url https://pypi.nvidia.com "nvidia-modelopt[torch]==${MODEL_OPT_VERSION}" # buildkit |
| COPY torch_tensorrt/ /opt/pytorch/torch_tensorrt/ # buildkit |
| ARG PYVER=3.10 |
| RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c pip install --no-cache-dir /opt/pytorch/torch_tensorrt/dist/*.whl # buildkit |
| ENV LD_LIBRARY_PATH=/usr/local/lib/python3.10/dist-packages/torch/lib:/usr/local/lib/python3.10/dist-packages/torch_tensorrt/lib:/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 |
| ENV PATH=/usr/local/lib/python3.10/dist-packages/torch_tensorrt/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/tensorrt/bin |
| RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing Flash Attention in iGPU as it is a requirement for Transformer Engine"; else total_mem_gb=$(grep MemTotal /proc/meminfo | awk '{print int($2 / 1024 / 1024)}'); max_jobs=$(( (total_mem_gb - 40) / 6 )); max_jobs=$(( max_jobs < 4 ? 4 : max_jobs )); max_jobs=$(( max_jobs > $(nproc) ? $(nproc) : max_jobs )); echo "Using MAX_JOBS=${max_jobs} to build flash-attn"; env MAX_JOBS=$max_jobs pip install flash-attn==2.4.2; fi # buildkit |
| RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c if [ "${L4T}" = "1" ]; then echo "Not installing Transformer Engine in iGPU container until Version variable is set"; else NVTE_BUILD_THREADS_PER_JOB=8 pip install --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v${TRANSFORMER_ENGINE_VERSION}; fi # buildkit |
| ENV TORCH_CUDNN_V8_API_ENABLED=1 |
| ENV CUDA_MODULE_LOADING=LAZY |
| RUN |7 NVIDIA_PYTORCH_VERSION=24.10 PYTORCH_BUILD_VERSION=2.5.0a0+e000cf0 NVFUSER_BUILD_VERSION=f669fcf TARGETARCH=amd64 PYVER=3.10 L4T=0 PYVER=3.10 /bin/sh -c ln -sf ${_CUDA_COMPAT_PATH}/lib.real ${_CUDA_COMPAT_PATH}/lib && echo ${_CUDA_COMPAT_PATH}/lib > /etc/ld.so.conf.d/00-cuda-compat.conf && ldconfig && rm -f ${_CUDA_COMPAT_PATH}/lib # buildkit |
| COPY entrypoint.d/ /opt/nvidia/entrypoint.d/ # buildkit |
| ARG NVIDIA_BUILD_ID=114410972 |
| ENV NVIDIA_BUILD_ID=114410972 |
| LABEL com.nvidia.build.id=114410972 |
| ARG NVIDIA_BUILD_REF=3e3c067dd015e6d16d2cf59ac18e9f2e2466b68a |
| LABEL com.nvidia.build.ref=3e3c067dd015e6d16d2cf59ac18e9f2e2466b68a |
| ENV NVIDIA_PRODUCT_NAME=NeMo Framework |
| ENV PIP_NO_CACHE_DIR=1 |
| ARG NVIDIA_BIGNLP_VERSION |
| ENV NVIDIA_BIGNLP_VERSION= |
| LABEL com.nvidia.bignlp.version= |
| ENV DEBIAN_FRONTEND=noninteractive |
| RUN |1 NVIDIA_BIGNLP_VERSION= /bin/sh -c apt-get update && apt-get install -y --no-install-recommends libsndfile1 sox swig libb64-dev openssh-server ffmpeg && rm -rf /var/lib/apt/lists/* && apt-get clean && pip install virtualenv && virtualenv /opt/venv # buildkit |
| RUN |1 NVIDIA_BIGNLP_VERSION= /bin/sh -c apt-get remove --purge -y libslurm* && apt-get -y autoremove && pip uninstall -y onnx && rm -rf /opt/pytorch/pytorch/third_party/onnx # buildkit |
| WORKDIR /opt |
| RUN |1 NVIDIA_BIGNLP_VERSION= /bin/sh -c bash llm_inference/tekit/docker/common/install_base.sh && bash llm_inference/tekit/docker/common/install_cmake.sh && bash llm_inference/tekit/docker/common/install_ccache.sh # buildkit |
| ARG TRT_VER=10.5.0.18 |
| ARG CUDA_VER=12.6 |
| ARG CUDNN_VER=9.5.0.50-1 |
| ARG NCCL_VER |
| ARG CUBLAS_VER=12.6.3.3-1 |
| ARG NVRTC_VER=12.6.77-1 |
| COPY llm_inference/trtllm/install_tensorrt.sh . # buildkit |
| RUN |7 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 /bin/sh -c bash install_tensorrt.sh --TRT_VER=${TRT_VER} --CUDA_VER=${CUDA_VER} --CUDNN_VER=${CUDNN_VER} --NCCL_VER=${NCCL_VER} --CUBLAS_VER=${CUBLAS_VER} --NVRTC_VER=${NVRTC_VER} bash llm_inference/tekit/docker/common/install_polygraphy.sh && bash llm_inference/tekit/docker/common/install_mpi4py.sh && rm install_tensorrt.sh # buildkit |
| ARG REINSTALL_APEX=False |
| ARG APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 |
| RUN |9 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 /bin/sh -c pip install packaging # buildkit |
| RUN |9 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 /bin/sh -c if [ $REINSTALL_APEX = "True" ]; then pip uninstall -y apex && git clone https://github.com/NVIDIA/apex && cd apex && if [ ! -z $APEX_COMMIT ]; then git fetch origin $APEX_COMMIT && git checkout FETCH_HEAD; fi && HEAD_APEX_COMMIT=$(git rev-parse HEAD) && echo "Container built with Apex commit hash: $HEAD_APEX_COMMIT" && pip install -e . -v --no-build-isolation --disable-pip-version-check --no-cache-dir --config-settings "--build-option=--cpp_ext --cuda_ext --fast_layer_norm --distributed_adam --deprecated_fused_adam --group_norm --nccl_allocator"; fi # buildkit |
| RUN |9 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 /bin/sh -c if [ -d /tmp/TransformerEngine ]; then pip install --no-cache-dir --no-build-isolation /tmp/TransformerEngine/dist/transformer_engine*; fi # buildkit |
| ARG MCORE_COMMIT=core_r0.10.0 |
| RUN |10 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 /bin/sh -c CI_JOB_TOKEN=$(cat /run/secrets/CI_JOB_TOKEN) && git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab-master.nvidia.com/ADLR/megatron-lm.git && cd megatron-lm && git pull && if [ ! -z $MCORE_COMMIT ]; then git fetch origin $MCORE_COMMIT && git checkout FETCH_HEAD; fi && HEAD_MCORE_COMMIT=$(git rev-parse HEAD) && echo "Container built with megatron-lm commit hash: $HEAD_MCORE_COMMIT" && pip install -e . && rm -rf .git && cd megatron/core/datasets && make # buildkit |
| ENV PYTHONPATH=:/opt/megatron-lm |
| WORKDIR /opt/tensorrt_llm |
| RUN |10 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 /bin/sh -c pip install /src/tensorrt_llm/build/tensorrt_llm*.whl --extra-index-url https://pypi.nvidia.com # buildkit |
| RUN |10 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 /bin/sh -c ln -sv $(python3 -c 'import site; print(f"{site.getsitepackages()[0]}/tensorrt_llm/libs")') lib && test -f lib/libnvinfer_plugin_tensorrt_llm.so && ln -sv lib/libnvinfer_plugin_tensorrt_llm.so lib/libnvinfer_plugin_tensorrt_llm.so.9 && echo "/opt/tensorrt_llm/lib" > /etc/ld.so.conf.d/tensorrt_llm.conf && ldconfig # buildkit |
| ARG SRC_DIR=/src/tensorrt_llm |
| COPY /src/tensorrt_llm/benchmarks benchmarks # buildkit |
| ARG CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build |
| COPY /src/tensorrt_llm/cpp/build/benchmarks/bertBenchmark /src/tensorrt_llm/cpp/build/benchmarks/gptManagerBenchmark /src/tensorrt_llm/cpp/build/benchmarks/gptSessionBenchmark benchmarks/cpp/ # buildkit |
| RUN |12 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build /bin/sh -c rm -v benchmarks/cpp/bertBenchmark.cpp benchmarks/cpp/gptManagerBenchmark.cpp benchmarks/cpp/gptSessionBenchmark.cpp benchmarks/cpp/CMakeLists.txt # buildkit |
| ARG TARGET_ARCH=x86 |
| ARG INSTALL_VLLM=True |
| WORKDIR /opt |
| COPY /opt/tinycudann*.whl ./ # buildkit |
| RUN |14 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True /bin/sh -c pip install --no-cache-dir /tmp/build_mamba_dep/causal_conv1d*.whl /tmp/build_grouped_gemm/grouped_gemm*.whl /tmp/build_nvdiffrast/nvdiffrast*.whl /tmp/build_stable_dreamfusion/raymarching*.whl /tmp/build_stable_dreamfusion/shencoder*.whl /tmp/build_stable_dreamfusion/freqencoder*.whl /tmp/build_stable_dreamfusion/gridencoder*.whl && if [ -f /tmp/build_mamba_dep/mamba*.whl ]; then pip install --no-cache-dir /tmp/build_mamba_dep/mamba*.whl "triton==3.1.0"; fi # buildkit |
| ARG ALIGNER_COMMIT=r0.6.0 |
| RUN |15 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 /bin/sh -c git clone https://github.com/NVIDIA/NeMo-Aligner.git && cd NeMo-Aligner && git pull && if [ ! -z $ALIGNER_COMMIT ]; then git fetch origin $ALIGNER_COMMIT && git checkout FETCH_HEAD; fi && sed -i "/nemo_toolkit/d" setup/requirements.txt && sed -i "/megatron_core/d" setup/requirements.txt && pip install -e . # buildkit |
| ARG LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f |
| RUN |16 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f /bin/sh -c git clone https://github.com/NVIDIA/NeMo-Framework-Launcher.git && cd NeMo-Framework-Launcher && git pull && if [ ! -z $LAUNCHER_COMMIT ]; then git fetch origin $LAUNCHER_COMMIT && git checkout FETCH_HEAD; fi && HEAD_LAUNCHER_COMMIT=$(git rev-parse HEAD) && echo "Container built with NeMo-Framework-Launcher commit hash: $HEAD_LAUNCHER_COMMIT" && pip install --no-cache-dir -r requirements.txt # buildkit |
| ENV LAUNCHER_SCRIPTS_PATH=/opt/NeMo-Framework-Launcher/launcher_scripts |
| ENV PYTHONPATH=/opt/NeMo-Framework-Launcher/launcher_scripts::/opt/megatron-lm |
| ARG DATA_CURATOR_COMMIT=r0.6.0 |
| ARG INSTALL_DATA_CURATOR=True |
| RUN |18 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True /bin/sh -c if [ $INSTALL_DATA_CURATOR = "True" ]; then pip install mpi4py --no-build-isolation && if [ $TARGET_ARCH = "arm" ]; then pip install git+https://github.com/aboSamoor/pycld2.git; fi && rm /usr/lib/libcudf.so && git clone https://github.com/NVIDIA/NeMo-Curator.git && cd NeMo-Curator && git pull && if [ ! -z $DATA_CURATOR_COMMIT ]; then git fetch origin $DATA_CURATOR_COMMIT && git checkout FETCH_HEAD; fi && HEAD_CURATOR_COMMIT=$(git rev-parse HEAD) && echo "Container built with Curator commit hash: $HEAD_CURATOR_COMMIT" && sed -i "/nemo_toolkit/d" pyproject.toml && cd ../ && pip install --extra-index-url=https://pypi.nvidia.com -e "./NeMo-Curator[all]" && pip install protobuf==4.24.4; fi # extra-index-url pypi could cause dependencies conflicts (later on) # buildkit |
| ARG MODELOPT_VERSION=0.19.0 |
| RUN |19 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 /bin/sh -c pip install --no-cache-dir nvidia-modelopt[torch]==$MODELOPT_VERSION # buildkit |
| ARG TARGET_ARCH=x86 |
| ARG INSTALL_VLLM=True |
| RUN |19 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 /bin/sh -c echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config && sed -i 's/# StrictHostKeyChecking ask/ StrictHostKeyChecking no/' /etc/ssh/ssh_config && mkdir -p /var/run/sshd # buildkit |
| ARG NEMO_COMMIT=r2.1.1 |
| RUN |20 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 /bin/sh -c git clone https://github.com/NVIDIA/NeMo.git && cd NeMo && git pull && if [ ! -z $NEMO_COMMIT ]; then git fetch origin $NEMO_COMMIT && git checkout FETCH_HEAD; fi && HEAD_NEMO_COMMIT=$(git rev-parse HEAD) && echo "Container built with NeMo commit hash: $HEAD_NEMO_COMMIT" && pip uninstall -y nemo_toolkit sacrebleu && sed -i "/mamba-ssm/d" requirements/requirements_nlp.txt && if [ $TARGET_ARCH = "arm" ]; then sed -i "/torch/d" requirements/requirements.txt && sed -i "/decord/d" requirements/requirements_multimodal.txt && sed -i "/megatron_core/d" requirements/requirements_nlp.txt; fi && pip install -e ".[all]" && pip install -r requirements/requirements_infer.txt && cd nemo/collections/nlp/data/language_modeling/megatron && make # buildkit |
| ARG RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e |
| RUN |21 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e /bin/sh -c if [ $TARGET_ARCH = "x86" ]; then pip install --no-cache-dir "git+https://github.com/NVIDIA/nvidia-resiliency-ext.git@${RESIL_COMMIT}"; fi # buildkit |
| RUN |21 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e /bin/sh -c if [ $INSTALL_VLLM = "True" ]; then /opt/venv/bin/pip install -r /opt/NeMo/requirements/requirements_vllm.txt -r /opt/NeMo/requirements/requirements_infer.txt; fi # buildkit |
| RUN |21 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e /bin/sh -c if [ $TARGET_ARCH = "arm" ]; then git clone https://gitlab-master.nvidia.com/nccl/nccl.git && cd nccl && git checkout v2.22-oberon && make -j$(nproc) PREFIX=/usr install && rm /usr/lib/aarch64-linux-gnu/libnccl* && mv /usr/lib/libnccl* /usr/lib/aarch64-linux-gnu/ && cd ../ && rm -rf /opt/nccl; fi # buildkit |
| ARG NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c git clone https://github.com/NVIDIA/NeMo-Run && cd NeMo-Run && git pull && if [ ! -z $NEMO_RUN_COMMIT ]; then git fetch origin $NEMO_RUN_COMMIT && git checkout FETCH_HEAD; fi && pip install -e . # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c pip uninstall -y webdataset && pip install --no-cache-dir "webdataset==0.2.86" "protobuf==3.20.3" # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c pip uninstall -y tornado && pip install --no-cache-dir "tornado==6.4.2" # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c <<"EOF" python from transformers import AutoTokenizer, Qwen2Tokenizer, CLIPImageProcessor _=AutoTokenizer.from_pretrained('gpt2') _=AutoTokenizer.from_pretrained('bert-base-cased') _=AutoTokenizer.from_pretrained('bert-large-cased') _=AutoTokenizer.from_pretrained('bert-large-uncased') _=AutoTokenizer.from_pretrained('bigcode/starcoder2-tokenizer') _=AutoTokenizer.from_pretrained('THUDM/chatglm2-6b',trust_remote_code=True) _=AutoTokenizer.from_pretrained('THUDM/chatglm3-6b',trust_remote_code=True) _=Qwen2Tokenizer.from_pretrained('qwen/Qwen1.5-7B',trust_remote_code=True) _=Qwen2Tokenizer.from_pretrained('qwen/Qwen1.5-14B',trust_remote_code=True) _=AutoTokenizer.from_pretrained('openai/clip-vit-large-patch14') _=CLIPImageProcessor.from_pretrained('openai/clip-vit-large-patch14') _=CLIPImageProcessor.from_pretrained('openai/clip-vit-large-patch14-336') EOF # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c URM_API_TOKEN=$(cat /run/secrets/URM_API_TOKEN) && wget --http-user donghyukc --http-password ${URM_API_TOKEN} "https://urm.nvidia.com/artifactory/nemo-fw-generic-local/llama3_70b_tokenizer.tar.gz" && wget --http-user donghyukc --http-password ${URM_API_TOKEN} "https://urm.nvidia.com/artifactory/nemo-fw-generic-local/llama3_8b_tokenizer.tar.gz" && mkdir -p /tmp_assets && tar -xzvf llama3_8b_tokenizer.tar.gz -C /tmp_assets && tar -xzvf llama3_70b_tokenizer.tar.gz -C /tmp_assets && cp -rf /tmp_assets/hub ~/.cache/huggingface && rm -rf /tmp_assets && rm llama3_8b_tokenizer.tar.gz llama3_70b_tokenizer.tar.gz # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c <<"EOF" python from transformers import AutoTokenizer _=AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B') _=AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-70B') _=AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B-instruct') _=AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-70B-instruct') EOF # buildkit |
| RUN |22 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f /bin/sh -c sed -i '/^LayerId/s/^/#/' /usr/local/lib/python3.10/dist-packages/cv2/typing/__init__.py # buildkit |
| ARG PRE_COMPILE_QUANT_KERNELS=True |
| RUN |23 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f PRE_COMPILE_QUANT_KERNELS=True /bin/sh -c if [ $PRE_COMPILE_QUANT_KERNELS = "True" ]; then python -c "import modelopt.torch.quantization.extensions as ext; ext.precompile()"; fi # buildkit |
| WORKDIR /workspace |
| RUN |23 NVIDIA_BIGNLP_VERSION= TRT_VER=10.5.0.18 CUDA_VER=12.6 CUDNN_VER=9.5.0.50-1 NCCL_VER= CUBLAS_VER=12.6.3.3-1 NVRTC_VER=12.6.77-1 REINSTALL_APEX=False APEX_COMMIT=73375b3bbcb59a5d6ff43f2fafd00b9ecdbe0417 MCORE_COMMIT=core_r0.10.0 SRC_DIR=/src/tensorrt_llm CPP_BUILD_DIR=/src/tensorrt_llm/cpp/build TARGET_ARCH=x86 INSTALL_VLLM=True ALIGNER_COMMIT=r0.6.0 LAUNCHER_COMMIT=5730fac6f97795931325cab0ac5dce1924cdcb3f DATA_CURATOR_COMMIT=r0.6.0 INSTALL_DATA_CURATOR=True MODELOPT_VERSION=0.19.0 NEMO_COMMIT=r2.1.1 RESIL_COMMIT=97aad77609d2e25ed38ac5c99f0c13f93c48464e NEMO_RUN_COMMIT=b4e2258f61b88c53b77996b5f9ed871ee666d85f PRE_COMPILE_QUANT_KERNELS=True /bin/sh -c chmod -R a+w /workspace # buildkit |
| ARG NVIDIA_BUILD_ID |
| ENV NVIDIA_BUILD_ID=114410972 |
| LABEL com.nvidia.build.id=114410972 |
| ARG NVIDIA_BUILD_REF |
| LABEL com.nvidia.build.ref= |
Labels
| Key | Value |
|---|---|
| com.nvidia.bignlp.version | |
| com.nvidia.build.id | 114410972 |
| com.nvidia.build.ref | |
| com.nvidia.cublas.version | 12.6.3.3 |
| com.nvidia.cuda.version | 9.0 |
| com.nvidia.cudnn.version | 9.5.0.50 |
| com.nvidia.cufft.version | 11.3.0.4 |
| com.nvidia.curand.version | 10.3.7.77 |
| com.nvidia.cusolver.version | 11.7.1.2 |
| com.nvidia.cusparse.version | 12.5.4.2 |
| com.nvidia.cusparselt.version | 0.6.2.3 |
| com.nvidia.cutensor.version | 2.0.2.5 |
| com.nvidia.nccl.version | 2.22.3 |
| com.nvidia.npp.version | 12.3.1.54 |
| com.nvidia.nsightcompute.version | 2024.3.2.3 |
| com.nvidia.nsightsystems.version | 2024.6.1.90 |
| com.nvidia.nvjpeg.version | 12.3.3.54 |
| com.nvidia.pytorch.version | 2.5.0a0+e000cf0 |
| com.nvidia.tensorrt.version | 10.5.0.18 |
| com.nvidia.tensorrtoss.version | |
| com.nvidia.volumes.needed | nvidia_driver |
| org.opencontainers.image.ref.name | ubuntu |
| org.opencontainers.image.version | 22.04 |