-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathDockerfile
More file actions
98 lines (81 loc) · 3.28 KB
/
Dockerfile
File metadata and controls
98 lines (81 loc) · 3.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# Based on OpenAI's mujoco-py Dockerfile
# base stage contains just binary dependencies.
# This is used in the CI build.
FROM nvidia/cuda:11.5.1-cudnn8-runtime-ubuntu20.04 AS base

# Build-time only (ARG, not ENV): keeps noninteractive out of the runtime environment.
ARG DEBIAN_FRONTEND=noninteractive

# Single layer: update + install + cleanup together so no stale apt lists
# persist in the image. Packages sorted alphabetically for diffability.
RUN apt-get update -q \
    && apt-get install -y --no-install-recommends \
       build-essential \
       curl \
       ffmpeg \
       git \
       libgl1-mesa-dev \
       libgl1-mesa-glx \
       libglew-dev \
       libosmesa6-dev \
       net-tools \
       parallel \
       patchelf \
       python3.8 \
       python3.8-dev \
       python3-pip \
       rsync \
       software-properties-common \
       tar \
       vim \
       virtualenv \
       wget \
       xpra \
       xserver-xorg-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# key=value form; the legacy space-separated ENV form is deprecated.
ENV LANG=C.UTF-8

#RUN mkdir -p /root/.mujoco \
#    && wget https://github.com/deepmind/mujoco/releases/download/2.1.0/mujoco210-linux-x86_64.tar.gz \
#    && tar --no-same-owner -xzvf mujoco210-linux-x86_64.tar.gz \
#    && mv mujoco210 /root/.mujoco/mujoco210 \
#    && rm mujoco210-linux-x86_64.tar.gz

# Set the PATH to the venv before we create the venv, so it's visible in base.
# This is since we may create the venv outside of Docker, e.g. in CI
# or by binding it in for local development.
ENV PATH="/venv/bin:$PATH"
#ENV LD_LIBRARY_PATH /usr/local/nvidia/lib64:/root/.mujoco/mujoco210/bin:${LD_LIBRARY_PATH}

# Run Xdummy mock X server by default so that rendering will work.
COPY ci/xorg.conf /etc/dummy_xorg.conf
COPY ci/Xdummy-entrypoint.py /usr/bin/Xdummy-entrypoint.py
ENTRYPOINT ["/usr/bin/python3", "/usr/bin/Xdummy-entrypoint.py"]
# python-req stage contains Python venv, but not code.
# It is useful for development purposes: you can mount
# code from outside the Docker container.
FROM base AS python-req

# Use this bash as default, as our scripts throw permission denied otherwise.
#SHELL ["/usr/bin/env", "bash"]

WORKDIR /adversarial-policy-defense/

# Copy over just setup.py and dependencies (__init__.py and README.md)
# to avoid rebuilding venv when requirements have not changed.
COPY ./setup.py ./setup.py
COPY ./README.md ./README.md
COPY ./requirements.txt /adversarial-policy-defense/
COPY ci/build_and_activate_venv.sh ./ci/build_and_activate_venv.sh
RUN /usr/bin/env bash ci/build_and_activate_venv.sh /venv \
    && rm -rf $HOME/.cache/pip

# For some reason the option in requirements.txt is not enough, so accept the license manually here.
# ONLY BUILD THIS DOCKERFILE IF YOU OWN THE RESPECTIVE ROMS / own a license to use them.
# NOTE: this must be RUN, not CMD — a CMD does not execute at build time and
# would be silently overridden by the CMDs in the later "full" stage.
RUN AutoROM --accept-license

# Installing our modification to ray. The wheel is already built with the requirements.txt.
# Changes to RLLib are possible without requiring a build and compile of ray.
COPY ci/install_custom_ray.sh ./ci/install_custom_ray.sh
RUN /usr/bin/env bash ci/install_custom_ray.sh
# full stage contains everything.
# Can be used for deployment and local testing.
FROM python-req AS full

# Delay copying (and installing) the code until the very end
COPY . /adversarial-policy-defense

# Build a wheel then install to avoid copying whole directory (pip issue #2195).
# --no-cache-dir keeps the pip download cache out of the image layer.
RUN python3 setup.py sdist bdist_wheel
RUN pip install --no-cache-dir --upgrade dist/aprl_defense-*.whl

# So the entrypoint has the same workdir as when running from commandline without docker.
# (WORKDIR replaces the old `CMD cd src/`, which was a no-op: only the last CMD
# in a Dockerfile takes effect, and a `cd` in its own shell changes nothing anyway.)
WORKDIR /adversarial-policy-defense/src/

# Default command (exec form so it is passed directly to the Xdummy entrypoint
# without an intermediate /bin/sh -c wrapper).
CMD ["echo", "Hello World"]