From 1883d8ecde5e5c9e5140a399596e48208d1347b8 Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Sun, 9 Apr 2023 19:59:16 +0200
Subject: [PATCH] feat(docker): improve flash_attention caching (#160)

---
 Dockerfile                   |  6 ++++--
 server/Makefile              | 21 ++-------------------
 server/Makefile-flash-att    | 10 ++++++++++
 server/Makefile-transformers | 10 ++++++++++
 4 files changed, 26 insertions(+), 21 deletions(-)
 create mode 100644 server/Makefile-flash-att
 create mode 100644 server/Makefile-transformers

diff --git a/Dockerfile b/Dockerfile
index 85463af1..9fe0b49b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -56,14 +56,16 @@ WORKDIR /usr/src
 # Install torch
 RUN pip install torch --extra-index-url https://download.pytorch.org/whl/cu118 --no-cache-dir
 
-COPY server/Makefile server/Makefile
-
 # Install specific version of flash attention
+COPY server/Makefile-flash-att server/Makefile
 RUN cd server && make install-flash-attention
 
 # Install specific version of transformers
+COPY server/Makefile-transformers server/Makefile
 RUN cd server && BUILD_EXTENSIONS="True" make install-transformers
 
+COPY server/Makefile server/Makefile
+
 # Install server
 COPY proto proto
 COPY server server
diff --git a/server/Makefile b/server/Makefile
index d2a8cf7a..d827ceca 100644
--- a/server/Makefile
+++ b/server/Makefile
@@ -1,5 +1,5 @@
-transformers_commit := 2b57aa18da658e7d2f42ef6bd5b56751af582fef
-flash_att_commit := 4d87e4d875077ad9efd25030efa4ab0ba92c19e1
+include Makefile-transformers
+include Makefile-flash-att
 
 gen-server:
 	# Compile protos
@@ -10,23 +10,6 @@ gen-server:
 	find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
 	touch text_generation_server/pb/__init__.py
 
-install-transformers:
-	# Install specific version of transformers with custom cuda kernels
-	pip uninstall transformers -y || true
-	rm -rf transformers || true
-	git clone https://github.com/OlivierDehaene/transformers.git
-	cd transformers && git checkout $(transformers_commit)
-	cd transformers && python setup.py install
-
-install-flash-attention:
-	# Install specific version of flash attention
-	pip install packaging
-	pip uninstall flash_attn rotary_emb dropout_layer_norm -y || true
-	rm -rf flash-attention || true
-	git clone https://github.com/HazyResearch/flash-attention.git
-	cd flash-attention && git checkout $(flash_att_commit)
-	cd flash-attention && python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
-
 install-torch:
 	# Install specific version of torch
 	pip install torch --extra-index-url https://download.pytorch.org/whl/cu118 --no-cache-dir
diff --git a/server/Makefile-flash-att b/server/Makefile-flash-att
new file mode 100644
index 00000000..297fd9d0
--- /dev/null
+++ b/server/Makefile-flash-att
@@ -0,0 +1,10 @@
+flash_att_commit := d478eeec8f16c7939c54e4617dbd36f59b8eeed7
+
+install-flash-attention:
+	# Install specific version of flash attention
+	pip install packaging
+	pip uninstall flash_attn rotary_emb dropout_layer_norm -y || true
+	rm -rf flash-attention || true
+	git clone https://github.com/HazyResearch/flash-attention.git
+	cd flash-attention && git checkout $(flash_att_commit)
+	cd flash-attention && python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
\ No newline at end of file
diff --git a/server/Makefile-transformers b/server/Makefile-transformers
new file mode 100644
index 00000000..1e081336
--- /dev/null
+++ b/server/Makefile-transformers
@@ -0,0 +1,10 @@
+transformers_commit := b8d969ff47c6a9d40538a6ea33df021953363afc
+
+install-transformers:
+	# Install specific version of transformers with custom cuda kernels
+	pip install --upgrade setuptools
+	pip uninstall transformers -y || true
+	rm -rf transformers || true
+	git clone https://github.com/OlivierDehaene/transformers.git
+	cd transformers && git checkout $(transformers_commit)
+	cd transformers && python setup.py install
\ No newline at end of file
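
Context on the caching pattern (a minimal sketch, not part of the patch itself): Docker invalidates a cached RUN layer whenever a file COPY'd before it changes, so with the single pinned server/Makefile any edit, such as bumping transformers_commit, also forced a rebuild of the expensive flash-attention layer. Splitting the pins into per-dependency Makefile fragments and copying each fragment in just before the RUN step that needs it keeps unrelated layers cached. Roughly, mirroring the Dockerfile hunk above:

    # Rebuilt only when Makefile-flash-att changes
    COPY server/Makefile-flash-att server/Makefile
    RUN cd server && make install-flash-attention

    # Rebuilt when Makefile-transformers changes; the flash-attention layer above stays cached
    COPY server/Makefile-transformers server/Makefile
    RUN cd server && BUILD_EXTENSIONS="True" make install-transformers

    # Restore the full server/Makefile for the remaining build steps
    COPY server/Makefile server/Makefile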