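# Pinned Flash Attention v2 sources: a released flash-attn wheel for CUDA
# and a specific commit of the ROCm fork.
# Typical usage (assuming this fragment is included by the server Makefile):
#   make install-flash-attention-v2-cuda
#   make install-flash-attention-v2-rocm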
flash_att_v2_commit_cuda := v2.6.1
flash_att_v2_commit_rocm := 2092111b9f975b3347c652ff7fabd431130256c4
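
# CUDA: install build prerequisites, then the pinned flash-attn release from PyPI.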
build-flash-attention-v2-cuda:
	pip install -U packaging wheel
	pip install flash-attn==$(flash_att_v2_commit_cuda)
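
# The CUDA wheel is already installed by the build step, so this target only confirms it.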
install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
echo "Flash v2 installed"
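
# ROCm: clone the flash-attention fork, check out the pinned commit, and build
# from source for the gfx90a and gfx942 GPU architectures.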
build-flash-attention-v2-rocm:
	if [ ! -d 'flash-attention-v2' ]; then \
		pip install -U packaging ninja --no-cache-dir && \
		git clone https://github.com/mht-sharma/flash-attention.git flash-attention-v2 && \
		cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm) && \
		git submodule update --init --recursive && GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
	fi
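
# ROCm: install the extension built above into the current Python environment.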
install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
	cd flash-attention-v2 && \
	GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install