| Name | Last commit message | Last commit date |
| --- | --- | --- |
| `custom_modeling` | feat(server): add flash attention llama (#144) | 2023-04-11 16:38:22 +02:00 |
| `__init__.py` | feat(server): support OPT models (#55) | 2023-04-11 19:16:41 +02:00 |
| `bloom.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `causal_lm.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `flash_causal_lm.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `flash_llama.py` | feat(server): add flash attention llama (#144) | 2023-04-11 16:38:22 +02:00 |
| `flash_neox.py` | feat(router): make router input validation optional (#164) | 2023-04-09 20:22:27 +02:00 |
| `flash_santacoder.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `galactica.py` | feat(server): support OPT models (#55) | 2023-04-11 19:16:41 +02:00 |
| `gpt_neox.py` | feat(server): support OPT models (#55) | 2023-04-11 19:16:41 +02:00 |
| `model.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `opt.py` | feat(server): support OPT models (#55) | 2023-04-11 19:16:41 +02:00 |
| `santacoder.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `seq2seq_lm.py` | feat(server): optimize decode for sane tokenizers (#170) | 2023-04-12 12:03:10 +02:00 |
| `t5.py` | feat(server): support OPT models (#55) | 2023-04-11 19:16:41 +02:00 |
| `types.py` | feat(clients): Python client (#103) | 2023-03-07 18:52:22 +01:00 |