hf_text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py

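# Integration tests for bigscience/bloom-560m served with its weights sharded
# across two GPUs.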
import pytest
from testing_utils import require_backend_async


@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
    # Launch a server for bigscience/bloom-560m sharded across two GPUs.
    with launcher("bigscience/bloom-560m", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
    # Wait for the server to become healthy before handing out its client.
    await bloom_560m_sharded_handle.health(240)
    return bloom_560m_sharded_handle.client


@pytest.mark.release
@pytest.mark.asyncio
@require_backend_async("cuda")
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
    # The generated text is different on MI300X and, for what it is worth,
    # also different on H100.
    response = await bloom_560m_sharded.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
    bloom_560m_sharded, generate_load, response_snapshot
):
    # Issue the same request four times and check that every response carries
    # identical generated text.
    responses = await generate_load(
        bloom_560m_sharded,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot