import pytest
import torch

from copy import copy
from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch


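# These tests run CausalLM on CPU with the GPT-2 vocabulary, so the assertions
# check exact token ids: 50256 is GPT-2's end-of-text token (reused here for
# padding), "Test" tokenizes to id 14402, and "." is id 13. The shared
# default_pb_parameters / default_pb_stop_parameters fixtures are defined
# elsewhere and appear to use deterministic decoding, given the fixed expected
# outputs below.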
@pytest.fixture(scope="session")
def default_causal_lm():
    return CausalLM.fallback("gpt2")


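# GPT-2 ships without a pad token, so the end-of-text id (50256) is reused for
# left padding.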
@pytest.fixture(scope="session")
def gpt2_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
    tokenizer.pad_token_id = 50256
    return tokenizer


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
    return CausalLMBatch.from_pb(
        default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


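# Two-request batch: the copied request (id 1) keeps the default stopping
# parameters, while request id 2 is capped at 5 new tokens and therefore
# finishes first in the tests below.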
@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
    return CausalLMBatch.from_pb(
        batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


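# A freshly converted batch holds the tokenized prompt with left padding and
# has no key/value cache yet.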
def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
    batch = default_causal_lm_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert len(batch.input_ids) == default_pb_batch.size
    assert batch.input_ids[0][-1] == 14402
    assert torch.all(batch.input_ids[0][:-1] == 50256)

    assert batch.attention_mask[0, 0] == 1
    assert torch.all(batch.attention_mask[0, 1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]


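# Batches can only be concatenated after a prefill step has produced
# past_key_values; concatenating fresh batches should raise.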
def test_batch_concatenate_no_prefill(default_causal_lm_batch):
    with pytest.raises(ValueError):
        CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])


def test_causal_lm_batch_type(default_causal_lm):
    assert default_causal_lm.batch_type == CausalLMBatch


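# One generate_token call on the single-request batch: the prefill emits "."
# (id 13), extends the attention mask and all_input_ids by one position, and
# populates the key/value cache.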
def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
    sequence_length = len(default_causal_lm_batch.all_input_ids[0])
    generations, next_batch, _ = default_causal_lm.generate_token(
        default_causal_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, CausalLMBatch)

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
    assert len(next_batch.attention_mask[0]) == 11
    assert next_batch.all_input_ids[0][-1] == 13
    assert next_batch.all_input_ids[0][-2] == 14402
    assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)

    assert torch.all(next_batch.attention_mask[0][0:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 13

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 13
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "."
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


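# Drive the single-request batch to its max_new_tokens limit; once every
# request has finished, generate_token returns None for the next batch.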
def test_causal_lm_generate_token_completion(
    default_causal_lm, default_causal_lm_batch
):
    next_batch = default_causal_lm_batch
    for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


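# Same flow with two requests that stop at different lengths: the shorter
# request (5 new tokens) finishes first and is filtered out, then generation
# continues until the remaining request completes.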
def test_causal_lm_generate_token_completion_multi(
    default_causal_lm, default_multi_requests_causal_lm_batch
):
    next_batch = default_multi_requests_causal_lm_batch

    for i in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == ".java:784)"
    assert (
        generations[1].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )
    # Copy stopping_criterias before filtering
    stopping_criterias = (
        default_multi_requests_causal_lm_batch.stopping_criterias.copy()
    )

    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


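# Concatenate a single-request batch (after two decode steps) with the
# two-request batch (after one); the merged batch is padded to the longest
# sequence, then run and filtered until every request completes.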
def test_batch_concatenate(
    default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
    next_batch_0 = default_causal_lm_batch
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_causal_lm_batch
    _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert next_batch.input_ids[0, 0] == 12355
    assert torch.all(next_batch.input_ids[1:] == 13)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
        )

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == ".java:784)"
    assert (
        generations[2].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )

    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )