llama_tokenize method
int llama_tokenize(
  Pointer<llama_model> model,
  Pointer<Char> text,
  int text_len,
  Pointer<llama_token> tokens,
  int n_tokens_max,
  bool add_special,
  bool parse_special,
)
@details Convert the provided text into tokens.
@param tokens The tokens pointer must be large enough to hold the resulting tokens.
@param add_special Allow adding BOS and EOS tokens if the model is configured to do so.
@param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and are treated as plaintext. Does not insert a leading space.
@return Returns the number of tokens on success, no more than n_tokens_max.
@return Returns a negative number on failure - the number of tokens that would have been returned.
Implementation
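// Forwards the call to the native llama_tokenize symbol resolved from the loaded library.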
int llama_tokenize(
ffi.Pointer<llama_model> model,
ffi.Pointer<ffi.Char> text,
int text_len,
ffi.Pointer<llama_token> tokens,
int n_tokens_max,
bool add_special,
bool parse_special,
) {
return _llama_tokenize(
model,
text,
text_len,
tokens,
n_tokens_max,
add_special,
parse_special,
);
}
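Because the native call only fills a caller-provided buffer, a typical usage pattern is to copy the UTF-8 text into native memory, allocate a token buffer, call the method, and retry with a larger buffer if a negative count comes back. The sketch below assumes a `bindings` instance of the generated bindings class (typed dynamic because the class name depends on your ffigen configuration), a placeholder import name `llama_bindings.dart`, an already loaded `model` pointer, and that `llama_token` is generated as an Int32 typedef; adjust these to your own setup.

import 'dart:ffi' as ffi;

import 'package:ffi/ffi.dart';

// Placeholder name for the ffigen-generated file that declares llama_model,
// llama_token, and the bindings class.
import 'llama_bindings.dart';

/// Tokenizes [input] with [model] through an already constructed [bindings]
/// instance of the generated bindings class.
List<int> tokenize(dynamic bindings, ffi.Pointer<llama_model> model, String input) {
  // Copy the Dart string into native UTF-8 memory.
  final nativeText = input.toNativeUtf8();
  final textLen = nativeText.length;

  // One token per byte plus headroom for BOS/EOS is a reasonable first guess;
  // a negative return value reports the size actually needed.
  var maxTokens = textLen + 8;
  // Assumes llama_token is a typedef for ffi.Int32 in the generated bindings.
  var tokenBuf = malloc<llama_token>(maxTokens);

  try {
    var count = bindings.llama_tokenize(
      model,
      nativeText.cast<ffi.Char>(),
      textLen,
      tokenBuf,
      maxTokens,
      true, // add_special: let the model add BOS/EOS if configured to do so
      false, // parse_special: treat special/control tokens as plain text
    );
    if (count < 0) {
      // The buffer was too small; retry with the size reported back.
      malloc.free(tokenBuf);
      maxTokens = -count;
      tokenBuf = malloc<llama_token>(maxTokens);
      count = bindings.llama_tokenize(
        model,
        nativeText.cast<ffi.Char>(),
        textLen,
        tokenBuf,
        maxTokens,
        true,
        false,
      );
    }
    return [for (var i = 0; i < count; i++) tokenBuf[i]];
  } finally {
    malloc.free(nativeText);
    malloc.free(tokenBuf);
  }
}

The retry branch relies on the documented failure contract: a negative result encodes the number of tokens the call would have produced, so the buffer can be resized exactly once before the second attempt.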