Fix example documentation

Andrei Betlen 2023-04-01 17:39:35 -04:00
parent a836639822
commit eef627c09c


@@ -127,10 +127,11 @@ class Llama:
     ]:
         """Generate tokens.
-        >>> llama = Llama("models/117M")
-        >>> tokens = llama.tokenize(b"Hello, world!")
-        >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1):
-        ...     print(llama.detokenize([token]))
+        Examples:
+            >>> llama = Llama("models/ggml-7b.bin")
+            >>> tokens = llama.tokenize(b"Hello, world!")
+            >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1):
+            ...     print(llama.detokenize([token]))
         Args:
             tokens: The prompt tokens.