docs: Add Llama class example
This commit is contained in:
parent
c5173b0fb3
commit
b6bb7ac76a
1 changed files with 24 additions and 0 deletions
|
@@ -766,6 +766,30 @@ class Llama:
    ):
        """Load a llama.cpp model from `model_path`.

        Examples:
            Basic usage

            >>> import llama_cpp
            >>> model = llama_cpp.Llama(
            ...     model_path="path/to/model",
            ... )
            >>> print(model("The quick brown fox jumps ", stop=["."])["choices"][0]["text"])
            the lazy dog

            Loading a chat model

            >>> import llama_cpp
            >>> model = llama_cpp.Llama(
            ...     model_path="path/to/model",
            ...     chat_format="llama-2",
            ... )
            >>> print(model.create_chat_completion(
            ...     messages=[{
            ...         "role": "user",
            ...         "content": "what is the meaning of life?"
            ...     }]
            ... ))

        Args:
            model_path: Path to the model.
            n_gpu_layers: Number of layers to offload to GPU (-ngl). If -1, all layers are offloaded.
|
Loading…
Reference in a new issue