Merge pull request #108 from eiery/main
Update n_batch default to 512 to match upstream llama.cpp
This commit is contained in:
commit
f37456133a
1 changed file with 1 addition and 1 deletion
|
@ -37,7 +37,7 @@ class Llama:
|
|||
use_mlock: bool = False,
|
||||
embedding: bool = False,
|
||||
n_threads: Optional[int] = None,
|
||||
n_batch: int = 8,
|
||||
n_batch: int = 512,
|
||||
last_n_tokens_size: int = 64,
|
||||
lora_base: Optional[str] = None,
|
||||
lora_path: Optional[str] = None,
|
||||
|
|
Loading…
Reference in a new issue