Merge pull request #108 from eiery/main

Update n_batch default to 512 to match upstream llama.cpp
This commit is contained in:
Andrei 2023-04-24 13:48:09 -04:00 committed by GitHub
commit f37456133a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -37,7 +37,7 @@ class Llama:
     use_mlock: bool = False,
     embedding: bool = False,
     n_threads: Optional[int] = None,
-    n_batch: int = 8,
+    n_batch: int = 512,
     last_n_tokens_size: int = 64,
     lora_base: Optional[str] = None,
     lora_path: Optional[str] = None,