From 6bbeea07ae49b16cf1cad7a6f2f5fec370c22d01 Mon Sep 17 00:00:00 2001 From: zocainViken <75504411+zocainViken@users.noreply.github.com> Date: Tue, 12 Dec 2023 02:41:38 +0100 Subject: [PATCH] README.md multimodal params fix (#967) multimodal params fix: add logits_all=True to make llava work --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 560ca27..0aacdf9 100644 --- a/README.md +++ b/README.md @@ -266,7 +266,8 @@ Then you'll need to use a custom chat handler to load the clip model and process >>> llm = Llama( model_path="./path/to/llava/llama-model.gguf", chat_handler=chat_handler, - n_ctx=2048 # n_ctx should be increased to accomodate the image embedding + n_ctx=2048, # n_ctx should be increased to accomodate the image embedding + logits_all=True,# needed to make llava work ) >>> llm.create_chat_completion( messages = [