From b3e358dee41b1bbeec856354ab58f2bc8c7003d8 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Mon, 26 Feb 2024 11:58:33 -0500
Subject: [PATCH] docs: Add example of local image loading to README

---
 README.md | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/README.md b/README.md
index a3f82e4..e3d7643 100644
--- a/README.md
+++ b/README.md
@@ -468,6 +468,38 @@ Then you'll need to use a custom chat handler to load the clip model and process
 )
 ```
 
+<details>
+<summary>Loading a Local Image</summary>
+
+Images can be passed as base64 encoded data URIs. The following example demonstrates how to do this.
+
+```python
+import base64
+
+def image_to_base64_data_uri(file_path):
+    with open(file_path, "rb") as img_file:
+        base64_data = base64.b64encode(img_file.read()).decode('utf-8')
+        return f"data:image/png;base64,{base64_data}"
+
+# Replace 'file_path.png' with the actual path to your PNG file
+file_path = 'file_path.png'
+data_uri = image_to_base64_data_uri(file_path)
+
+messages = [
+    {"role": "system", "content": "You are an assistant who perfectly describes images."},
+    {
+        "role": "user",
+        "content": [
+            {"type": "image_url", "image_url": {"url": data_uri}},
+            {"type": "text", "text": "Describe this image in detail please."}
+        ]
+    }
+]
+
+```
+
+</details>
+
 ### Speculative Decoding
 
 `llama-cpp-python` supports speculative decoding which allows the model to generate completions based on a draft model.
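For reference, the `messages` list built in the snippet this patch adds is intended to be passed to a multimodal (llava-style) model through the chat handler set up earlier in the README. A minimal usage sketch, assuming placeholder model and CLIP projector paths (`./path/to/llava/llama-model.gguf`, `./path/to/llava/mmproj.bin`) and that `messages` has already been constructed as above:

```python
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava15ChatHandler

# Load the CLIP projector used to embed images (path is a placeholder).
chat_handler = Llava15ChatHandler(clip_model_path="./path/to/llava/mmproj.bin")

# Load the llava model itself; n_ctx is raised to leave room for the image embedding.
llm = Llama(
    model_path="./path/to/llava/llama-model.gguf",
    chat_handler=chat_handler,
    n_ctx=2048,
)

# `messages` is the list from the patch above, with the local image inlined
# as a base64 data URI.
response = llm.create_chat_completion(messages=messages)
print(response["choices"][0]["message"]["content"])
```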