From 21171223967896b9e941007841ebd1513b3cc4b9 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Thu, 2 May 2024 12:07:09 -0400
Subject: [PATCH] chore: Bump version

---
 CHANGELOG.md          | 10 +++++++++-
 llama_cpp/__init__.py |  2 +-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9681f3..9b99550 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,9 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.2.69]
+
+- feat: Update llama.cpp to ggerganov/llama.cpp@6ecf3189e00a1e8e737a78b6d10e1d7006e050a2
+- feat: Add llama-3-vision-alpha chat format by @abetlen in 31b1d95a6c19f5b615a3286069f181a415f872e8
+- fix: Change default verbose value of verbose in image chat format handlers to True to match Llama by @abetlen in 4f01c452b6c738dc56eacac3758119b12c57ea94
+- fix: Suppress all logs when verbose=False, use hardcoded fileno's to work in colab notebooks by @abetlen in f116175a5a7c84569c88cad231855c1e6e59ff6e
+- fix: UTF-8 handling with grammars by @jsoma in #1415
+
 ## [0.2.68]
 
-- feat: Update llama.cpp to ggerganov/llama.cpp@
+- feat: Update llama.cpp to ggerganov/llama.cpp@77e15bec6217a39be59b9cc83d6b9afb6b0d8167
 - feat: Add option to enable flash_attn to Lllama params and ModelSettings by @abetlen in 22d77eefd2edaf0148f53374d0cac74d0e25d06e
 - fix(ci): Fix build-and-release.yaml by @Smartappli in #1413
 
diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py
index 63c6225..95c8819 100644
--- a/llama_cpp/__init__.py
+++ b/llama_cpp/__init__.py
@@ -1,4 +1,4 @@
 from .llama_cpp import *
 from .llama import *
 
-__version__ = "0.2.68"
\ No newline at end of file
+__version__ = "0.2.69"
\ No newline at end of file
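
The only functional change in this patch is the `__version__` bump in `llama_cpp/__init__.py`. A minimal sketch of checking the installed version after upgrading (the exact-version assertion is illustrative and not part of the patch itself):

```python
# Confirm that the installed llama-cpp-python matches the version set by this patch.
import llama_cpp

print(llama_cpp.__version__)                 # expected to print "0.2.69" for this release
assert llama_cpp.__version__ == "0.2.69"     # fails if an older build is still installed
```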