From 34081ddc5bbe2fe667ab201abcdbda64c5caa49c Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Wed, 3 Apr 2024 15:38:27 -0400
Subject: [PATCH] chore: Bump version

---
 CHANGELOG.md          | 7 +++++++
 llama_cpp/__init__.py | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8786c01..fc4e29b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+## [0.2.59]
+
+- feat: Update llama.cpp to ggerganov/llama.cpp@ba0c7c70ab5b15f1f2be7fb0dfbe0366dda30d6c
+- feat: Binary wheels for CPU, CUDA (12.1 - 12.3), Metal by @abetlen, @jllllll, and @oobabooga in #1247
+- fix: segfault when logits_all=False by @abetlen in 8649d7671bd1a7c0d9cc6a5ad91c6ca286512ab3
+- fix: last tokens passing to sample_repetition_penalties function by @ymikhailov in #1295
+
 ## [0.2.58]

 - feat: Update llama.cpp to ggerganov/llama.cpp@ba0c7c70ab5b15f1f2be7fb0dfbe0366dda30d6c
diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py
index e246595..2f5219c 100644
--- a/llama_cpp/__init__.py
+++ b/llama_cpp/__init__.py
@@ -1,4 +1,4 @@
 from .llama_cpp import *
 from .llama import *

-__version__ = "0.2.58"
\ No newline at end of file
+__version__ = "0.2.59"
\ No newline at end of file