From 20ea6fd7d614245d60f1b729cf524b211f991095 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Fri, 23 Feb 2024 12:38:36 -0500
Subject: [PATCH] chore: Bump version

---
 CHANGELOG.md          | 14 +++++++++-----
 llama_cpp/__init__.py |  2 +-
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c1ba40f..bb42f61 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,12 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+## [0.2.50]
+
+- docs: Update Functionary OpenAI Server Readme by @jeffrey-fong in #1193
+- fix: LlamaHFTokenizer now receives pre_tokens by @abetlen in 47bad30dd716443652275099fa3851811168ff4a
+
 ## [0.2.49]

 - fix: module 'llama_cpp.llama_cpp' has no attribute 'c_uint8' in Llama.save_state by @abetlen in db776a885cd4c20811f22f8bd1a27ecc71dba927
 - feat: Auto detect Mixtral's slightly different format by @lukestanley in #1214

-
 ## [0.2.48]

 - feat: Update llama.cpp to ggerganov/llama.cpp@15499eb94227401bdc8875da6eb85c15d37068f7
@@ -151,7 +155,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - feat: Update llama.cpp to ggerganov/llama.cpp@b3a7c20b5c035250257d2b62851c379b159c899a
 - feat: Add `saiga` chat format by @femoiseev in #1050
 - feat: Added `chatglm3` chat format by @xaviviro in #1059
-- fix: Correct typo in README.md by @qeleb in (#1058)
+- fix: Correct typo in README.md by @qeleb in (#1058)

 ## [0.2.26]

@@ -284,7 +288,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [0.2.11]

-- Fix bug in `llama_model_params` object has no attribute `logits_all` by @abetlen in d696251fbe40015e8616ea7a7d7ad5257fd1b896
+- Fix bug in `llama_model_params` object has no attribute `logits_all` by @abetlen in d696251fbe40015e8616ea7a7d7ad5257fd1b896

 ## [0.2.10]

@@ -472,7 +476,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [0.1.60]

-NOTE: This release was deleted due to a bug with the packaging system that caused pip installations to fail.
+NOTE: This release was deleted due to a bug with the packaging system that caused pip installations to fail.

 - Truncate max_tokens in create_completion so requested tokens doesn't exceed context size.
 - Temporarily disable cache for completion requests
@@ -496,4 +500,4 @@ NOTE: This release was deleted due to a bug with the packaging system that caus
 - (misc) Added first version of the changelog
 - (server) Use async routes
 - (python-api) Use numpy for internal buffers to reduce memory usage and improve performance.
-- (python-api) Performance bug in stop sequence check slowing down streaming.
\ No newline at end of file
+- (python-api) Performance bug in stop sequence check slowing down streaming.
diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py
index 637f520..fc2efdc 100644
--- a/llama_cpp/__init__.py
+++ b/llama_cpp/__init__.py
@@ -1,4 +1,4 @@
 from .llama_cpp import *
 from .llama import *

-__version__ = "0.2.49"
\ No newline at end of file
+__version__ = "0.2.50"
\ No newline at end of file
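
A minimal sketch, assuming llama-cpp-python 0.2.50 has been installed (e.g. from PyPI), of how the bumped version string can be checked at runtime; it relies only on the `__version__` attribute defined in `llama_cpp/__init__.py` in the patch above:

```python
# Minimal check, assuming llama-cpp-python 0.2.50 is installed.
# llama_cpp/__init__.py re-exports llama_cpp.llama_cpp and llama_cpp.llama
# and defines __version__ as a plain string.
import llama_cpp

print(llama_cpp.__version__)              # "0.2.50" for this release
assert llama_cpp.__version__ == "0.2.50"
```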