diff --git a/llama_cpp/llama_chat_format.py b/llama_cpp/llama_chat_format.py
index 6f402e0..0ef7bd4 100644
--- a/llama_cpp/llama_chat_format.py
+++ b/llama_cpp/llama_chat_format.py
@@ -734,6 +734,30 @@ def format_openchat(
     return ChatFormatterResponse(prompt=_prompt, stop=_sep)
 
 
+# Chat format for Saiga models; see available models and details at:
+# https://huggingface.co/collections/IlyaGusev/saiga2-saigamistral-6505d4ccc3d1e53166b636cd
+@register_chat_format("saiga")
+def format_saiga(
+    messages: List[llama_types.ChatCompletionRequestMessage],
+    **kwargs: Any,
+) -> ChatFormatterResponse:
+    # Each turn is wrapped in sentence tokens: "<s>{role}\n{content}</s>".
+    _message_template = "<s>{role}\n{content}</s>"
+    _roles = dict(user="user", bot="bot", system="system")
+    _messages = _map_roles(messages, _roles)
+
+    _prompt = ""
+    for role, content in _messages:
+        if content:
+            _prompt += _message_template.format(role=role, content=content)
+        else:
+            # Turns without string content contribute only the opening "<s>{role}" header.
+            _prompt += f"<s>{role}\n"
+    # Response template: end with an open "<s>bot" turn for the model to complete.
+    _prompt += "<s>bot"
+    return ChatFormatterResponse(prompt=_prompt.strip())
+
+
 @register_chat_completion_handler("functionary")
 def functionary_chat_handler(
     llama: llama.Llama,
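
For reviewers, a quick usage sketch of the new format (untested; the model path
is a placeholder, any Saiga GGUF from the collection linked above should work):

    from llama_cpp import Llama

    # Placeholder path; substitute a real Saiga GGUF file.
    llm = Llama(model_path="./saiga-mistral-7b-q4_K.gguf", chat_format="saiga")

    completion = llm.create_chat_completion(
        messages=[
            {"role": "system", "content": "You are Saiga, a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
        max_tokens=64,
    )
    print(completion["choices"][0]["message"]["content"])

For these two messages the formatter renders the prompt as
"<s>system\nYou are Saiga, a helpful assistant.</s><s>user\nHello!</s><s>bot",
leaving the final bot turn open for the model to complete.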