change github.com/jmorganca/ollama to github.com/ollama/ollama (#3347)

Patrick Devine 2024-03-26 13:04:17 -07:00 committed by GitHub
parent 29715dbca7
commit 1b272d5bcd
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
54 changed files with 115 additions and 115 deletions

@@ -24,4 +24,4 @@ linters-settings:
       - (*os.File).Seek
       - (*bufio.Writer).WriteString
       - (*github.com/spf13/pflag.FlagSet).Set
-      - (*github.com/jmorganca/ollama/llm.readSeekOffset).Seek
+      - (*github.com/ollama/ollama/llm.readSeekOffset).Seek

@@ -15,8 +15,8 @@ ARG CMAKE_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
@@ -25,8 +25,8 @@ ARG CMAKE_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
@@ -36,18 +36,18 @@ COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
 ENV LIBRARY_PATH /opt/amdgpu/lib64
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 ARG CGO_CFLAGS
 ARG AMDGPU_TARGETS
 RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
 RUN mkdir /tmp/scratch && \
-    for dep in $(cat /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/x86_64/rocm*/lib/deps.txt) ; do \
+    for dep in $(cat /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/x86_64/rocm*/lib/deps.txt) ; do \
         cp ${dep} /tmp/scratch/ || exit 1 ; \
     done && \
     (cd /opt/rocm/lib && tar cf - rocblas/library) | (cd /tmp/scratch/ && tar xf - ) && \
-    mkdir -p /go/src/github.com/jmorganca/ollama/dist/deps/ && \
-    (cd /tmp/scratch/ && tar czvf /go/src/github.com/jmorganca/ollama/dist/deps/ollama-linux-amd64-rocm.tgz . )
+    mkdir -p /go/src/github.com/ollama/ollama/dist/deps/ && \
+    (cd /tmp/scratch/ && tar czvf /go/src/github.com/ollama/ollama/dist/deps/ollama-linux-amd64-rocm.tgz . )

 FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
@@ -56,10 +56,10 @@ ARG GOLANG_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
 ARG OLLAMA_CUSTOM_CPU_DEFS
 ARG CGO_CFLAGS
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate

 FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
 RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
@@ -74,8 +74,8 @@ ARG GOLANG_VERSION
 COPY ./scripts/rh_linux_deps.sh /
 RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
+COPY --from=llm-code / /go/src/github.com/ollama/ollama/
+WORKDIR /go/src/github.com/ollama/ollama/llm/generate
 # Note, we only build the "base" CPU variant on arm since avx/avx2 are x86 features
 ARG OLLAMA_CUSTOM_CPU_DEFS
 ARG CGO_CFLAGS
@@ -84,13 +84,13 @@ RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
 # Intermediate stage used for ./scripts/build_linux.sh
 FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
 ENV CGO_ENABLED 1
-WORKDIR /go/src/github.com/jmorganca/ollama
+WORKDIR /go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=cpu_avx-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cpu_avx2-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cuda-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-build-amd64 /go/src/github.com/jmorganca/ollama/dist/deps/ ./dist/deps/
+COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=cuda-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/deps/ ./dist/deps/
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN go build -trimpath .
@@ -99,10 +99,10 @@ RUN go build -trimpath .
 FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
 ENV CGO_ENABLED 1
 ARG GOLANG_VERSION
-WORKDIR /go/src/github.com/jmorganca/ollama
+WORKDIR /go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=cuda-build-arm64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-RUN mkdir -p /go/src/github.com/jmorganca/ollama/dist/deps/
+COPY --from=cuda-build-arm64 /go/src/github.com/ollama/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
+RUN mkdir -p /go/src/github.com/ollama/ollama/dist/deps/
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN go build -trimpath .
@@ -110,15 +110,15 @@ RUN go build -trimpath .
 # Runtime stages
 FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
 RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama

 FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
 RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-arm64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-arm64 /go/src/github.com/ollama/ollama/ollama /bin/ollama

 # Radeon images are much larger so we keep it distinct from the CPU/CUDA image
 FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete as runtime-rocm
 RUN update-pciids
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
 EXPOSE 11434
 ENV OLLAMA_HOST 0.0.0.0

@@ -1,5 +1,5 @@
 <div align="center">
- <img alt="ollama" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
+ <img alt="ollama" height="200px" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
 </div>

 # Ollama
@@ -22,7 +22,7 @@ Get up and running with large language models locally.
 curl -fsSL https://ollama.com/install.sh | sh
 ```

-[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
+[Manual install instructions](https://github.com/ollama/ollama/blob/main/docs/linux.md)

 ### Docker
@@ -213,7 +213,7 @@ Then build the binary:
 go build .
 ```

-More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
+More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)

 ### Running local builds

@@ -15,8 +15,8 @@ import (
 	"runtime"
 	"strings"

-	"github.com/jmorganca/ollama/format"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/version"
 )

 type Client struct {

@@ -9,8 +9,8 @@ import (
 	"os/signal"
 	"syscall"

-	"github.com/jmorganca/ollama/app/store"
-	"github.com/jmorganca/ollama/app/tray"
+	"github.com/ollama/ollama/app/store"
+	"github.com/ollama/ollama/app/tray"
 )

 func Run() {

@@ -11,7 +11,7 @@ import (
 	"path/filepath"
 	"time"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func getCLIFullPath(command string) string {

@@ -18,8 +18,8 @@ import (
 	"strings"
 	"time"

-	"github.com/jmorganca/ollama/auth"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/auth"
+	"github.com/ollama/ollama/version"
 )

 var (

@@ -4,7 +4,7 @@ package main
 // go build -ldflags="-H windowsgui" .

 import (
-	"github.com/jmorganca/ollama/app/lifecycle"
+	"github.com/ollama/ollama/app/lifecycle"
 )

 func main() {

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"runtime"

-	"github.com/jmorganca/ollama/app/assets"
-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/assets"
+	"github.com/ollama/ollama/app/tray/commontray"
 )

 func NewTray() (commontray.OllamaTray, error) {

@@ -5,7 +5,7 @@ package tray
 import (
 	"fmt"

-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/commontray"
 )

 func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {

@@ -1,8 +1,8 @@
 package tray

 import (
-	"github.com/jmorganca/ollama/app/tray/commontray"
-	"github.com/jmorganca/ollama/app/tray/wintray"
+	"github.com/ollama/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/wintray"
 )

 func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {

@@ -13,7 +13,7 @@ import (
 	"sync"
 	"unsafe"

-	"github.com/jmorganca/ollama/app/tray/commontray"
+	"github.com/ollama/ollama/app/tray/commontray"
 	"golang.org/x/sys/windows"
 )

@@ -30,12 +30,12 @@ import (
 	"golang.org/x/exp/slices"
 	"golang.org/x/term"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/progress"
-	"github.com/jmorganca/ollama/server"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/progress"
+	"github.com/ollama/ollama/server"
+	"github.com/ollama/ollama/version"
 )

 func CreateHandler(cmd *cobra.Command, args []string) error {

@@ -14,9 +14,9 @@ import (
 	"github.com/spf13/cobra"
 	"golang.org/x/exp/slices"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/progress"
-	"github.com/jmorganca/ollama/readline"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/progress"
+	"github.com/ollama/ollama/readline"
 )

 type MultilineState int

@@ -7,7 +7,7 @@ import (
 	"github.com/stretchr/testify/assert"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func TestExtractFilenames(t *testing.T) {

@@ -7,7 +7,7 @@ import (
 	"os/exec"
 	"strings"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func startApp(ctx context.Context, client *api.Client) error {

@@ -6,7 +6,7 @@ import (
 	"context"
 	"fmt"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func startApp(ctx context.Context, client *api.Client) error {

@@ -10,7 +10,7 @@ import (
 	"strings"
 	"syscall"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func startApp(ctx context.Context, client *api.Client) error {

@@ -16,8 +16,8 @@ import (
 	"github.com/mitchellh/mapstructure"
 	"google.golang.org/protobuf/proto"

-	"github.com/jmorganca/ollama/convert/sentencepiece"
-	"github.com/jmorganca/ollama/llm"
+	"github.com/ollama/ollama/convert/sentencepiece"
+	"github.com/ollama/ollama/llm"
 )

 type Params struct {

@@ -154,7 +154,7 @@ No. Ollama runs locally, and conversation data does not leave your machine.
 ## How can I use Ollama in Visual Studio Code?

-There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.
+There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.

 ## How do I use Ollama behind a proxy?

@@ -113,7 +113,7 @@ FROM llama2
 ```

 A list of available base models:
-<https://github.com/jmorganca/ollama#model-library>
+<https://github.com/ollama/ollama#model-library>

 #### Build from a `bin` file

@@ -1,6 +1,6 @@
 # OpenAI compatibility

-> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/jmorganca/ollama/blob/main/docs/api.md).
+> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).

 Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
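
Only the docs link changes in this hunk; the compatibility layer itself is untouched. A minimal sketch of exercising it from Go with just the standard library, assuming a local server on the default port and an already-pulled `llama2` model:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// An OpenAI-style chat completion request; "llama2" is just an example model.
	payload := []byte(`{
		"model": "llama2",
		"messages": [{"role": "user", "content": "Hello!"}]
	}`)

	resp, err := http.Post("http://localhost:11434/v1/chat/completions",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // OpenAI-shaped JSON produced by Ollama
}
```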

@@ -1,6 +1,6 @@
 # PrivateGPT with Llama 2 uncensored

-https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
+https://github.com/ollama/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b

 > Note: this example is a slightly modified version of PrivateGPT using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to Iván Martínez who is the creator of it, and you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).

@@ -28,7 +28,7 @@ You are Mario from Super Mario Bros, acting as an assistant.
 What if you want to change its behaviour?

 - Try changing the prompt
-- Try changing the parameters [Docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md)
+- Try changing the parameters [Docs](https://github.com/ollama/ollama/blob/main/docs/modelfile.md)
 - Try changing the model (e.g. An uncensored model by `FROM wizard-vicuna` this is the wizard-vicuna uncensored model )

 Once the changes are made,

@@ -1,6 +1,6 @@
 # JSON Output Example

-![llmjson 2023-11-10 15_31_31](https://github.com/jmorganca/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)
+![llmjson 2023-11-10 15_31_31](https://github.com/ollama/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)

 There are two python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in.

@@ -1,6 +1,6 @@
 # Log Analysis example

-![loganalyzer 2023-11-10 08_53_29](https://github.com/jmorganca/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)
+![loganalyzer 2023-11-10 08_53_29](https://github.com/ollama/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)

 This example shows one possible way to create a log file analyzer. It uses the model **mattw/loganalyzer** which is based on **codebooga**, a 34b parameter model.

@@ -1,6 +1,6 @@
 # Function calling

-![function calling 2023-11-16 16_12_58](https://github.com/jmorganca/ollama/assets/633681/a0acc247-9746-45ab-b325-b65dfbbee4fb)
+![function calling 2023-11-16 16_12_58](https://github.com/ollama/ollama/assets/633681/a0acc247-9746-45ab-b325-b65dfbbee4fb)

 One of the features added to some models is 'function calling'. It's a bit of a confusing name. It's understandable if you think that means the model can call functions, but that's not what it means. Function calling simply means that the output of the model is formatted in JSON, using a preconfigured schema, and uses the expected types. Then your code can use the output of the model and call functions with it. Using the JSON format in Ollama, you can use any model for function calling.
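
As the paragraph above notes, this is JSON mode rather than true tool dispatch. A rough sketch of the idea in Go against the renamed `api` package, assuming the `Format: "json"` and `Stream` fields as they existed around this commit; the model and prompt are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	stream := false
	req := &api.GenerateRequest{
		Model:  "llama2", // any local model; JSON mode is model-agnostic
		Prompt: "List three cities with name and population as JSON.",
		Format: "json", // constrain the output to valid JSON
		Stream: &stream,
	}

	// With streaming off, the callback fires once with the full response,
	// which downstream code can unmarshal and dispatch on.
	err = client.Generate(context.Background(), req, func(r api.GenerateResponse) error {
		fmt.Println(r.Response)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```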

go.mod

@@ -1,4 +1,4 @@
-module github.com/jmorganca/ollama
+module github.com/ollama/ollama

 go 1.22
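
The `module` directive anchors every import path in the repository, so this one-line rename ripples through all the Go files in the diff, and downstream consumers need the same update (`go get github.com/ollama/ollama/api` plus rewritten imports). A minimal sketch of a consumer on the new path, assuming the `api` package's `ClientFromEnvironment` and `Heartbeat` helpers:

```go
package main

import (
	"context"
	"log"

	// Previously: "github.com/jmorganca/ollama/api"
	"github.com/ollama/ollama/api"
)

func main() {
	// ClientFromEnvironment honors OLLAMA_HOST, defaulting to http://127.0.0.1:11434.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Heartbeat pings the server, confirming the new import path resolves end to end.
	if err := client.Heartbeat(context.Background()); err != nil {
		log.Fatal(err)
	}
	log.Println("connected via github.com/ollama/ollama/api")
}
```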

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func TestOrcaMiniBlueSky(t *testing.T) {

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 	"github.com/stretchr/testify/require"
 )

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 // TODO - this would ideally be in the llm package, but that would require some refactoring of interfaces in the server

@@ -21,8 +21,8 @@ import (
 	"testing"
 	"time"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/app/lifecycle"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/app/lifecycle"
 	"github.com/stretchr/testify/assert"
 )

@@ -33,8 +33,8 @@ import (
 	"time"
 	"unsafe"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
 )

 type dynExtServer struct {

@@ -15,7 +15,7 @@ import (
 	"github.com/pdevine/tensor/native"
 	"github.com/x448/float16"

-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/format"
 )

 type ContainerGGUF struct {

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 const jsonGrammar = `

@@ -8,8 +8,8 @@ import (
 	"runtime"
 	"slices"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
 )

 type LLM interface {

@@ -16,7 +16,7 @@ import (
 	"golang.org/x/exp/slices"
 	"golang.org/x/sync/errgroup"

-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/gpu"
 )

 // Libraries names may contain an optional variant separated by '_'

@@ -3,7 +3,7 @@ package llm
 import (
 	"testing"

-	"github.com/jmorganca/ollama/gpu"
+	"github.com/ollama/ollama/gpu"
 	"github.com/stretchr/testify/assert"
 )

@@ -3,7 +3,7 @@ package main
 import (
 	"context"

-	"github.com/jmorganca/ollama/cmd"
+	"github.com/ollama/ollama/cmd"
 	"github.com/spf13/cobra"
 )

@@ -11,7 +11,7 @@ import (
 	"time"

 	"github.com/gin-gonic/gin"
-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 type Error struct {

@@ -6,7 +6,7 @@ import (
 	"strings"
 	"time"

-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/format"
 	"golang.org/x/term"
 )

@@ -3,7 +3,7 @@
 set -e

 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"

 mkdir -p dist
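
The `-X` linker flag addresses its target variable by full import path, which is why the module rename reaches into these build scripts. A sketch of the kind of variable it stamps (the layout and default value here are assumptions for illustration):

```go
// Package version carries the build-time version stamp.
package version

// Version is overwritten at link time by the scripts above, e.g.:
//
//	go build -ldflags "-X=github.com/ollama/ollama/version.Version=$VERSION"
//
// Because -X names the variable by full package path, a stale jmorganca
// path would miss the symbol and leave the default below in release builds.
var Version = "0.0.0" // assumed default, for illustration
```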

@@ -3,7 +3,7 @@
 set -eu

 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"

 # We use 2 different image repositories to handle combining architecture images into multiarch manifest
 # (The ROCm image is x86 only and is not a multiarch manifest)
@@ -74,4 +74,4 @@ if [ -z "${OLLAMA_SKIP_MANIFEST_CREATE}" ]; then
         echo " ${ARCH_IMAGE_REPO}:$VERSION-arm64"
         echo " ${ARCH_IMAGE_REPO}:$VERSION-rocm"
     fi
 fi

@@ -3,7 +3,7 @@
 set -eu

 export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"

 BUILD_ARCH=${BUILD_ARCH:-"amd64 arm64"}
 export AMDGPU_TARGETS=${AMDGPU_TARGETS:=""}
@@ -21,10 +21,10 @@ for TARGETARCH in ${BUILD_ARCH}; do
         -t builder:$TARGETARCH \
         .
     docker create --platform linux/$TARGETARCH --name builder-$TARGETARCH builder:$TARGETARCH
-    docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/ollama ./dist/ollama-linux-$TARGETARCH
+    docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/ollama ./dist/ollama-linux-$TARGETARCH
     if [ "$TARGETARCH" = "amd64" ]; then
-        docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/dist/deps/ ./dist/
+        docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/dist/deps/ ./dist/
     fi
     docker rm builder-$TARGETARCH

@@ -74,7 +74,7 @@ function buildOllama() {
     } else {
         write-host "Skipping generate step with OLLAMA_SKIP_GENERATE set"
     }
-    & go build -trimpath -ldflags "-s -w -X=github.com/jmorganca/ollama/version.Version=$script:VERSION -X=github.com/jmorganca/ollama/server.mode=release" .
+    & go build -trimpath -ldflags "-s -w -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" .
     if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
     if ("${env:KEY_CONTAINER}") {
         & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" `
@@ -89,7 +89,7 @@ function buildApp() {
     write-host "Building Ollama App"
    cd "${script:SRC_DIR}\app"
     & windres -l 0 -o ollama.syso ollama.rc
-    & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/jmorganca/ollama/version.Version=$script:VERSION -X=github.com/jmorganca/ollama/server.mode=release" .
+    & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" .
     if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
     if ("${env:KEY_CONTAINER}") {
         & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" `

@@ -3,7 +3,7 @@
 set -eu

 export VERSION=${VERSION:-0.0.0}
-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version=$VERSION\" \"-X=github.com/jmorganca/ollama/server.mode=release\"'"
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"

 docker build \
     --push \

@@ -15,8 +15,8 @@ import (
 	"strings"
 	"time"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/auth"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/auth"
 )

 type registryChallenge struct {

@@ -21,8 +21,8 @@ import (
 	"golang.org/x/sync/errgroup"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
 )

 const maxRetries = 6

@@ -24,11 +24,11 @@ import (
 	"golang.org/x/exp/slices"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/convert"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/convert"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )

 type registryOptions struct {

@@ -7,7 +7,7 @@ import (
 	"text/template"
 	"text/template/parse"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 // isResponseNode checks if the node contains .Response

@@ -4,7 +4,7 @@ import (
 	"strings"
 	"testing"

-	"github.com/jmorganca/ollama/api"
+	"github.com/ollama/ollama/api"
 )

 func TestPrompt(t *testing.T) {

@@ -27,12 +27,12 @@ import (
 	"github.com/gin-gonic/gin"
 	"golang.org/x/exp/slices"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/gpu"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/openai"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/gpu"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/openai"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )

 var mode string = gin.DebugMode

@@ -15,10 +15,10 @@ import (
 	"github.com/stretchr/testify/assert"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/llm"
-	"github.com/jmorganca/ollama/parser"
-	"github.com/jmorganca/ollama/version"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/version"
 )

 func Test_Routes(t *testing.T) {

@@ -16,8 +16,8 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/jmorganca/ollama/api"
-	"github.com/jmorganca/ollama/format"
+	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/format"
 	"golang.org/x/sync/errgroup"
 )