fix compilation issue in Dockerfile, remove from README.md until ready
parent dcb6ba389a
commit 7c71c10d4f
4 changed files with 2 additions and 4 deletions
.gitignore (vendored)

@@ -2,6 +2,5 @@
 .vscode
 .env
 .venv
-*.spec
 dist
 ollama
Dockerfile

@@ -1,8 +1,6 @@
 FROM golang:1.20
-RUN apt-get update && apt-get install -y cmake
 WORKDIR /go/src/github.com/jmorganca/ollama
 COPY . .
-RUN cmake -S llama -B llama/build && cmake --build llama/build
 RUN CGO_ENABLED=1 go build -ldflags '-linkmode external -extldflags "-static"' .
 
 FROM alpine
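With the cmake steps gone, the C/C++ side of llama.cpp has to be built by `go build` itself, which is what `CGO_ENABLED=1` and the static external-link flags are for; a fully static binary is also what lets the result run on the `alpine` base image. The sketch below is an assumption rather than the repository's actual binding file (the package name, flags, and helper are illustrative): it only shows the cgo mechanism that makes a separate cmake invocation unnecessary, namely that once a package imports "C", `go build` drives the system C/C++ toolchain over any C/C++ sources in the same directory.

```go
//go:build darwin

// Package llama is a minimal, hypothetical cgo sketch. Because the package
// imports "C", `go build` compiles any .c/.cc/.cpp files in this directory
// with the system toolchain, so no separate cmake step is required.
// The #cgo flags below are illustrative assumptions, not the repository's
// real configuration.
package llama

/*
#cgo CFLAGS: -O3
#cgo darwin LDFLAGS: -framework Accelerate
#include <stdlib.h>
*/
import "C"

import "unsafe"

// cFree releases C-allocated memory; it exists only so the example
// contains a cgo call that actually compiles and links.
func cFree(p unsafe.Pointer) {
	C.free(p)
}
```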
README.md

@@ -18,7 +18,6 @@ Run large language models with `llama.cpp`.
 
 - [Download](https://ollama.ai/download) for macOS
 - Download for Windows (coming soon)
-- Docker: `docker run -p 11434:11434 ollama/ollama`
 
 You can also build the [binary from source](#building).
 
@@ -1,3 +1,5 @@
+// +build darwin
+
 /**
  * llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
  *
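The two lines added at the top of the last file are a Go build constraint and its required trailing blank line: `// +build darwin` tells the toolchain to compile the file only when targeting macOS, and the blank line keeps the constraint from being absorbed into the comment that follows. A small, self-contained illustration with a hypothetical package name is below; since Go 1.17 the same constraint is normally spelled `//go:build darwin`, and `gofmt` keeps the two forms in sync when both are present.

```go
//go:build darwin
// +build darwin

// Package example is hypothetical; only the two constraint lines above
// matter. The file is compiled when GOOS=darwin and skipped otherwise.
package example
```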