Ditch the runner container entirely and use the build environment as the runner environment

Running the binary outside the build environment crashes with exit code 127, and I am unable to debug why.

Signed-off-by: baalajimaestro <me@baalajimaestro.me>
parent 696e20eeae
commit 87345eda1b

3 changed files with 12 additions and 31 deletions
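An exit status of 127 from a shell conventionally means "command not found"; for a compiled binary that exists on disk, it usually points at a missing ELF interpreter or shared library rather than a missing file. That fits a binary compiled with icx/icpx against Intel MKL being run in an image that never installed the matching compiler runtime. One way this could be confirmed against the old two-stage image (the image tag here is hypothetical):

    # List the binary's dynamic dependencies inside the old runner image;
    # any "not found" entry names a library the build stage had but this image lacks.
    docker run --rm --entrypoint /bin/sh old-two-stage-image \
        -c 'ldd /ollama/ollama'

Likely candidates are the Intel compiler runtime libraries (libimf.so, libsvml.so, libintlc.so.5), which the intel-oneapi-mkl package alone does not necessarily pull in.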
Dockerfile (31 changes)
@@ -4,7 +4,10 @@ RUN wget $(echo "https://pkgs.dyn.su/el9/base/x86_64/raven-release.el9.noarch.rp
     rpm -ivh raven-release*.rpm && \
     rm -rf raven-release*.rpm && \
     dnf update -y && \
-    dnf -y --enablerepo=raven-extras install golang
+    dnf -y --enablerepo=raven-extras install golang && \
+    dnf -y install epel-release && \
+    dnf -y update && \
+    dnf -y install supervisor

 WORKDIR /app

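supervisor moves into the build stage because that image is now also the runtime image, and the CMD at the end of this file expects a config at /app/supervisord.conf. The repository's actual supervisord.conf is not part of this diff; what follows is a minimal sketch of a config with the same shape, assuming it supervises the two scripts shipped in /app (program names and options here are illustrative, not the real file):

    [supervisord]
    nodaemon=true                     ; stay in the foreground so supervisord can be PID 1

    [program:serve]
    command=/app/serve.sh             ; starts the ollama API server
    autorestart=true

    [program:run_model]
    command=/app/run_model.sh         ; waits for the server, then creates and runs the model
    autorestart=false
    startsecs=0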
@@ -12,34 +15,12 @@ ARG GIN_MODE=release

 ADD . .

-RUN OLLAMA_CUSTOM_CPU_DEFS="-DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DLLAMA_NATIVE=ON" go generate ./... && \
+RUN OLLAMA_CUSTOM_CPU_DEFS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON" go generate ./... && \
     go build -ldflags="-s -w"

-FROM oraclelinux:9
-
-RUN echo -e "[oneAPI]\n\
-name=Intel® oneAPI repository\n\
-baseurl=https://yum.repos.intel.com/oneapi\n\
-enabled=1\n\
-gpgcheck=1\n\
-repo_gpgcheck=1\n\
-gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB" > /tmp/oneAPI.repo
-
-RUN mv /tmp/oneAPI.repo /etc/yum.repos.d/oneAPI.repo && \
-    dnf update && \
-    dnf install intel-oneapi-mkl epel-release -y && \
-    dnf update && \
-    dnf install supervisor -y && \
-    mkdir /ollama
-
-COPY --from=build /app/ollama /ollama
-COPY --from=build /app/serve.sh /ollama/serve.sh
-COPY --from=build /app/run_model.sh /ollama/run_model.sh
-COPY --from=build /app/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
-
 ENV MODEL_NAME="llama"
 ENV OLLAMA_HOST="0.0.0.0:8080"

 EXPOSE 8080

-CMD ["supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
+CMD ["supervisord", "-c", "/app/supervisord.conf"]
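The OLLAMA_CUSTOM_CPU_DEFS rewrite tracks the upstream llama.cpp rename of its CMake options from the LLAMA_ prefix to GGML_ (LLAMA_BLAS → GGML_BLAS, LLAMA_NATIVE → GGML_NATIVE) after ggml was split into its own build; against a recent vendored tree the old -DLLAMA_* definitions are simply ignored, so the MKL BLAS backend would never be compiled in. The -s -w ldflags strip the symbol table and DWARF data to shrink the binary. A sketch of the equivalent build outside Docker, assuming a stock oneAPI install under /opt/intel/oneapi:

    # Put icx/icpx and MKL on PATH and LD_LIBRARY_PATH.
    . /opt/intel/oneapi/setvars.sh

    # Regenerate the vendored llama.cpp with MKL as the BLAS backend,
    # then build a stripped ollama binary.
    OLLAMA_CUSTOM_CPU_DEFS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp \
        -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON" \
        go generate ./...
    go build -ldflags="-s -w"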
run_model.sh (8 changes)
@@ -1,9 +1,9 @@
-#! /usr/bin/env sh
+#! /usr/bin/env bash

 sleep 10

 if [ -n "$INIT_MODELS" ]; then
-    /ollama/ollama rm model || true
+    /app/ollama rm model || true
     echo "FROM /models/$MODEL_NAME" > /tmp/Modelfile
     echo "PARAMETER temperature 1" >> /tmp/Modelfile
     echo "PARAMETER num_ctx 4096" >> /tmp/Modelfile
@@ -17,7 +17,7 @@ if [ -n "$INIT_MODELS" ]; then
     echo "{{ .Response }}<|im_end|>" >> /tmp/Modelfile
     echo '"""' >> /tmp/Modelfile
     echo 'SYSTEM """You are Samantha, a sentient AI."""' >> /tmp/Modelfile
-    /ollama/ollama create -f /tmp/Modelfile model
+    /app/ollama create -f /tmp/Modelfile model
 fi

-/ollama/ollama run model
+/app/ollama run model
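The script assembles an Ollama Modelfile one echo at a time. Script lines 10-16 fall between the two hunks, so only the tail of the TEMPLATE block is visible; below is a reconstruction of the generated /tmp/Modelfile with the default MODEL_NAME, where the middle of the template is assumed to be the usual ChatML form that the visible {{ .Response }}<|im_end|> line closes:

    # reconstruction; the TEMPLATE body is assumed (script lines 10-16 are not in this diff)
    FROM /models/llama
    PARAMETER temperature 1
    PARAMETER num_ctx 4096
    TEMPLATE """<|im_start|>system
    {{ .System }}<|im_end|>
    <|im_start|>user
    {{ .Prompt }}<|im_end|>
    <|im_start|>assistant
    {{ .Response }}<|im_end|>
    """
    SYSTEM """You are Samantha, a sentient AI."""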
serve.sh (4 changes)
@@ -1,3 +1,3 @@
-#! /usr/bin/env sh
+#! /usr/bin/env bash

-/ollama/ollama serve
+/app/ollama serve
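With the runner stage gone, the image produced by the build stage is the one that ships. A usage sketch, with a hypothetical image tag and /srv/models standing in for wherever the model weights live on the host; INIT_MODELS triggers the one-time model creation in run_model.sh, and port 8080 matches the OLLAMA_HOST default and EXPOSE above:

    docker run -d \
        -p 8080:8080 \
        -v /srv/models:/models \
        -e MODEL_NAME=llama \
        -e INIT_MODELS=1 \
        ollama-intel:latest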