Guard integration tests with a tag
This lets CI skip the integration test logic in containers where running the full stack is not currently possible.
parent 10da41d677
commit 697bea6939
4 changed files with 15 additions and 5 deletions
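For reference, a //go:build integration line makes the Go toolchain skip the whole file unless the tag is supplied, so a plain "go test ./server" in CI compiles none of the guarded tests. A minimal sketch of a guarded file (hypothetical test, not part of this commit):

//go:build integration

package server

import "testing"

// TestIntegrationSmoke compiles and runs only when the "integration" build
// tag is set, e.g. via `go test -tags=integration ./server`; a plain
// `go test ./server` never sees this file.
func TestIntegrationSmoke(t *testing.T) {
	t.Log("full-stack integration logic would run here")
}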
@@ -2,6 +2,9 @@
 # This script sets up integration tests which run the full stack to verify
 # inference locally
+#
+# To run the relevant tests use
+# go test -tags=integration ./server
 set -e
 set -o pipefail
 
@@ -29,7 +32,7 @@ for model in ${TEST_MODELS[@]}; do
     -o ${OLLAMA_MODELS}/blobs/${CFG_HASH} \
     ${REGISTRY_SCHEME}://${REGISTRY}/v2/${TEST_MODEL}/blobs/${CFG_HASH}
 
-  for LAYER in $(cat ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} | jq -r ".layers[].digest" ) ; do
+  for LAYER in $(cat ${OLLAMA_MODELS}/manifests/${REGISTRY}/${TEST_MODEL}/${TEST_MODEL_TAG} | jq -r ".layers[].digest"); do
     echo "Pulling blob ${LAYER}"
     curl -L -C - --header "${ACCEPT_HEADER}" \
       -o ${OLLAMA_MODELS}/blobs/${LAYER} \
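The loop above pulls every layer listed in the model manifest, using jq to extract the digests. For illustration, the same extraction in Go (a hypothetical standalone program; only the layers[].digest shape comes from the jq expression above):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// manifest models only the fields the jq expression ".layers[].digest" reads.
type manifest struct {
	Layers []struct {
		Digest string `json:"digest"`
	} `json:"layers"`
}

func main() {
	data, err := os.ReadFile(os.Args[1]) // path to a pulled manifest file
	if err != nil {
		panic(err)
	}
	var m manifest
	if err := json.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	for _, layer := range m.Layers {
		fmt.Println(layer.Digest) // one digest per line, like jq -r
	}
}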
@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (
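The same two-line guard is added to three test files in the server package (this hunk and the two identical ones below). A //go:build constraint must appear before the package clause and should be followed by a blank line so it is not taken as package documentation; constraints can also be combined, for instance (hypothetical, not used in this commit):

//go:build integration && linux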
@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (
@@ -1,3 +1,5 @@
+//go:build integration
+
 package server
 
 import (
@@ -38,7 +40,7 @@ func PrepareModelForPrompts(t *testing.T, modelName string, opts api.Options) (*
 }
 
 func OneShotPromptResponse(t *testing.T, ctx context.Context, req api.GenerateRequest, model *Model, runner llm.LLM) string {
-	prompt, err := model.Prompt(PromptVars{
+	prompt, err := model.PreResponsePrompt(PromptVars{
 		System: req.System,
 		Prompt: req.Prompt,
 		First:  len(req.Context) == 0,
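The helper switches from model.Prompt to model.PreResponsePrompt, which, judging from the call site, renders the prompt text sent to the runner from the request fields. As a rough illustration of that kind of template rendering, not ollama's actual implementation:

package main

import (
	"os"
	"text/template"
)

// promptVars mirrors the fields passed in the hunk above (hypothetical type).
type promptVars struct {
	System string
	Prompt string
	First  bool
}

func main() {
	// A made-up model template; real templates ship with each model.
	tmpl := template.Must(template.New("prompt").Parse(
		"{{if .First}}{{.System}}\n{{end}}User: {{.Prompt}}\nAssistant: "))
	_ = tmpl.Execute(os.Stdout, promptVars{
		System: "You are a helpful assistant.",
		Prompt: "Why is the sky blue?",
		First:  true,
	})
}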
@@ -54,6 +56,7 @@ func OneShotPromptResponse(t *testing.T, ctx context.Context, req api.GenerateRe
 			success <- true
 		}
 	}
 
 	predictReq := llm.PredictOpts{
 		Prompt: prompt,
 		Format: req.Format,
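The surrounding test (only partially shown here) streams the prediction through a callback and reports completion on the success channel. A generic sketch of that callback-and-channel pattern, with made-up names (predict, token) standing in for the real runner API:

package main

import (
	"context"
	"fmt"
	"strings"
)

// predict is a stand-in for a streaming predict call (hypothetical signature).
func predict(ctx context.Context, prompt string, fn func(token string, done bool)) {
	for _, tok := range []string{"Rayleigh ", "scattering."} {
		fn(tok, false)
	}
	fn("", true)
}

func main() {
	var sb strings.Builder
	success := make(chan bool, 1)
	predict(context.Background(), "Why is the sky blue?", func(token string, done bool) {
		sb.WriteString(token)
		if done {
			success <- true // same signalling style as in the test above
		}
	})
	<-success
	fmt.Println(sb.String())
}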