diff --git a/README.md b/README.md
index 834edfd0..17afb175 100644
--- a/README.md
+++ b/README.md
@@ -50,6 +50,14 @@ models = ollama.models()
 
 Serve the ollama http server
 
+### `ollama.add(filepath)`
+
+Add a model by importing from a file
+
+```python
+ollama.add("./path/to/model")
+```
+
 ## Cooming Soon
 
 ### `ollama.pull(model)`
@@ -60,14 +68,6 @@ Download a model
 ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
 ```
 
-### `ollama.import(filename)`
-
-Import a model from a file
-
-```python
-ollama.import("./path/to/model")
-```
-
 ### `ollama.search("query")`
 
 Search for compatible models that Ollama can run
diff --git a/ollama/cmd/cli.py b/ollama/cmd/cli.py
index 65a3ad44..d2aba0cb 100644
--- a/ollama/cmd/cli.py
+++ b/ollama/cmd/cli.py
@@ -23,8 +23,8 @@ def main():
     generate_parser.set_defaults(fn=generate)
 
     add_parser = subparsers.add_parser("add")
-    add_parser.add_argument("model_file")
-    generate_parser.set_defaults(fn=add)
+    add_parser.add_argument("file")
+    add_parser.set_defaults(fn=add)
 
     args = parser.parse_args()
     args = vars(args)
@@ -48,4 +48,4 @@ def generate(*args, **kwargs):
 
 
 def add(*args, **kwargs):
-    model.add(*args, **kwargs)
+    engine.add(*args, **kwargs)
diff --git a/ollama/engine.py b/ollama/engine.py
index 5525c410..31213c74 100644
--- a/ollama/engine.py
+++ b/ollama/engine.py
@@ -1,6 +1,7 @@
 import os
 import json
 import sys
+import shutil
 from contextlib import contextmanager
 from llama_cpp import Llama as LLM
 from template import template
@@ -61,3 +62,9 @@ def load(model, models_home=".", llms={}):
 def unload(model, llms={}):
     if model in llms:
         llms.pop(model)
+
+
+def add(file, models_home=".", *args, **kwargs):
+    if not os.path.exists(file):
+        raise ValueError(f"Model file {file} not found")
+    shutil.move(file, models_home)