Update llama.cpp

This commit is contained in:
Andrei Betlen 2024-02-14 03:47:21 -05:00
parent 6943bab6d8
commit 7b9960d1cb
2 changed files with 2 additions and 71 deletions

View file

@@ -146,30 +146,8 @@ _libllava.llava_eval_image_embed.restype = c_bool
################################################ ################################################
# struct clip_vision_hparams {
# int32_t image_size;
# int32_t patch_size;
# int32_t hidden_size;
# int32_t n_intermediate;
# int32_t projection_dim;
# int32_t n_head;
# int32_t n_layer;
# float eps;
# };
class clip_vision_hparams(Structure):
_fields_ = [
("image_size", c_int32),
("patch_size", c_int32),
("hidden_size", c_int32),
("n_intermediate", c_int32),
("projection_dim", c_int32),
("n_head", c_int32),
("n_layer", c_int32),
("eps", c_float),
]
# /** load mmproj model */ # /** load mmproj model */
# CLIP_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity); # CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity);
def clip_model_load(fname: bytes, verbosity: Union[c_int, int]) -> clip_ctx_p: def clip_model_load(fname: bytes, verbosity: Union[c_int, int]) -> clip_ctx_p:
return _libllava.clip_model_load(fname, verbosity) return _libllava.clip_model_load(fname, verbosity)
@@ -183,50 +161,3 @@ def clip_free(ctx: clip_ctx_p):
_libllava.clip_free.argtypes = [clip_ctx_p] _libllava.clip_free.argtypes = [clip_ctx_p]
_libllava.clip_free.restype = None _libllava.clip_free.restype = None
# size_t clip_embd_nbytes(const struct clip_ctx * ctx);
# int clip_n_patches(const struct clip_ctx * ctx);
# int clip_n_mmproj_embd(const struct clip_ctx * ctx);
# // RGB uint8 image
# struct clip_image_u8 {
# int nx;
# int ny;
# uint8_t * data = NULL;
# size_t size;
# };
# // RGB float32 image (NHWC)
# // Memory layout: RGBRGBRGB...
# struct clip_image_f32 {
# int nx;
# int ny;
# float * data = NULL;
# size_t size;
# };
# struct clip_image_u8_batch {
# struct clip_image_u8 * data;
# size_t size;
# };
# struct clip_image_f32_batch {
# struct clip_image_f32 * data;
# size_t size;
# };
# struct clip_image_u8 * make_clip_image_u8();
# struct clip_image_f32 * make_clip_image_f32();
# CLIP_API void clip_image_u8_free(clip_image_u8 * img);
# CLIP_API void clip_image_f32_free(clip_image_f32 * img);
# CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
# /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
# CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
# bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square);
# bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec);
# bool clip_image_batch_encode(const struct clip_ctx * ctx, const int n_threads, const struct clip_image_f32_batch * imgs,
# float * vec);
# bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype);

2
vendor/llama.cpp vendored

@@ -1 +1 @@
Subproject commit f5ca054855dea83f424003162f26de376e5643f6 Subproject commit aa2341298924ac89778252015efcb792f2df1e20