From 0ede3bfc1218b464b6e067da102cbe3b732c9d7f Mon Sep 17 00:00:00 2001 From: mrq Date: Sat, 5 Apr 2025 01:22:51 -0500 Subject: [PATCH] updated vall_e.cpp, but i could have sworn it worked much better than this...... --- vall_e.cpp/README.md | 2 + vall_e.cpp/include/ggml-alloc.h | 2 +- vall_e.cpp/include/ggml-backend.h | 8 +- vall_e.cpp/include/ggml-cpp.h | 1 + vall_e.cpp/include/ggml-cpu.h | 5 +- vall_e.cpp/include/ggml-metal.h | 2 +- vall_e.cpp/include/ggml-rpc.h | 4 +- vall_e.cpp/include/ggml-vulkan.h | 2 - vall_e.cpp/include/ggml.h | 225 ++--- vall_e.cpp/include/llama-cpp.h | 7 +- vall_e.cpp/include/llama-impl.h | 164 +--- vall_e.cpp/include/llama-vocab.h | 263 +++--- vall_e.cpp/include/llama.h | 371 +++++--- vall_e.cpp/include/llama.modified.h | 1277 --------------------------- vall_e.cpp/include/llama.vanilla.h | 1258 -------------------------- vall_e.cpp/include/llama_hack.h | 369 ++++++-- vall_e.cpp/vall_e.cpp | 35 +- vall_e/emb/qnt.py | 8 + vall_e/export.py | 2 +- 19 files changed, 809 insertions(+), 3196 deletions(-) delete mode 100644 vall_e.cpp/include/llama.modified.h delete mode 100644 vall_e.cpp/include/llama.vanilla.h diff --git a/vall_e.cpp/README.md b/vall_e.cpp/README.md index 926a89e..805cafb 100644 --- a/vall_e.cpp/README.md +++ b/vall_e.cpp/README.md @@ -19,9 +19,11 @@ Run `make`. [`encodec.cpp`](https://github.com/PABannier/encodec.cpp) requires updating its GGML copy to the latest version, which requires a few lines to get the CPU backend working (per my [fork](https://github.com/e-c-k-e-r/encodec.cpp)). [`llama.cpp`](https://github.com/ggerganov/llama.cpp) only possible modification needs to ensure that a non-causal attention mask is used; everything necessary can be hacked together with clever tricks. +* initially written on commit `9ba399dfa7f115effc63d48e6860a94c9faa31b2`, updated to commit `7a84777f42a9b3ba47db5d20b7662f8ddf92f652` ## To-Do +* [ ] fix regressions that appeared for whatever reason * [x] converted model to GGUF * [x] convert it without modifying any of the existing code, as the tokenizer requires some care * [x] basic framework diff --git a/vall_e.cpp/include/ggml-alloc.h b/vall_e.cpp/include/ggml-alloc.h index 23600ee..2cb150f 100644 --- a/vall_e.cpp/include/ggml-alloc.h +++ b/vall_e.cpp/include/ggml-alloc.h @@ -19,7 +19,7 @@ struct ggml_tallocr { }; GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer); -GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor); +GGML_API enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor); // Graph allocator /* diff --git a/vall_e.cpp/include/ggml-backend.h b/vall_e.cpp/include/ggml-backend.h index 7221a08..6467149 100644 --- a/vall_e.cpp/include/ggml-backend.h +++ b/vall_e.cpp/include/ggml-backend.h @@ -56,7 +56,7 @@ extern "C" { GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API enum ggml_status ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t 
buffer, struct ggml_tensor * tensor); @@ -203,6 +203,8 @@ extern "C" { // Backend registry // + GGML_API void ggml_backend_device_register(ggml_backend_dev_t device); + // Backend (reg) enumeration GGML_API size_t ggml_backend_reg_count(void); GGML_API ggml_backend_reg_t ggml_backend_reg_get(size_t index); @@ -340,8 +342,8 @@ extern "C" { GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); // Tensor initialization - GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); - GGML_API void ggml_backend_view_init(struct ggml_tensor * tensor); + GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); + GGML_API enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor); // CPU buffer types are always available GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size); diff --git a/vall_e.cpp/include/ggml-cpp.h b/vall_e.cpp/include/ggml-cpp.h index 219361a..a12342c 100644 --- a/vall_e.cpp/include/ggml-cpp.h +++ b/vall_e.cpp/include/ggml-cpp.h @@ -7,6 +7,7 @@ #include "ggml.h" #include "ggml-alloc.h" #include "ggml-backend.h" +#include "gguf.h" #include // Smart pointers for ggml types diff --git a/vall_e.cpp/include/ggml-cpu.h b/vall_e.cpp/include/ggml-cpu.h index 3aa71ba..f5e11f1 100644 --- a/vall_e.cpp/include/ggml-cpu.h +++ b/vall_e.cpp/include/ggml-cpu.h @@ -8,7 +8,7 @@ extern "C" { #endif // the compute plan that needs to be prepared for ggml_graph_compute() - // since https://github.com/ggerganov/ggml/issues/287 + // since https://github.com/ggml-org/ggml/issues/287 struct ggml_cplan { size_t work_size; // size of work buffer, calculated by `ggml_graph_plan()` uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()` @@ -80,6 +80,7 @@ extern "C" { GGML_BACKEND_API int ggml_cpu_has_avx (void); GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void); GGML_BACKEND_API int ggml_cpu_has_avx2 (void); + GGML_BACKEND_API int ggml_cpu_has_bmi2 (void); GGML_BACKEND_API int ggml_cpu_has_f16c (void); GGML_BACKEND_API int ggml_cpu_has_fma (void); GGML_BACKEND_API int ggml_cpu_has_avx512 (void); @@ -95,9 +96,11 @@ extern "C" { GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void); GGML_BACKEND_API int ggml_cpu_has_sve (void); GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes + GGML_BACKEND_API int ggml_cpu_has_sme (void); // other GGML_BACKEND_API int ggml_cpu_has_riscv_v (void); GGML_BACKEND_API int ggml_cpu_has_vsx (void); + GGML_BACKEND_API int ggml_cpu_has_vxe (void); GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void); GGML_BACKEND_API int ggml_cpu_has_llamafile (void); diff --git a/vall_e.cpp/include/ggml-metal.h b/vall_e.cpp/include/ggml-metal.h index 669c1f8..a610694 100644 --- a/vall_e.cpp/include/ggml-metal.h +++ b/vall_e.cpp/include/ggml-metal.h @@ -45,7 +45,7 @@ GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend); GGML_DEPRECATED( GGML_BACKEND_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size), - "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713"); + "obsoleted by the new device interface - https://github.com/ggml-org/llama.cpp/pull/9713"); GGML_BACKEND_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, 
ggml_abort_callback abort_callback, void * user_data); diff --git a/vall_e.cpp/include/ggml-rpc.h b/vall_e.cpp/include/ggml-rpc.h index ade6c3b..4e0d210 100644 --- a/vall_e.cpp/include/ggml-rpc.h +++ b/vall_e.cpp/include/ggml-rpc.h @@ -17,7 +17,9 @@ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const c GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total); -GGML_BACKEND_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem); +GGML_BACKEND_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, + const char * cache_dir, + size_t free_mem, size_t total_mem); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void); diff --git a/vall_e.cpp/include/ggml-vulkan.h b/vall_e.cpp/include/ggml-vulkan.h index 53cdba0..ed5ea5f 100644 --- a/vall_e.cpp/include/ggml-vulkan.h +++ b/vall_e.cpp/include/ggml-vulkan.h @@ -10,8 +10,6 @@ extern "C" { #define GGML_VK_NAME "Vulkan" #define GGML_VK_MAX_DEVICES 16 -GGML_BACKEND_API void ggml_vk_instance_init(void); - // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num); diff --git a/vall_e.cpp/include/ggml.h b/vall_e.cpp/include/ggml.h index c714fc8..452c967 100644 --- a/vall_e.cpp/include/ggml.h +++ b/vall_e.cpp/include/ggml.h @@ -198,7 +198,7 @@ #ifndef __GNUC__ # define GGML_ATTRIBUTE_FORMAT(...) -#elif defined(__MINGW32__) +#elif defined(__MINGW32__) && !defined(__clang__) # define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) #else # define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) @@ -241,12 +241,6 @@ #define GGML_ROPE_TYPE_MROPE 8 #define GGML_ROPE_TYPE_VISION 24 -#define GGUF_MAGIC "GGUF" - -#define GGUF_VERSION 3 - -#define GGUF_DEFAULT_ALIGNMENT 32 - #define GGML_UNUSED(x) (void)(x) #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1)) @@ -403,12 +397,6 @@ extern "C" { GGML_PREC_F32, }; - enum ggml_backend_type { - GGML_BACKEND_TYPE_CPU = 0, - GGML_BACKEND_TYPE_GPU = 10, - GGML_BACKEND_TYPE_GPU_SPLIT = 20, - }; - // model file types enum ggml_ftype { GGML_FTYPE_UNKNOWN = -1, @@ -466,6 +454,7 @@ extern "C" { GGML_OP_RMS_NORM, GGML_OP_RMS_NORM_BACK, GGML_OP_GROUP_NORM, + GGML_OP_L2_NORM, GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID, @@ -513,6 +502,8 @@ extern "C" { GGML_OP_GET_REL_POS, GGML_OP_ADD_REL_POS, GGML_OP_RWKV_WKV6, + GGML_OP_GATED_LINEAR_ATTN, + GGML_OP_RWKV_WKV7, GGML_OP_UNARY, @@ -587,8 +578,6 @@ extern "C" { struct ggml_tensor { enum ggml_type type; - GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor"); - struct ggml_backend_buffer * buffer; int64_t ne[GGML_MAX_DIMS]; // number of elements @@ -1108,6 +1097,18 @@ extern "C" { int n_groups, float eps); + // l2 normalize along rows + // used in rwkv v7 + GGML_API struct ggml_tensor * ggml_l2_norm( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps); + + GGML_API struct ggml_tensor * ggml_l2_norm_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float eps); + // a - x // b - dy GGML_API struct ggml_tensor * ggml_rms_norm_back( @@ -1397,16 +1398,20 @@ extern "C" { float scale, float max_bias); - GGML_API struct ggml_tensor * ggml_soft_max_back( + GGML_API struct ggml_tensor * ggml_soft_max_ext_back( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b); + struct ggml_tensor * b, + float scale, + float max_bias); // 
in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_soft_max_back_inplace( + GGML_API struct ggml_tensor * ggml_soft_max_ext_back_inplace( struct ggml_context * ctx, struct ggml_tensor * a, - struct ggml_tensor * b); + struct ggml_tensor * b, + float scale, + float max_bias); // rotary position embedding // if (mode & 1) - skip n_past elements (NOT SUPPORTED) @@ -1513,7 +1518,7 @@ extern "C" { // rotary position embedding backward, i.e compute dx from dy // a - dy - GGML_API struct ggml_tensor * ggml_rope_back( + GGML_API struct ggml_tensor * ggml_rope_ext_back( struct ggml_context * ctx, struct ggml_tensor * a, // gradients of ggml_rope result struct ggml_tensor * b, // positions @@ -1528,6 +1533,23 @@ extern "C" { float beta_fast, float beta_slow); + GGML_API struct ggml_tensor * ggml_rope_multi_back( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c, + int n_dims, + int sections[4], + int mode, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow); + + // clamp // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_clamp( @@ -1767,13 +1789,13 @@ extern "C" { struct ggml_tensor * a, int k); -#define GGML_KQ_MASK_PAD 32 +#define GGML_KQ_MASK_PAD 64 - // q: [n_embd, n_batch, n_head, 1] - // k: [n_embd, n_kv, n_head_kv, 1] - // v: [n_embd, n_kv, n_head_kv, 1] !! not transposed !! - // mask: [n_kv, n_batch_pad, 1, 1] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !! - // res: [n_embd, n_head, n_batch, 1] !! permuted !! + // q: [n_embd_k, n_batch, n_head, 1] + // k: [n_embd_k, n_kv, n_head_kv, 1] + // v: [n_embd_v, n_kv, n_head_kv, 1] !! not transposed !! + // mask: [n_kv, n_batch_pad, 1, 1] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !! + // res: [n_embd_v, n_head, n_batch, 1] !! permuted !! 
GGML_API struct ggml_tensor * ggml_flash_attn_ext( struct ggml_context * ctx, struct ggml_tensor * q, @@ -1873,6 +1895,25 @@ extern "C" { struct ggml_tensor * td, struct ggml_tensor * state); + GGML_API struct ggml_tensor * ggml_gated_linear_attn( + struct ggml_context * ctx, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * q, + struct ggml_tensor * g, + struct ggml_tensor * state, + float scale); + + GGML_API struct ggml_tensor * ggml_rwkv_wkv7( + struct ggml_context * ctx, + struct ggml_tensor * r, + struct ggml_tensor * w, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * state); + // custom operators typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *); @@ -2111,132 +2152,6 @@ extern "C" { int64_t n_per_row, const float * imatrix); - // - // gguf - // - - enum gguf_type { - GGUF_TYPE_UINT8 = 0, - GGUF_TYPE_INT8 = 1, - GGUF_TYPE_UINT16 = 2, - GGUF_TYPE_INT16 = 3, - GGUF_TYPE_UINT32 = 4, - GGUF_TYPE_INT32 = 5, - GGUF_TYPE_FLOAT32 = 6, - GGUF_TYPE_BOOL = 7, - GGUF_TYPE_STRING = 8, - GGUF_TYPE_ARRAY = 9, - GGUF_TYPE_UINT64 = 10, - GGUF_TYPE_INT64 = 11, - GGUF_TYPE_FLOAT64 = 12, - GGUF_TYPE_COUNT, // marks the end of the enum - }; - - struct gguf_context; - - struct gguf_init_params { - bool no_alloc; - - // if not NULL, create a ggml_context and allocate the tensor data in it - struct ggml_context ** ctx; - }; - - GGML_API struct gguf_context * gguf_init_empty(void); - GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); - //GGML_API struct gguf_context * gguf_init_from_buffer(..); - - GGML_API void gguf_free(struct gguf_context * ctx); - - GGML_API const char * gguf_type_name(enum gguf_type type); - - GGML_API int gguf_get_version (const struct gguf_context * ctx); - GGML_API size_t gguf_get_alignment (const struct gguf_context * ctx); - GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx); - GGML_API void * gguf_get_data (const struct gguf_context * ctx); - - GGML_API int gguf_get_n_kv(const struct gguf_context * ctx); - GGML_API int gguf_find_key(const struct gguf_context * ctx, const char * key); - GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id); - - GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id); - GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id); - - // will abort if the wrong type is used for the key - GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int key_id); - GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int key_id); - GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int key_id); - GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int key_id); - GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int key_id); - GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int key_id); - GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int key_id); - GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int key_id); - GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int key_id); - GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int key_id); - GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id); - GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id); - GGML_API 
const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id); - GGML_API int gguf_get_arr_n (const struct gguf_context * ctx, int key_id); - GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id); - GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i); - - GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx); - GGML_API int gguf_find_tensor (const struct gguf_context * ctx, const char * name); - GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i); - GGML_API char * gguf_get_tensor_name (const struct gguf_context * ctx, int i); - GGML_API enum ggml_type gguf_get_tensor_type (const struct gguf_context * ctx, int i); - - // removes key if it exists - GGML_API void gguf_remove_key(struct gguf_context * ctx, const char * key); - - // overrides existing values or adds a new one - GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); - GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val); - GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val); - GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val); - GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val); - GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val); - GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val); - GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val); - GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t val); - GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double val); - GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val); - GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val); - GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n); - GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n); - - // set or add KV pairs from another context - GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src); - - // manage tensor info - GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); - GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type); - GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size); - - // writing gguf files can be done in 2 ways: - // - // - write the entire gguf_context to a binary file in a single pass: - // - // gguf_write_to_file(ctx, fname); - // - // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data: - // - // FILE * f = fopen(fname, "wb"); - // fseek(f, gguf_get_meta_size(ctx), SEEK_SET); - // fwrite(f, ...); - // void * data = gguf_meta_get_meta_data(ctx); - // fseek(f, 0, SEEK_SET); - // fwrite(f, data, gguf_get_meta_size(ctx)); - // free(data); - // fclose(f); - // - - // write the entire context to a binary file - GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta); - - // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding - GGML_API 
size_t gguf_get_meta_size(const struct gguf_context * ctx); - GGML_API void gguf_get_meta_data(const struct gguf_context * ctx, void * data); - #ifdef __cplusplus // restrict not standard in C++ # if defined(__GNUC__) @@ -2249,7 +2164,11 @@ extern "C" { # define GGML_RESTRICT # endif #else -# define GGML_RESTRICT restrict +# if defined (_MSC_VER) && (__STDC_VERSION__ < 201112L) +# define GGML_RESTRICT __restrict +# else +# define GGML_RESTRICT restrict +# endif #endif typedef void (*ggml_to_float_t) (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); diff --git a/vall_e.cpp/include/llama-cpp.h b/vall_e.cpp/include/llama-cpp.h index daa04d4..8f63681 100644 --- a/vall_e.cpp/include/llama-cpp.h +++ b/vall_e.cpp/include/llama-cpp.h @@ -9,7 +9,7 @@ #include "llama.h" struct llama_model_deleter { - void operator()(llama_model * model) { llama_free_model(model); } + void operator()(llama_model * model) { llama_model_free(model); } }; struct llama_context_deleter { @@ -20,6 +20,11 @@ struct llama_sampler_deleter { void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } }; +struct llama_adapter_lora_deleter { + void operator()(llama_adapter_lora * adapter) { llama_adapter_lora_free(adapter); } +}; + typedef std::unique_ptr llama_model_ptr; typedef std::unique_ptr llama_context_ptr; typedef std::unique_ptr llama_sampler_ptr; +typedef std::unique_ptr llama_adapter_lora_ptr; diff --git a/vall_e.cpp/include/llama-impl.h b/vall_e.cpp/include/llama-impl.h index 70f16b6..02b1d07 100644 --- a/vall_e.cpp/include/llama-impl.h +++ b/vall_e.cpp/include/llama-impl.h @@ -1,19 +1,18 @@ #pragma once -#include "llama.h" +#include "ggml.h" // for ggml_log_level #include #include -#include #ifdef __GNUC__ -#ifdef __MINGW32__ -#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +# if defined(__MINGW32__) && !defined(__clang__) +# define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +# else +# define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) +# endif #else -#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) -#endif -#else -#define LLAMA_ATTRIBUTE_FORMAT(...) +# define LLAMA_ATTRIBUTE_FORMAT(...) #endif // @@ -35,147 +34,28 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void * // helpers // -struct time_meas { - time_meas(int64_t & t_acc, bool disable = false) : t_start_us(disable ? 
-1 : ggml_time_us()), t_acc(t_acc) {} +template +struct no_init { + T value; + no_init() { /* do nothing */ } +}; - ~time_meas() { - if (t_start_us >= 0) { - t_acc += ggml_time_us() - t_start_us; - } - } +struct time_meas { + time_meas(int64_t & t_acc, bool disable = false); + ~time_meas(); const int64_t t_start_us; int64_t & t_acc; }; -static void replace_all(std::string & s, const std::string & search, const std::string & replace) { - if (search.empty()) { - return; - } - std::string builder; - builder.reserve(s.length()); - size_t pos = 0; - size_t last_pos = 0; - while ((pos = s.find(search, last_pos)) != std::string::npos) { - builder.append(s, last_pos, pos - last_pos); - builder.append(replace); - last_pos = pos + search.length(); - } - builder.append(s, last_pos, std::string::npos); - s = std::move(builder); -} +void replace_all(std::string & s, const std::string & search, const std::string & replace); -const std::vector> & llama_internal_get_tensor_map( - struct llama_context * ctx -); +// TODO: rename to llama_format ? +LLAMA_ATTRIBUTE_FORMAT(1, 2) +std::string format(const char * fmt, ...); -// the ring buffer works similarly to std::deque, but with a fixed capacity -template -struct ring_buffer { - ring_buffer(size_t cap) : capacity(cap), data(cap) {} +std::string llama_format_tensor_shape(const std::vector & ne); +std::string llama_format_tensor_shape(const struct ggml_tensor * t); - T & front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - const T & front() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - T & back() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - const T & back() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - void push_back(const T & value) { - if (capacity == 0) { - throw std::runtime_error("ring buffer: capacity is zero"); - } - - if (sz == capacity) { - // advance the start when buffer is full - first = (first + 1) % capacity; - } else { - sz++; - } - data[pos] = value; - pos = (pos + 1) % capacity; - } - - T pop_front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - T value = data[first]; - first = (first + 1) % capacity; - sz--; - return value; - } - - //T & operator[](size_t i) { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - //const T & at(size_t i) const { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - const T & rat(size_t i) const { - if (i >= sz) { - throw std::runtime_error("ring buffer: index out of bounds"); - } - return data[(first + sz - i - 1) % capacity]; - } - - std::vector to_vector() const { - std::vector result; - result.reserve(sz); - for (size_t i = 0; i < sz; i++) { - result.push_back(data[(first + i) % capacity]); - } - return result; - } - - void clear() { - // here only reset the status of the buffer - sz = 0; - first = 0; - pos = 0; - } - - bool empty() const { - return sz == 0; - } - - size_t size() const { - return sz; - } - - size_t capacity = 0; - size_t sz = 0; - size_t first = 0; - size_t pos = 0; - std::vector data; -}; +std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i); diff --git a/vall_e.cpp/include/llama-vocab.h 
b/vall_e.cpp/include/llama-vocab.h index 4bb16d2..5ce3552 100644 --- a/vall_e.cpp/include/llama-vocab.h +++ b/vall_e.cpp/include/llama-vocab.h @@ -1,170 +1,125 @@ #pragma once -#include "llama-impl.h" +#include "llama.h" #include #include -#include -#include -#include +#include -struct llm_tokenizer; +struct LLM_KV; +struct llama_model_loader; struct llama_vocab { - using id = llama_token; - using token = std::string; - using tattr = llama_token_attr; - struct token_data { - token text; - float score; - tattr attr; + std::string text; + float score; + llama_token_attr attr; }; - uint32_t n_vocab = 0; // TODO: not great because has to keep in sync with hparams.n_vocab - - enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM; - enum llama_vocab_pre_type type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - - int max_token_len = 0; // used for optimizing longest token search - - std::unordered_map token_to_id; - std::vector id_to_token; - - std::vector cache_special_tokens; - std::vector cache_token_to_piece; // llama_token_to_piece(special = true); - - std::map, int> bpe_ranks; - - // default LLaMA special tokens - // TODO: should we set all of these to LLAMA_TOKEN_NULL? - id special_bos_id = 1; - id special_eos_id = 2; - id special_eot_id = LLAMA_TOKEN_NULL; - id special_eom_id = LLAMA_TOKEN_NULL; - id special_unk_id = 0; - id special_sep_id = LLAMA_TOKEN_NULL; - id special_pad_id = LLAMA_TOKEN_NULL; - id special_cls_id = LLAMA_TOKEN_NULL; - id special_mask_id = LLAMA_TOKEN_NULL; - - id linefeed_id = 13; - - // fim tokens - id special_fim_pre_id = LLAMA_TOKEN_NULL; - id special_fim_suf_id = LLAMA_TOKEN_NULL; - id special_fim_mid_id = LLAMA_TOKEN_NULL; - id special_fim_pad_id = LLAMA_TOKEN_NULL; - id special_fim_rep_id = LLAMA_TOKEN_NULL; // repo - id special_fim_sep_id = LLAMA_TOKEN_NULL; // file separator - - // set of all tokens that cause "end of generation" - std::set special_eog_ids; - - // tokenizer flags - bool tokenizer_add_space_prefix = false; - bool tokenizer_add_bos = false; - bool tokenizer_add_eos = false; - bool tokenizer_ignore_merges = false; - bool tokenizer_clean_spaces = false; // clean_up_tokenization_spaces - bool tokenizer_remove_extra_whitespaces = false; - bool tokenizer_escape_whitespaces = true; - bool tokenizer_treat_whitespace_as_suffix = false; - - std::vector precompiled_charsmap; - - llm_tokenizer * tokenizer = nullptr; - - llama_vocab() = default; + llama_vocab(); ~llama_vocab(); + void load(llama_model_loader & ml, const LLM_KV & kv); + + enum llama_vocab_type get_type() const; + enum llama_vocab_pre_type get_pre_type() const; + + uint32_t n_tokens() const; + uint32_t n_token_types() const; + + std::string type_name() const; + + bool is_normal (llama_token id) const; + bool is_unknown (llama_token id) const; + bool is_control (llama_token id) const; + bool is_byte (llama_token id) const; + bool is_user_defined(llama_token id) const; + bool is_unused (llama_token id) const; + bool is_eog (llama_token id) const; + + uint8_t token_to_byte(llama_token id) const; + llama_token byte_to_token(uint8_t ch) const; + + llama_token text_to_token(const std::string & text) const; + + const token_data & get_token_data(llama_token id) const; + + const char * token_get_text (llama_token id) const; + float token_get_score(llama_token id) const; + llama_token_attr token_get_attr (llama_token id) const; + + llama_token token_bos() const; + llama_token token_eos() const; + llama_token token_eot() const; + llama_token token_eom() const; + llama_token token_unk() const; + llama_token 
token_sep() const; + llama_token token_nl () const; + llama_token token_pad() const; + + llama_token token_prefix() const; + llama_token token_middle() const; + llama_token token_suffix() const; + + llama_token token_fim_pre() const; + llama_token token_fim_suf() const; + llama_token token_fim_mid() const; + llama_token token_fim_pad() const; + llama_token token_fim_rep() const; + llama_token token_fim_sep() const; + + bool get_add_space_prefix () const; + bool get_add_bos () const; + bool get_add_eos () const; + bool get_ignore_merges () const; + bool get_clean_spaces () const; + bool get_remove_extra_whitespaces () const; + bool get_escape_whitespaces () const; + bool get_treat_whitespace_as_suffix() const; + + int max_token_len() const; + int find_bpe_rank(const std::string & token_left, const std::string & token_right) const; - void init_tokenizer(); + int32_t tokenize( + const char * text, + int32_t text_len, + llama_token * tokens, + int32_t n_tokens_max, + bool add_special, + bool parse_special) const; + + std::vector tokenize( + const std::string & raw_text, + bool add_special, + bool parse_special = false) const; + + // does not write null-terminator to buf + int32_t token_to_piece( + llama_token token, + char * buf, + int32_t length, + int32_t lstrip, + bool special) const; + + // use cached data + const std::string & token_to_piece(llama_token token) const; + + int32_t detokenize( + const llama_token * tokens, + int32_t n_tokens, + char * text, + int32_t text_len_max, + bool remove_special, + bool unparse_special) const; + + std::string detokenize( + const std::vector & tokens, + bool special) const; + + void print_info() const; + +private: + struct impl; + std::unique_ptr pimpl; }; - -// -// internal API -// - -// TODO: rename to llama_tokenize_impl -// TODO: This should probably be in llama.h -std::vector llama_tokenize_internal( - const llama_vocab & vocab, - std::string raw_text, - bool add_special, - bool parse_special = false); - -// TODO: move the API below as member functions of llama_vocab -llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch); - -const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token); - -float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token); - -llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token); - -bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token); - -bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token); - -llama_token llama_token_bos_impl(const struct llama_vocab & vocab); -llama_token llama_token_eos_impl(const struct llama_vocab & vocab); -llama_token llama_token_eot_impl(const struct llama_vocab & vocab); -llama_token llama_token_eom_impl(const struct llama_vocab & vocab); -llama_token llama_token_cls_impl(const struct llama_vocab & vocab); -llama_token llama_token_sep_impl(const struct llama_vocab & vocab); -llama_token llama_token_nl_impl (const struct llama_vocab & vocab); -llama_token llama_token_pad_impl(const struct llama_vocab & vocab); - -llama_token llama_token_prefix_impl(const struct llama_vocab & vocab); -llama_token llama_token_middle_impl(const struct llama_vocab & vocab); -llama_token llama_token_suffix_impl(const struct llama_vocab & vocab); - -llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab); -llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab); -llama_token llama_token_fim_mid_impl(const 
struct llama_vocab & vocab); -llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab); -llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab); -llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab); - -bool llama_add_bos_token_impl(const struct llama_vocab & vocab); -bool llama_add_eos_token_impl(const struct llama_vocab & vocab); - -int32_t llama_tokenize_impl( - const struct llama_vocab & vocab, - const char * text, - int32_t text_len, - llama_token * tokens, - int32_t n_tokens_max, - bool add_special, - bool parse_special); - -// does not write null-terminator to buf -int32_t llama_token_to_piece_impl( - const struct llama_vocab & vocab, - llama_token token, - char * buf, - int32_t length, - int32_t lstrip, - bool special); - -// check if token0 is contained as a prefix in token1 -bool llama_token_is_prefix_impl( - const struct llama_vocab & vocab, - llama_token token0, - llama_token token1); - -int32_t llama_detokenize_impl( - const struct llama_vocab & vocab, - const llama_token * tokens, - int32_t n_tokens, - char * text, - int32_t text_len_max, - bool remove_special, - bool unparse_special); - -std::string llama_detokenize( - const struct llama_vocab & vocab, - const std::vector & tokens, - bool special); diff --git a/vall_e.cpp/include/llama.h b/vall_e.cpp/include/llama.h index a4abf39..fca2b03 100644 --- a/vall_e.cpp/include/llama.h +++ b/vall_e.cpp/include/llama.h @@ -34,7 +34,6 @@ #define LLAMA_DEFAULT_SEED 0xFFFFFFFF -// TODO: use everywhere in the implementation #define LLAMA_TOKEN_NULL -1 #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' @@ -57,10 +56,11 @@ extern "C" { // TODO: show sample usage // - // struct llama_vocab; // TODO: add in the future + struct llama_vocab; struct llama_model; struct llama_context; struct llama_sampler; + struct llama_kv_cache; typedef int32_t llama_pos; typedef int32_t llama_token; @@ -105,6 +105,11 @@ extern "C" { LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, + LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, + LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, + LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, + LLAMA_VOCAB_PRE_TYPE_TRILLION = 31, + LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32, }; enum llama_rope_type { @@ -213,7 +218,7 @@ extern "C" { LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported }; - // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979) + // TODO: simplify (https://github.com/ggml-org/llama.cpp/pull/9294#pullrequestreview-2286561979) typedef struct llama_token_data { llama_token id; // token id float logit; // log-odds of the token @@ -275,10 +280,18 @@ extern "C" { }; }; + struct llama_model_tensor_buft_override { + const char * pattern; + ggml_backend_buffer_type_t buft; + }; + struct llama_model_params { // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) ggml_backend_dev_t * devices; + // NULL-terminated list of buffer types to use for tensors that match a pattern + const struct llama_model_tensor_buft_override * tensor_buft_overrides; + int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs @@ -288,9 +301,6 @@ extern "C" { // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() const float * tensor_split; - // comma separated list of RPC servers to use for offloading - const char * rpc_servers; 
- // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. // If it returns false, model loading is immediately aborted. @@ -310,7 +320,7 @@ extern "C" { }; // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations - // https://github.com/ggerganov/llama.cpp/pull/7544 + // https://github.com/ggml-org/llama.cpp/pull/7544 struct llama_context_params { uint32_t n_ctx; // text context, 0 = from model uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode @@ -323,7 +333,7 @@ extern "C" { enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id enum llama_attention_type attention_type; // attention type to use for embeddings - // ref: https://github.com/ggerganov/llama.cpp/pull/2054 + // ref: https://github.com/ggml-org/llama.cpp/pull/2054 float rope_freq_base; // RoPE base frequency, 0 = from model float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model @@ -385,10 +395,10 @@ extern "C" { } llama_chat_message; // lora adapter - struct llama_lora_adapter; + struct llama_adapter_lora; // Helpers for getting default parameters - // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172) + // TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172) LLAMA_API struct llama_model_params llama_model_default_params(void); LLAMA_API struct llama_context_params llama_context_default_params(void); LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void); @@ -399,30 +409,53 @@ extern "C" { // Call once at the start of the program LLAMA_API void llama_backend_init(void); + // Call once at the end of the program - currently only used for MPI + LLAMA_API void llama_backend_free(void); + //optional: LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); // Optional: an auto threadpool gets created in ggml if not passed explicitly LLAMA_API void llama_attach_threadpool( - struct llama_context * ctx, - ggml_threadpool_t threadpool, - ggml_threadpool_t threadpool_batch); + struct llama_context * ctx, + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch); + LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); - // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(void); + DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( + const char * path_model, + struct llama_model_params params), + "use llama_model_load_from_file instead"); - LLAMA_API struct llama_model * llama_load_model_from_file( + // Load the model from a file + // If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf + // If the split file name does not follow this pattern, use llama_model_load_from_splits + LLAMA_API struct llama_model * llama_model_load_from_file( const char * path_model, struct llama_model_params params); - LLAMA_API void llama_free_model(struct llama_model * model); + // Load the model from multiple splits (support custom naming scheme) + // The paths must be in the correct order + LLAMA_API struct llama_model * llama_model_load_from_splits( + const char ** paths, + size_t n_paths, + struct 
llama_model_params params); - // TODO: rename to llama_init_from_model - LLAMA_API struct llama_context * llama_new_context_with_model( + DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model), + "use llama_model_free instead"); + + LLAMA_API void llama_model_free(struct llama_model * model); + + LLAMA_API struct llama_context * llama_init_from_model( struct llama_model * model, struct llama_context_params params); + DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model( + struct llama_model * model, + struct llama_context_params params), + "use llama_init_from_model instead"); + // Frees all allocated memory LLAMA_API void llama_free(struct llama_context * ctx); @@ -440,20 +473,32 @@ extern "C" { LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); - LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); - LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); - LLAMA_API int32_t llama_n_embd (const struct llama_model * model); - LLAMA_API int32_t llama_n_layer (const struct llama_model * model); - LLAMA_API int32_t llama_n_head (const struct llama_model * model); + DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead"); + DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead"); + DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead"); + DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead"); - LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); + DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); - LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); - LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); - LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); + LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); + LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx); + LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type + + LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); + LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); + + LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model); + LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model); // Get the model's RoPE frequency scaling factor - LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model); + LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); + + LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_vocab * vocab); + + LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab); // Functions to access the model's GGUF metadata scalar values // - The functions return the length of the string 
on success, or -1 on failure @@ -479,6 +524,10 @@ extern "C" { // Returns the total size of all the tensors in the model in bytes LLAMA_API uint64_t llama_model_size(const struct llama_model * model); + // Get the default chat template. Returns nullptr if not available + // If name is NULL, returns the default chat template + LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name); + // Returns the total number of parameters in the model LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); @@ -501,32 +550,36 @@ extern "C" { const char * fname_out, const llama_model_quantize_params * params); + // + // Adapters + // + // Load a LoRA adapter from file - // The loaded adapter will be associated to the given model, and will be free when the model is deleted - LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( + LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init( struct llama_model * model, const char * path_lora); + // Manually free a LoRA adapter + // Note: loaded adapters will be free when the associated model is deleted + LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter); + + // The following functions operate on a llama_context, hence the naming: llama_verb_... + // Add a loaded LoRA adapter to given context // This will not modify model's weight - LLAMA_API int32_t llama_lora_adapter_set( + LLAMA_API int32_t llama_set_adapter_lora( struct llama_context * ctx, - struct llama_lora_adapter * adapter, + struct llama_adapter_lora * adapter, float scale); // Remove a specific LoRA adapter from given context // Return -1 if the adapter is not present in the context - LLAMA_API int32_t llama_lora_adapter_remove( + LLAMA_API int32_t llama_rm_adapter_lora( struct llama_context * ctx, - struct llama_lora_adapter * adapter); + struct llama_adapter_lora * adapter); // Remove all LoRA adapters from given context - LLAMA_API void llama_lora_adapter_clear( - struct llama_context * ctx); - - // Manually free a LoRA adapter - // Note: loaded adapters will be free when the associated model is deleted - LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); + LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx); // Apply a loaded control vector to a llama_context, or if data is NULL, clear // the currently loaded vector. @@ -534,8 +587,8 @@ extern "C" { // to an n_embd x n_layers buffer starting from layer 1. // il_start and il_end are the layer range the vector should apply to (both inclusive) // See llama_control_vector_load in common to load a control vector. - LLAMA_API int32_t llama_control_vector_apply( - struct llama_context * lctx, + LLAMA_API int32_t llama_apply_adapter_cvec( + struct llama_context * ctx, const float * data, size_t len, int32_t n_embd, @@ -546,6 +599,8 @@ extern "C" { // KV cache // + // TODO: start using struct llama_kv_cache + // Information associated with an individual cell in the KV cache view. struct llama_kv_cache_view_cell { // The position for this cell. Takes KV cache shifts into account. @@ -592,17 +647,26 @@ extern "C" { LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); // Update the KV cache view structure with the current state of the KV cache. 
(use only for debugging purposes) + // TODO: change signature to llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_context * ctx) LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); + /// + // Returns the number of tokens in the KV cache (slow, use only for debug) // If a KV cell has multiple sequences assigned to it, it will be counted multiple times - LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); + LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx); + + DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx), + "use llama_kv_self_n_tokens instead"); // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) - LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); + LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx); + + DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx), + "use llama_kv_self_used_cells instead"); // Clear the KV cache - both cell info is erased and KV data is zeroed - LLAMA_API void llama_kv_cache_clear( + LLAMA_API void llama_kv_self_clear( struct llama_context * ctx); // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) @@ -610,7 +674,7 @@ extern "C" { // seq_id < 0 : match any sequence // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API bool llama_kv_cache_seq_rm( + LLAMA_API bool llama_kv_self_seq_rm( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -620,7 +684,7 @@ extern "C" { // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_cp( + LLAMA_API void llama_kv_self_seq_cp( struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, @@ -628,17 +692,17 @@ extern "C" { llama_pos p1); // Removes all tokens that do not belong to the specified sequence - LLAMA_API void llama_kv_cache_seq_keep( + LLAMA_API void llama_kv_self_seq_keep( struct llama_context * ctx, llama_seq_id seq_id); // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) // If the KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() + // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_add( + LLAMA_API void llama_kv_self_seq_add( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -648,10 +712,10 @@ extern "C" { // Integer division of the positions by factor of `d > 1` // If the KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() + // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_div( + LLAMA_API void llama_kv_self_seq_div( struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, @@ -659,21 +723,76 @@ extern "C" { int d); // Returns the largest position present in the KV cache for the specified sequence - LLAMA_API llama_pos llama_kv_cache_seq_pos_max( + LLAMA_API llama_pos llama_kv_self_seq_pos_max( struct llama_context * ctx, - llama_seq_id seq_id); + llama_seq_id seq_id); // Defragment the KV cache // 
This will be applied: // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); - - // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) - LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); + // - explicitly with llama_kv_self_update() + LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); // Check if the context supports KV cache shifting - LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); + LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx); + + // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) + LLAMA_API void llama_kv_self_update(struct llama_context * ctx); + + DEPRECATED(LLAMA_API void llama_kv_cache_clear( + struct llama_context * ctx), + "use llama_kv_self_clear instead"); + + DEPRECATED(LLAMA_API bool llama_kv_cache_seq_rm( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1), + "use llama_kv_self_seq_rm instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_cp( + struct llama_context * ctx, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1), + "use llama_kv_self_seq_cp instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_keep( + struct llama_context * ctx, + llama_seq_id seq_id), + "use llama_kv_self_seq_keep instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_add( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta), + "use llama_kv_self_seq_add instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_seq_div( + struct llama_context * ctx, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d), + "use llama_kv_self_seq_div instead"); + + DEPRECATED(LLAMA_API llama_pos llama_kv_cache_seq_pos_max( + struct llama_context * ctx, + llama_seq_id seq_id), + "use llama_kv_self_seq_pos_max instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx), + "use llama_kv_self_defrag instead"); + + DEPRECATED(LLAMA_API bool llama_kv_cache_can_shift(const struct llama_context * ctx), + "use llama_kv_self_can_shift instead"); + + DEPRECATED(LLAMA_API void llama_kv_cache_update(struct llama_context * ctx), + "use llama_kv_self_update instead"); + // // State / sessions @@ -837,6 +956,10 @@ extern "C" { // If set to true, the model will only attend to the past tokens LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn); + // Set whether the model is in warmup mode or not + // If true, all model tensors are activated during llama_decode() to load and cache their weights. 
+ LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup); + // Set abort callback LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); @@ -883,41 +1006,60 @@ extern "C" { // Vocab // - LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token); + LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token); - LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token); + LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token); - LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token); + LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token); // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) - LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); + LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token); // Identify if Token Id is a control token or a render-able token - LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token); + LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token); // Special tokens - LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence - LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence - LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn - LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification - LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator - LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line - LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding + LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence + LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence + LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn + LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator + LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line + LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding - LLAMA_API bool llama_add_bos_token(const struct llama_model * model); - LLAMA_API bool llama_add_eos_token(const struct llama_model * model); + LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); + LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); - // infill tokens - DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead"); - DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead"); - DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead"); + LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * 
vocab); + LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab); - LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model); + DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead"); + DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead"); + DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead"); + DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead"); + DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead"); + DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead"); + DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead"); + DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead"); + DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead"); + DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead"); + DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead"); + DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead"); + + // CLS is equivalent to BOS + DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification + "use llama_vocab_bos instead"); // // Tokenization @@ -933,7 +1075,7 @@ extern "C" { /// @param parse_special Allow tokenizing special and/or 
control tokens which otherwise are not exposed and treated /// as plaintext. Does not insert a leading space. LLAMA_API int32_t llama_tokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const char * text, int32_t text_len, llama_token * tokens, @@ -947,7 +1089,7 @@ extern "C" { // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') // @param special If true, special tokens are rendered in the output. LLAMA_API int32_t llama_token_to_piece( - const struct llama_model * model, + const struct llama_vocab * vocab, llama_token token, char * buf, int32_t length, @@ -961,7 +1103,7 @@ extern "C" { /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. /// @param unparse_special If true, special tokens are rendered in the output. LLAMA_API int32_t llama_detokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const llama_token * tokens, int32_t n_tokens, char * text, @@ -975,7 +1117,7 @@ extern "C" { /// Apply chat template. Inspired by hf apply_chat_template() on python. /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" - /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template + /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead. /// @param chat Pointer to a list of multiple llama_chat_message /// @param n_msg Number of llama_chat_message in this chat @@ -984,7 +1126,6 @@ extern "C" { /// @param length The size of the allocated buffer /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. LLAMA_API int32_t llama_chat_apply_template( - const struct llama_model * model, const char * tmpl, const struct llama_chat_message * chat, size_t n_msg, @@ -1032,7 +1173,6 @@ extern "C" { // llama_sampler_free(smpl); // // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). - // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab // typedef void * llama_sampler_context_t; @@ -1051,11 +1191,12 @@ extern "C" { }; struct llama_sampler { - struct llama_sampler_i * iface; - llama_sampler_context_t ctx; + const struct llama_sampler_i * iface; + llama_sampler_context_t ctx; }; // mirror of llama_sampler_i: + LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx); LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token); LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p); @@ -1085,7 +1226,7 @@ extern "C" { /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. 
For example, apply top-k or top-p sampling first. DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void), - "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)"); + "will be removed in the future (see https://github.com/ggml-org/llama.cpp/pull/9896#discussion_r1800920915)"); /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); @@ -1093,7 +1234,7 @@ extern "C" { /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep); - /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + /// @details Minimum P sampling as described in https://github.com/ggml-org/llama.cpp/pull/3841 LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep); /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. @@ -1108,6 +1249,9 @@ extern "C" { /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335 LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed); + /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641 + LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n); + /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. @@ -1131,11 +1275,39 @@ extern "C" { float tau, float eta); + /// @details Intializes a GBNF grammar, see grammars/README.md for details. + /// @param vocab The vocabulary that this grammar will be used with. + /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if empty. Returns NULL if parsing of grammar_str fails. + /// @param grammar_root The name of the start symbol for the grammar. LLAMA_API struct llama_sampler * llama_sampler_init_grammar( - const struct llama_model * model, + const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root); + DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens), + "use llama_sampler_init_grammar_lazy_patterns instead"); + + + /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639 + /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. 
Pattern will be matched from the start of the generation output, and grammar sampler will be fed content starting from its first match group. + /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. Grammar sampler will be fed content starting from the trigger token included. + LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + const char ** trigger_patterns, + size_t num_trigger_patterns, + const llama_token * trigger_tokens, + size_t num_trigger_tokens); + + /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. LLAMA_API struct llama_sampler * llama_sampler_init_penalties( int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) @@ -1144,8 +1316,9 @@ extern "C" { float penalty_present); // 0.0 = disabled /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 - LLAMA_API struct llama_sampler * llama_sampler_init_dry( - const struct llama_model * model, + LLAMA_API struct llama_sampler * llama_sampler_init_dry( + const struct llama_vocab * vocab, + int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, @@ -1179,7 +1352,7 @@ extern "C" { // 3. discard non-EOG tokens with low prob // 4. if no tokens are left -> pick EOT // - LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model); + LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab); // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); diff --git a/vall_e.cpp/include/llama.modified.h b/vall_e.cpp/include/llama.modified.h deleted file mode 100644 index 59f2ac1..0000000 --- a/vall_e.cpp/include/llama.modified.h +++ /dev/null @@ -1,1277 +0,0 @@ -#ifndef LLAMA_H -#define LLAMA_H - -#include "ggml.h" -#include "ggml-cpu.h" -#include "ggml-backend.h" - -#include -#include -#include -#include - -#ifdef LLAMA_SHARED -# if defined(_WIN32) && !defined(__MINGW32__) -# ifdef LLAMA_BUILD -# define LLAMA_API __declspec(dllexport) -# else -# define LLAMA_API __declspec(dllimport) -# endif -# else -# define LLAMA_API __attribute__ ((visibility ("default"))) -# endif -#else -# define LLAMA_API -#endif - -#ifdef __GNUC__ -# define DEPRECATED(func, hint) func __attribute__((deprecated(hint))) -#elif defined(_MSC_VER) -# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func -#else -# define DEPRECATED(func, hint) func -#endif - -#define LLAMA_DEFAULT_SEED 0xFFFFFFFF - -// TODO: use everywhere in the implementation -#define LLAMA_TOKEN_NULL -1 - -#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' -#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' -#define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq' - -#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN -#define LLAMA_SESSION_VERSION 9 - -#define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ -#define LLAMA_STATE_SEQ_VERSION 2 - -#ifdef __cplusplus -extern "C" { -#endif - - // - // C interface - // - // TODO: show sample usage - // - - // struct llama_vocab; // TODO: add in the future - struct llama_model; - struct llama_context; - struct llama_sampler; - 
- typedef int32_t llama_pos; - typedef int32_t llama_token; - typedef int32_t llama_seq_id; - - enum llama_vocab_type { - LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab - LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback - LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE - LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece - LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram - LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization - }; - - // pre-tokenization types - enum llama_vocab_pre_type { - LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0, - LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1, - LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2, - LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3, - LLAMA_VOCAB_PRE_TYPE_FALCON = 4, - LLAMA_VOCAB_PRE_TYPE_MPT = 5, - LLAMA_VOCAB_PRE_TYPE_STARCODER = 6, - LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, - LLAMA_VOCAB_PRE_TYPE_REFACT = 8, - LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, - LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10, - LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11, - LLAMA_VOCAB_PRE_TYPE_OLMO = 12, - LLAMA_VOCAB_PRE_TYPE_DBRX = 13, - LLAMA_VOCAB_PRE_TYPE_SMAUG = 14, - LLAMA_VOCAB_PRE_TYPE_PORO = 15, - LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16, - LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17, - LLAMA_VOCAB_PRE_TYPE_VIKING = 18, - LLAMA_VOCAB_PRE_TYPE_JAIS = 19, - LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20, - LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21, - LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22, - LLAMA_VOCAB_PRE_TYPE_BLOOM = 23, - LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, - LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, - LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, - LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, - }; - - enum llama_rope_type { - LLAMA_ROPE_TYPE_NONE = -1, - LLAMA_ROPE_TYPE_NORM = 0, - LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, - LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, - LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, - }; - - enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file - LLAMA_TOKEN_TYPE_UNDEFINED = 0, - LLAMA_TOKEN_TYPE_NORMAL = 1, - LLAMA_TOKEN_TYPE_UNKNOWN = 2, - LLAMA_TOKEN_TYPE_CONTROL = 3, - LLAMA_TOKEN_TYPE_USER_DEFINED = 4, - LLAMA_TOKEN_TYPE_UNUSED = 5, - LLAMA_TOKEN_TYPE_BYTE = 6, - }; - - enum llama_token_attr { - LLAMA_TOKEN_ATTR_UNDEFINED = 0, - LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0, - LLAMA_TOKEN_ATTR_UNUSED = 1 << 1, - LLAMA_TOKEN_ATTR_NORMAL = 1 << 2, - LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL? 
- LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4, - LLAMA_TOKEN_ATTR_BYTE = 1 << 5, - LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6, - LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7, - LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8, - LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9, - }; - - // model file types - enum llama_ftype { - LLAMA_FTYPE_ALL_F32 = 0, - LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors - // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 - // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed - // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed - LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors - LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors - //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack - //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack - //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack - LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors - LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors - - LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file - }; - - enum llama_rope_scaling_type { - LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1, - LLAMA_ROPE_SCALING_TYPE_NONE = 0, - LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, - LLAMA_ROPE_SCALING_TYPE_YARN = 2, - LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, - LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE, - }; - - enum llama_pooling_type { - LLAMA_POOLING_TYPE_UNSPECIFIED = -1, - LLAMA_POOLING_TYPE_NONE = 0, - LLAMA_POOLING_TYPE_MEAN = 1, - LLAMA_POOLING_TYPE_CLS = 2, - LLAMA_POOLING_TYPE_LAST = 3, - LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph - }; - - enum llama_attention_type { - LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1, - LLAMA_ATTENTION_TYPE_CAUSAL = 0, - LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1, - }; - - enum llama_split_mode { - LLAMA_SPLIT_MODE_NONE = 0, // single GPU - LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs - LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported - }; 
- - // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979) - typedef struct llama_token_data { - llama_token id; // token id - float logit; // log-odds of the token - float p; // probability of the token - } llama_token_data; - - typedef struct llama_token_data_array { - // TODO: consider SoA - // NOTE: this pointer can be modified by the samplers - llama_token_data * data; - size_t size; - int64_t selected; // this is the index in the data array (i.e. not the token id) - bool sorted; - } llama_token_data_array; - - typedef bool (*llama_progress_callback)(float progress, void * user_data); - - // Input data for llama_decode - // A llama_batch object can contain input about one or many sequences - // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens - // - // - token : the token ids of the input (used when embd is NULL) - // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) - // - pos : the positions of the respective token in the sequence - // (if set to NULL, the token position will be tracked automatically by llama_decode) - // - seq_id : the sequence to which the respective token belongs - // (if set to NULL, the sequence ID will be assumed to be 0) - // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output - // (if set to NULL, only the logits for last token will be returned) - // - typedef struct llama_batch { - int32_t n_tokens; - - llama_token * token; - float * embd; - llama_pos * pos; - int32_t * n_seq_id; - llama_seq_id ** seq_id; - int8_t * logits; // TODO: rename this to "output" - } llama_batch; - - enum llama_model_kv_override_type { - LLAMA_KV_OVERRIDE_TYPE_INT, - LLAMA_KV_OVERRIDE_TYPE_FLOAT, - LLAMA_KV_OVERRIDE_TYPE_BOOL, - LLAMA_KV_OVERRIDE_TYPE_STR, - }; - - struct llama_model_kv_override { - enum llama_model_kv_override_type tag; - - char key[128]; - - union { - int64_t val_i64; - double val_f64; - bool val_bool; - char val_str[128]; - }; - }; - - struct llama_model_params { - // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) - ggml_backend_dev_t * devices; - - int32_t n_gpu_layers; // number of layers to store in VRAM - enum llama_split_mode split_mode; // how to split the model across multiple GPUs - - // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE - int32_t main_gpu; - - // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() - const float * tensor_split; - - // comma separated list of RPC servers to use for offloading - const char * rpc_servers; - - // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. - // If the provided progress_callback returns true, model loading continues. - // If it returns false, model loading is immediately aborted. - llama_progress_callback progress_callback; - - // context pointer passed to the progress callback - void * progress_callback_user_data; - - // override key-value pairs of the model meta data - const struct llama_model_kv_override * kv_overrides; - - // Keep the booleans together to avoid misalignment during copy-by-value. 
- bool vocab_only; // only load the vocabulary, no weights - bool use_mmap; // use mmap if possible - bool use_mlock; // force system to keep model in RAM - bool check_tensors; // validate model tensor data - }; - - // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations - // https://github.com/ggerganov/llama.cpp/pull/7544 - struct llama_context_params { - uint32_t n_ctx; // text context, 0 = from model - uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode - uint32_t n_ubatch; // physical maximum batch size - uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models) - int32_t n_threads; // number of threads to use for generation - int32_t n_threads_batch; // number of threads to use for batch processing - - enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` - enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id - enum llama_attention_type attention_type; // attention type to use for embeddings - - // ref: https://github.com/ggerganov/llama.cpp/pull/2054 - float rope_freq_base; // RoPE base frequency, 0 = from model - float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model - float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model - float yarn_attn_factor; // YaRN magnitude scaling factor - float yarn_beta_fast; // YaRN low correction dim - float yarn_beta_slow; // YaRN high correction dim - uint32_t yarn_orig_ctx; // YaRN original context size - float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default) - - ggml_backend_sched_eval_callback cb_eval; - void * cb_eval_user_data; - - enum ggml_type type_k; // data type for K cache [EXPERIMENTAL] - enum ggml_type type_v; // data type for V cache [EXPERIMENTAL] - - // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value. 
- // TODO: move at the end of the struct - bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) - bool embeddings; // if true, extract embeddings (together with logits) - bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU - bool flash_attn; // whether to use flash attention [EXPERIMENTAL] - bool no_perf; // whether to measure performance timings - - // Abort callback - // if it returns true, execution of llama_decode() will be aborted - // currently works only with CPU execution - ggml_abort_callback abort_callback; - void * abort_callback_data; - }; - - // VALL-E specific stuff that needs a home - struct llama_vall_e_userdata { - struct ggml_tensor* prom_embds[12]; // contains input prompt audio embeddings - struct ggml_tensor* resp_embds[12]; // contains output audio embeddings - struct ggml_tensor* aux_embds[12]; // contains non-audio embeddings - struct ggml_tensor* heads[12]; // contains our classifier tensors - }; - - // model quantization parameters - typedef struct llama_model_quantize_params { - int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() - enum llama_ftype ftype; // quantize to this llama_ftype - enum ggml_type output_tensor_type; // output tensor type - enum ggml_type token_embedding_type; // token embeddings tensor type - bool allow_requantize; // allow quantizing non-f32/f16 tensors - bool quantize_output_tensor; // quantize output.weight - bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored - bool pure; // quantize all tensors to the default type - bool keep_split; // quantize to the same number of shards - void * imatrix; // pointer to importance matrix data - void * kv_overrides; // pointer to vector containing overrides - } llama_model_quantize_params; - - typedef struct llama_logit_bias { - llama_token token; - float bias; - } llama_logit_bias; - - typedef struct llama_sampler_chain_params { - bool no_perf; // whether to measure performance timings - } llama_sampler_chain_params; - - // used in chat template - typedef struct llama_chat_message { - const char * role; - const char * content; - } llama_chat_message; - - // lora adapter - struct llama_lora_adapter; - - // Helpers for getting default parameters - // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172) - LLAMA_API struct llama_model_params llama_model_default_params(void); - LLAMA_API struct llama_context_params llama_context_default_params(void); - LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void); - LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); - - // Initialize the llama + ggml backend - // If numa is true, use NUMA optimizations - // Call once at the start of the program - LLAMA_API void llama_backend_init(void); - - //optional: - LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); - - // Optional: an auto threadpool gets created in ggml if not passed explicitly - LLAMA_API void llama_attach_threadpool( - struct llama_context * ctx, - ggml_threadpool_t threadpool, - ggml_threadpool_t threadpool_batch); - LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); - - // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(void); - - LLAMA_API struct llama_model * 
llama_load_model_from_file( - const char * path_model, - struct llama_model_params params); - - LLAMA_API void llama_free_model(struct llama_model * model); - - // TODO: rename to llama_init_from_model - LLAMA_API struct llama_context * llama_new_context_with_model( - struct llama_model * model, - struct llama_context_params params); - - // Frees all allocated memory - LLAMA_API void llama_free(struct llama_context * ctx); - - LLAMA_API int64_t llama_time_us(void); - - LLAMA_API size_t llama_max_devices(void); - - LLAMA_API bool llama_supports_mmap (void); - LLAMA_API bool llama_supports_mlock (void); - LLAMA_API bool llama_supports_gpu_offload(void); - LLAMA_API bool llama_supports_rpc (void); - - LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); - - LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); - LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); - LLAMA_API int32_t llama_n_embd (const struct llama_model * model); - LLAMA_API int32_t llama_n_layer (const struct llama_model * model); - LLAMA_API int32_t llama_n_head (const struct llama_model * model); - - LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); - - LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); - LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); - LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); - - // Get the model's RoPE frequency scaling factor - LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model); - - // Functions to access the model's GGUF metadata scalar values - // - The functions return the length of the string on success, or -1 on failure - // - The output string is always null-terminated and cleared on failure - // - When retrieving a string, an extra byte must be allocated to account for the null terminator - // - GGUF array values are not supported by these functions - - // Get metadata value as a string by key name - LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size); - - // Get the number of metadata key/value pairs - LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model); - - // Get metadata key name by index - LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); - - // Get metadata value as a string by index - LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); - - // Get a string describing the model type - LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); - - // Returns the total size of all the tensors in the model in bytes - LLAMA_API uint64_t llama_model_size(const struct llama_model * model); - - // Returns the total number of parameters in the model - LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); - - // Returns true if the model contains an encoder that requires llama_encode() call - LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); - - // Returns true if the model contains a decoder that requires llama_decode() call - LLAMA_API 
bool llama_model_has_decoder(const struct llama_model * model); - - // For encoder-decoder models, this function returns id of the token that must be provided - // to the decoder to start generating output sequence. For other models, it returns -1. - LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model); - - // Returns true if the model is recurrent (like Mamba, RWKV, etc.) - LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model); - - // Returns 0 on success - LLAMA_API uint32_t llama_model_quantize( - const char * fname_inp, - const char * fname_out, - const llama_model_quantize_params * params); - - // Load a LoRA adapter from file - // The loaded adapter will be associated to the given model, and will be free when the model is deleted - LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( - struct llama_model * model, - const char * path_lora); - - // Add a loaded LoRA adapter to given context - // This will not modify model's weight - LLAMA_API int32_t llama_lora_adapter_set( - struct llama_context * ctx, - struct llama_lora_adapter * adapter, - float scale); - - // Remove a specific LoRA adapter from given context - // Return -1 if the adapter is not present in the context - LLAMA_API int32_t llama_lora_adapter_remove( - struct llama_context * ctx, - struct llama_lora_adapter * adapter); - - // Remove all LoRA adapters from given context - LLAMA_API void llama_lora_adapter_clear( - struct llama_context * ctx); - - // Manually free a LoRA adapter - // Note: loaded adapters will be free when the associated model is deleted - LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); - - // Apply a loaded control vector to a llama_context, or if data is NULL, clear - // the currently loaded vector. - // n_embd should be the size of a single layer's control, and data should point - // to an n_embd x n_layers buffer starting from layer 1. - // il_start and il_end are the layer range the vector should apply to (both inclusive) - // See llama_control_vector_load in common to load a control vector. - LLAMA_API int32_t llama_control_vector_apply( - struct llama_context * lctx, - const float * data, - size_t len, - int32_t n_embd, - int32_t il_start, - int32_t il_end); - - // - // KV cache - // - - // Information associated with an individual cell in the KV cache view. - struct llama_kv_cache_view_cell { - // The position for this cell. Takes KV cache shifts into account. - // May be negative if the cell is not populated. - llama_pos pos; - }; - - // An updateable view of the KV cache. - struct llama_kv_cache_view { - // Number of KV cache cells. This will be the same as the context size. - int32_t n_cells; - - // Maximum number of sequences that can exist in a cell. It's not an error - // if there are more sequences in a cell than this value, however they will - // not be visible in the view cells_sequences. - int32_t n_seq_max; - - // Number of tokens in the cache. For example, if there are two populated - // cells, the first with 1 sequence id in it and the second with 2 sequence - // ids then you'll have 3 tokens. - int32_t token_count; - - // Number of populated cache cells. - int32_t used_cells; - - // Maximum contiguous empty slots in the cache. - int32_t max_contiguous; - - // Index to the start of the max_contiguous slot range. Can be negative - // when cache is full. - int32_t max_contiguous_idx; - - // Information for an individual cell. 
- struct llama_kv_cache_view_cell * cells; - - // The sequences for each cell. There will be n_seq_max items per cell. - llama_seq_id * cells_sequences; - }; - - // Create an empty KV cache view. (use only for debugging purposes) - LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max); - - // Free a KV cache view. (use only for debugging purposes) - LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); - - // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) - LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); - - // Returns the number of tokens in the KV cache (slow, use only for debug) - // If a KV cell has multiple sequences assigned to it, it will be counted multiple times - LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); - - // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) - LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); - - // Clear the KV cache - both cell info is erased and KV data is zeroed - LLAMA_API void llama_kv_cache_clear( - struct llama_context * ctx); - - // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) - // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails - // seq_id < 0 : match any sequence - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API bool llama_kv_cache_seq_rm( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1); - - // Copy all tokens that belong to the specified sequence to another sequence - // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_cp( - struct llama_context * ctx, - llama_seq_id seq_id_src, - llama_seq_id seq_id_dst, - llama_pos p0, - llama_pos p1); - - // Removes all tokens that do not belong to the specified sequence - LLAMA_API void llama_kv_cache_seq_keep( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) - // If the KV cache is RoPEd, the KV data is updated accordingly: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_add( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - llama_pos delta); - - // Integer division of the positions by factor of `d > 1` - // If the KV cache is RoPEd, the KV data is updated accordingly: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_div( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - int d); - - // Returns the largest position present in the KV cache for the specified sequence - LLAMA_API llama_pos llama_kv_cache_seq_pos_max( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Defragment the KV cache - // This will be applied: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); - - // Apply the KV cache 
updates (such as K-shifts, defragmentation, etc.) - LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); - - // Check if the context supports KV cache shifting - LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); - - // - // State / sessions - // - - // Returns the *actual* size in bytes of the state - // (logits, embedding and kv_cache) - // Only use when saving the state, not when restoring it, otherwise the size may be too small. - LLAMA_API size_t llama_state_get_size(struct llama_context * ctx); - LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx), - "use llama_state_get_size instead"); - - // Copies the state to the specified destination address. - // Destination needs to have allocated enough memory. - // Returns the number of bytes copied - LLAMA_API size_t llama_state_get_data( - struct llama_context * ctx, - uint8_t * dst, - size_t size); - LLAMA_API DEPRECATED(size_t llama_copy_state_data( - struct llama_context * ctx, - uint8_t * dst), - "use llama_state_get_data instead"); - - // Set the state reading from the specified address - // Returns the number of bytes read - LLAMA_API size_t llama_state_set_data( - struct llama_context * ctx, - const uint8_t * src, - size_t size); - LLAMA_API DEPRECATED(size_t llama_set_state_data( - struct llama_context * ctx, - const uint8_t * src), - "use llama_state_set_data instead"); - - // Save/load session file - LLAMA_API bool llama_state_load_file( - struct llama_context * ctx, - const char * path_session, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out); - LLAMA_API DEPRECATED(bool llama_load_session_file( - struct llama_context * ctx, - const char * path_session, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out), - "use llama_state_load_file instead"); - - LLAMA_API bool llama_state_save_file( - struct llama_context * ctx, - const char * path_session, - const llama_token * tokens, - size_t n_token_count); - LLAMA_API DEPRECATED(bool llama_save_session_file( - struct llama_context * ctx, - const char * path_session, - const llama_token * tokens, - size_t n_token_count), - "use llama_state_save_file instead"); - - // Get the exact size needed to copy the KV cache of a single sequence - LLAMA_API size_t llama_state_seq_get_size( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Copy the KV cache of a single sequence into the specified buffer - LLAMA_API size_t llama_state_seq_get_data( - struct llama_context * ctx, - uint8_t * dst, - size_t size, - llama_seq_id seq_id); - - // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence - // Returns: - // - Positive: Ok - // - Zero: Failed to load - LLAMA_API size_t llama_state_seq_set_data( - struct llama_context * ctx, - const uint8_t * src, - size_t size, - llama_seq_id dest_seq_id); - - LLAMA_API size_t llama_state_seq_save_file( - struct llama_context * ctx, - const char * filepath, - llama_seq_id seq_id, - const llama_token * tokens, - size_t n_token_count); - - LLAMA_API size_t llama_state_seq_load_file( - struct llama_context * ctx, - const char * filepath, - llama_seq_id dest_seq_id, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out); - - // - // Decoding - // - - // Return batch for single sequence of tokens - // The sequence ID will be fixed to 0 - // The position of the tokens will be tracked automatically by llama_decode - // - // NOTE: this is a helper function to 
facilitate transition to the new batch API - avoid using it - // - LLAMA_API struct llama_batch llama_batch_get_one( - llama_token * tokens, - int32_t n_tokens); - - // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens - // Each token can be assigned up to n_seq_max sequence ids - // The batch has to be freed with llama_batch_free() - // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float) - // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token - // The rest of the llama_batch members are allocated with size n_tokens - // All members are left uninitialized - LLAMA_API struct llama_batch llama_batch_init( - int32_t n_tokens, - int32_t embd, - int32_t n_seq_max); - - // Frees a batch of tokens allocated with llama_batch_init() - LLAMA_API void llama_batch_free(struct llama_batch batch); - - // Processes a batch of tokens with the ecoder part of the encoder-decoder model. - // Stores the encoder output internally for later use by the decoder cross-attention layers. - // 0 - success - // < 0 - error. the KV cache state is restored to the state before this call - LLAMA_API int32_t llama_encode( - struct llama_context * ctx, - struct llama_batch batch); - - // Positive return values does not mean a fatal error, but rather a warning. - // 0 - success - // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) - // < 0 - error. the KV cache state is restored to the state before this call - LLAMA_API int32_t llama_decode( - struct llama_context * ctx, - struct llama_batch batch); - - // Set the number of threads used for decoding - // n_threads is the number of threads used for generation (single token) - // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens) - LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch); - - // Get the number of threads used for generation of a single token. - LLAMA_API int32_t llama_n_threads(struct llama_context * ctx); - - // Get the number of threads used for prompt and batch processing (multiple token). - LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); - - // Set whether the model is in embeddings mode or not - // If true, embeddings will be returned but logits will not - LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); - - // Set whether to use causal attention or not - // If set to true, the model will only attend to the past tokens - LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn); - - // Set abort callback - LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); - - // Wait until all computations are finished - // This is automatically done when using one of the functions below to obtain the computation results - // and is not necessary to call it explicitly in most cases - LLAMA_API void llama_synchronize(struct llama_context * ctx); - - // Token logits obtained from the last call to llama_decode() - // The logits for which llama_batch.logits[i] != 0 are stored contiguously - // in the order they have appeared in the batch. - // Rows: number of tokens for which llama_batch.logits[i] != 0 - // Cols: n_vocab - LLAMA_API float * llama_get_logits(struct llama_context * ctx); - - // Logits for the ith token. 
For positive indices, Equivalent to: - // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab - // Negative indicies can be used to access logits in reverse order, -1 is the last logit. - // returns NULL for invalid ids. - LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i); - - /* BEGIN VALL-E SPECIFIC HELPERS */ - // returns the tensor to the model's model.output - LLAMA_API struct ggml_tensor * llama_get_output_head_tensor(struct llama_model * model); - // returns the tensor to the model's model.tok_embd - LLAMA_API struct ggml_tensor * llama_get_embedding_weights(struct llama_model * model); - // sets target output head by direct tensor - LLAMA_API void llama_set_output_head(struct llama_model * ctx, struct ggml_tensor *); - // fetches VALL-E userdata (which currently contains the embeddings + output heads) - LLAMA_API struct llama_vall_e_userdata * llama_get_vall_e_userdata(struct llama_model * model); - /* END VALL-E SPECIFIC HELPERS */ - - // Get all output token embeddings. - // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model, - // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously - // in the order they have appeared in the batch. - // shape: [n_outputs*n_embd] - // Otherwise, returns NULL. - LLAMA_API float * llama_get_embeddings(struct llama_context * ctx); - - // Get the embeddings for the ith token. For positive indices, Equivalent to: - // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd - // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding. - // shape: [n_embd] (1-dimensional) - // returns NULL for invalid ids. - LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i); - - // Get the embeddings for a sequence id - // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE - // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence - // otherwise: float[n_embd] (1-dimensional) - LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id); - - // - // Vocab - // - - LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token); - - LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token); - - LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token); - - // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) 
- LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); - - // Identify if Token Id is a control token or a render-able token - LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token); - - // Special tokens - LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence - LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence - LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn - LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification - LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator - LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line - LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding - - LLAMA_API bool llama_add_bos_token(const struct llama_model * model); - LLAMA_API bool llama_add_eos_token(const struct llama_model * model); - - // infill tokens - DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead"); - DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead"); - DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead"); - - LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model); - - // - // Tokenization - // - // The API is thread-safe. - // - - /// @details Convert the provided text into tokens. - /// @param tokens The tokens pointer must be large enough to hold the resulting tokens. - /// @return Returns the number of tokens on success, no more than n_tokens_max - /// @return Returns a negative number on failure - the number of tokens that would have been returned - /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so. - /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated - /// as plaintext. Does not insert a leading space. - LLAMA_API int32_t llama_tokenize( - const struct llama_model * model, - const char * text, - int32_t text_len, - llama_token * tokens, - int32_t n_tokens_max, - bool add_special, - bool parse_special); - - // Token Id -> Piece. - // Uses the vocabulary in the provided context. - // Does not write null terminator to the buffer. - // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') - // @param special If true, special tokens are rendered in the output. - LLAMA_API int32_t llama_token_to_piece( - const struct llama_model * model, - llama_token token, - char * buf, - int32_t length, - int32_t lstrip, - bool special); - - /// @details Convert the provided tokens into text (inverse of llama_tokenize()). - /// @param text The char pointer must be large enough to hold the resulting text. 
- /// @return Returns the number of chars/bytes on success, no more than text_len_max. - /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned. - /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. - /// @param unparse_special If true, special tokens are rendered in the output. - LLAMA_API int32_t llama_detokenize( - const struct llama_model * model, - const llama_token * tokens, - int32_t n_tokens, - char * text, - int32_t text_len_max, - bool remove_special, - bool unparse_special); - - // - // Chat templates - // - - /// Apply chat template. Inspired by hf apply_chat_template() on python. - /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" - /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template - /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead. - /// @param chat Pointer to a list of multiple llama_chat_message - /// @param n_msg Number of llama_chat_message in this chat - /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message. - /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages) - /// @param length The size of the allocated buffer - /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. - LLAMA_API int32_t llama_chat_apply_template( - const struct llama_model * model, - const char * tmpl, - const struct llama_chat_message * chat, - size_t n_msg, - bool add_ass, - char * buf, - int32_t length); - - // Get list of built-in chat templates - LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); - - // - // Sampling API - // - // Sample usage: - // - // // prepare the sampling chain at the start - // auto sparams = llama_sampler_chain_default_params(); - // - // llama_sampler * smpl = llama_sampler_chain_init(sparams); - // - // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50)); - // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1)); - // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8)); - // - // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat" - // // this sampler will be responsible to select the actual token - // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed)); - // - // ... - // - // // decoding loop: - // while (...) { - // ... - // - // llama_decode(ctx, batch); - // - // // sample from the logits of the last token in the batch - // const llama_token id = llama_sampler_sample(smpl, ctx, -1); - // - // // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.) - // llama_sampler_accept(smpl, id); - // ... - // } - // - // llama_sampler_free(smpl); - // - // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). 
- // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab - // - - typedef void * llama_sampler_context_t; - - // user code can implement the interface below in order to create custom llama_sampler - struct llama_sampler_i { - const char * (*name) (const struct llama_sampler * smpl); // can be NULL - void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL - void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required - void (*reset) ( struct llama_sampler * smpl); // can be NULL - struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL - void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL - - // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph - //void (*apply_ggml) (struct llama_sampler * smpl, ...); - }; - - struct llama_sampler { - struct llama_sampler_i * iface; - llama_sampler_context_t ctx; - }; - - // mirror of llama_sampler_i: - LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); - LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token); - LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p); - LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl); - LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl); - // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add) - LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl); - - // llama_sampler_chain - // a type of llama_sampler that can chain multiple samplers one after another - - LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params); - - // important: takes ownership of the sampler object and will free it when llama_sampler_free is called - LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl); - LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i); - LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain); - - // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed - LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i); - - // available samplers: - - LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void); - LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed); - - /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. - /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first. 
- DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void), - "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)"); - - /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 - LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); - - /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 - LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep); - - /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 - LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep); - - /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. - LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep); - - /// #details Updates the logits l_i` = l_i/t. When t <= 0.0f, the maximum logit is kept at it's original value, the rest are set to -inf - LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t); - - /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772. - LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent); - - /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335 - LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed); - - /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. - /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. - /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. - /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. - /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. - /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. - LLAMA_API struct llama_sampler * llama_sampler_init_mirostat( - int32_t n_vocab, - uint32_t seed, - float tau, - float eta, - int32_t m); - - /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. - /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. 
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. - /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. - /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. - LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2( - uint32_t seed, - float tau, - float eta); - - LLAMA_API struct llama_sampler * llama_sampler_init_grammar( - const struct llama_model * model, - const char * grammar_str, - const char * grammar_root); - - /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. - LLAMA_API struct llama_sampler * llama_sampler_init_penalties( - int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) - float penalty_repeat, // 1.0 = disabled - float penalty_freq, // 0.0 = disabled - float penalty_present); // 0.0 = disabled - - /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 - LLAMA_API struct llama_sampler * llama_sampler_init_dry( - const struct llama_model * model, - float dry_multiplier, - float dry_base, - int32_t dry_allowed_length, - int32_t dry_penalty_last_n, - const char ** seq_breakers, - size_t num_breakers); - - LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( - int32_t n_vocab, - int32_t n_logit_bias, - const llama_logit_bias * logit_bias); - - // this sampler is meant to be used for fill-in-the-middle infilling - // it's supposed to be used after top_k + top_p sampling - // - // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG - // 2. combine probs of tokens that have the same prefix - // - // example: - // - // - before: - // "hel": 0.5 - // "hell": 0.2 - // "hello": 0.1 - // "dummy": 0.1 - // - // - after: - // "hel": 0.8 - // "dummy": 0.1 - // - // 3. discard non-EOG tokens with low prob - // 4. if no tokens are left -> pick EOT - // - LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model); - - // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise - LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); - - /// @details Sample and accept a token from the idx-th output of the last evaluation - // - // Shorthand for: - // const auto * logits = llama_get_logits_ith(ctx, idx); - // llama_token_data_array cur_p = { ... init from logits ... 
}; - // llama_sampler_apply(smpl, &cur_p); - // auto token = cur_p.data[cur_p.selected].id; - // llama_sampler_accept(smpl, token); - // return token; - // Returns the sampled token - LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx); - - // TODO: extend in the future - //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...); - - // - // Model split - // - - /// @details Build a split GGUF final path for this chunk. - /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf" - // Returns the split_path length. - LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count); - - /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match. - /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0" - // Returns the split_prefix length. - LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count); - - // Print system information - LLAMA_API const char * llama_print_system_info(void); - - // Set callback for all future logging events. - // If this is not called, or NULL is supplied, everything is output on stderr. - LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); - - // - // Performance utils - // - // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements. - // - - struct llama_perf_context_data { - double t_start_ms; - double t_load_ms; - double t_p_eval_ms; - double t_eval_ms; - - int32_t n_p_eval; - int32_t n_eval; - }; - - struct llama_perf_sampler_data { - double t_sample_ms; - - int32_t n_sample; - }; - - LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx); - LLAMA_API void llama_perf_context_print(const struct llama_context * ctx); - LLAMA_API void llama_perf_context_reset( struct llama_context * ctx); - - // NOTE: the following work only with samplers constructed via llama_sampler_chain_init - LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain); - LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain); - LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain); - -#ifdef __cplusplus -} -#endif - -#endif // LLAMA_H diff --git a/vall_e.cpp/include/llama.vanilla.h b/vall_e.cpp/include/llama.vanilla.h deleted file mode 100644 index a4abf39..0000000 --- a/vall_e.cpp/include/llama.vanilla.h +++ /dev/null @@ -1,1258 +0,0 @@ -#ifndef LLAMA_H -#define LLAMA_H - -#include "ggml.h" -#include "ggml-cpu.h" -#include "ggml-backend.h" - -#include -#include -#include -#include - -#ifdef LLAMA_SHARED -# if defined(_WIN32) && !defined(__MINGW32__) -# ifdef LLAMA_BUILD -# define LLAMA_API __declspec(dllexport) -# else -# define LLAMA_API __declspec(dllimport) -# endif -# else -# define LLAMA_API __attribute__ ((visibility ("default"))) -# endif -#else -# define LLAMA_API -#endif - -#ifdef __GNUC__ -# define DEPRECATED(func, hint) func __attribute__((deprecated(hint))) -#elif defined(_MSC_VER) -# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func -#else -# define DEPRECATED(func, hint) func -#endif - -#define 
LLAMA_DEFAULT_SEED 0xFFFFFFFF - -// TODO: use everywhere in the implementation -#define LLAMA_TOKEN_NULL -1 - -#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' -#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' -#define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq' - -#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN -#define LLAMA_SESSION_VERSION 9 - -#define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ -#define LLAMA_STATE_SEQ_VERSION 2 - -#ifdef __cplusplus -extern "C" { -#endif - - // - // C interface - // - // TODO: show sample usage - // - - // struct llama_vocab; // TODO: add in the future - struct llama_model; - struct llama_context; - struct llama_sampler; - - typedef int32_t llama_pos; - typedef int32_t llama_token; - typedef int32_t llama_seq_id; - - enum llama_vocab_type { - LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab - LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback - LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE - LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece - LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram - LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization - }; - - // pre-tokenization types - enum llama_vocab_pre_type { - LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0, - LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1, - LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2, - LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3, - LLAMA_VOCAB_PRE_TYPE_FALCON = 4, - LLAMA_VOCAB_PRE_TYPE_MPT = 5, - LLAMA_VOCAB_PRE_TYPE_STARCODER = 6, - LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, - LLAMA_VOCAB_PRE_TYPE_REFACT = 8, - LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, - LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10, - LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11, - LLAMA_VOCAB_PRE_TYPE_OLMO = 12, - LLAMA_VOCAB_PRE_TYPE_DBRX = 13, - LLAMA_VOCAB_PRE_TYPE_SMAUG = 14, - LLAMA_VOCAB_PRE_TYPE_PORO = 15, - LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16, - LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17, - LLAMA_VOCAB_PRE_TYPE_VIKING = 18, - LLAMA_VOCAB_PRE_TYPE_JAIS = 19, - LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20, - LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21, - LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22, - LLAMA_VOCAB_PRE_TYPE_BLOOM = 23, - LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, - LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, - LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, - LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, - }; - - enum llama_rope_type { - LLAMA_ROPE_TYPE_NONE = -1, - LLAMA_ROPE_TYPE_NORM = 0, - LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, - LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, - LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, - }; - - enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file - LLAMA_TOKEN_TYPE_UNDEFINED = 0, - LLAMA_TOKEN_TYPE_NORMAL = 1, - LLAMA_TOKEN_TYPE_UNKNOWN = 2, - LLAMA_TOKEN_TYPE_CONTROL = 3, - LLAMA_TOKEN_TYPE_USER_DEFINED = 4, - LLAMA_TOKEN_TYPE_UNUSED = 5, - LLAMA_TOKEN_TYPE_BYTE = 6, - }; - - enum llama_token_attr { - LLAMA_TOKEN_ATTR_UNDEFINED = 0, - LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0, - LLAMA_TOKEN_ATTR_UNUSED = 1 << 1, - LLAMA_TOKEN_ATTR_NORMAL = 1 << 2, - LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL? 
- LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4, - LLAMA_TOKEN_ATTR_BYTE = 1 << 5, - LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6, - LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7, - LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8, - LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9, - }; - - // model file types - enum llama_ftype { - LLAMA_FTYPE_ALL_F32 = 0, - LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors - // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 - // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed - // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed - LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors - LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors - LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors - //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack - //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack - //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack - LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors - LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors - - LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file - }; - - enum llama_rope_scaling_type { - LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1, - LLAMA_ROPE_SCALING_TYPE_NONE = 0, - LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, - LLAMA_ROPE_SCALING_TYPE_YARN = 2, - LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, - LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE, - }; - - enum llama_pooling_type { - LLAMA_POOLING_TYPE_UNSPECIFIED = -1, - LLAMA_POOLING_TYPE_NONE = 0, - LLAMA_POOLING_TYPE_MEAN = 1, - LLAMA_POOLING_TYPE_CLS = 2, - LLAMA_POOLING_TYPE_LAST = 3, - LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph - }; - - enum llama_attention_type { - LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1, - LLAMA_ATTENTION_TYPE_CAUSAL = 0, - LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1, - }; - - enum llama_split_mode { - LLAMA_SPLIT_MODE_NONE = 0, // single GPU - LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs - LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported - }; 
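Editor's note: the attention, pooling and split-mode enums above are consumed through llama_model_params / llama_context_params, which the removed header declares further below. The following is only a hedged sketch of that wiring, using placeholder values and a hypothetical model path; it is not part of the patch itself.

// illustrative only: all parameter values and the model path are placeholders
#include "llama.h"

static llama_context * init_non_causal_ctx(const char * model_path) {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0; // CPU-only in this sketch

    llama_model * model = llama_load_model_from_file(model_path, mparams);
    if (!model) {
        return nullptr;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx          = 2048;
    cparams.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; // from the enum above
    cparams.pooling_type   = LLAMA_POOLING_TYPE_NONE;

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    // the same behaviour can also be toggled after creation:
    // llama_set_causal_attn(ctx, false);
    return ctx;
}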
- - // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979) - typedef struct llama_token_data { - llama_token id; // token id - float logit; // log-odds of the token - float p; // probability of the token - } llama_token_data; - - typedef struct llama_token_data_array { - // TODO: consider SoA - // NOTE: this pointer can be modified by the samplers - llama_token_data * data; - size_t size; - int64_t selected; // this is the index in the data array (i.e. not the token id) - bool sorted; - } llama_token_data_array; - - typedef bool (*llama_progress_callback)(float progress, void * user_data); - - // Input data for llama_decode - // A llama_batch object can contain input about one or many sequences - // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens - // - // - token : the token ids of the input (used when embd is NULL) - // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) - // - pos : the positions of the respective token in the sequence - // (if set to NULL, the token position will be tracked automatically by llama_decode) - // - seq_id : the sequence to which the respective token belongs - // (if set to NULL, the sequence ID will be assumed to be 0) - // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output - // (if set to NULL, only the logits for last token will be returned) - // - typedef struct llama_batch { - int32_t n_tokens; - - llama_token * token; - float * embd; - llama_pos * pos; - int32_t * n_seq_id; - llama_seq_id ** seq_id; - int8_t * logits; // TODO: rename this to "output" - } llama_batch; - - enum llama_model_kv_override_type { - LLAMA_KV_OVERRIDE_TYPE_INT, - LLAMA_KV_OVERRIDE_TYPE_FLOAT, - LLAMA_KV_OVERRIDE_TYPE_BOOL, - LLAMA_KV_OVERRIDE_TYPE_STR, - }; - - struct llama_model_kv_override { - enum llama_model_kv_override_type tag; - - char key[128]; - - union { - int64_t val_i64; - double val_f64; - bool val_bool; - char val_str[128]; - }; - }; - - struct llama_model_params { - // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) - ggml_backend_dev_t * devices; - - int32_t n_gpu_layers; // number of layers to store in VRAM - enum llama_split_mode split_mode; // how to split the model across multiple GPUs - - // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE - int32_t main_gpu; - - // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() - const float * tensor_split; - - // comma separated list of RPC servers to use for offloading - const char * rpc_servers; - - // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. - // If the provided progress_callback returns true, model loading continues. - // If it returns false, model loading is immediately aborted. - llama_progress_callback progress_callback; - - // context pointer passed to the progress callback - void * progress_callback_user_data; - - // override key-value pairs of the model meta data - const struct llama_model_kv_override * kv_overrides; - - // Keep the booleans together to avoid misalignment during copy-by-value. 
- bool vocab_only; // only load the vocabulary, no weights - bool use_mmap; // use mmap if possible - bool use_mlock; // force system to keep model in RAM - bool check_tensors; // validate model tensor data - }; - - // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations - // https://github.com/ggerganov/llama.cpp/pull/7544 - struct llama_context_params { - uint32_t n_ctx; // text context, 0 = from model - uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode - uint32_t n_ubatch; // physical maximum batch size - uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models) - int32_t n_threads; // number of threads to use for generation - int32_t n_threads_batch; // number of threads to use for batch processing - - enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` - enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id - enum llama_attention_type attention_type; // attention type to use for embeddings - - // ref: https://github.com/ggerganov/llama.cpp/pull/2054 - float rope_freq_base; // RoPE base frequency, 0 = from model - float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model - float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model - float yarn_attn_factor; // YaRN magnitude scaling factor - float yarn_beta_fast; // YaRN low correction dim - float yarn_beta_slow; // YaRN high correction dim - uint32_t yarn_orig_ctx; // YaRN original context size - float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default) - - ggml_backend_sched_eval_callback cb_eval; - void * cb_eval_user_data; - - enum ggml_type type_k; // data type for K cache [EXPERIMENTAL] - enum ggml_type type_v; // data type for V cache [EXPERIMENTAL] - - // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value. 
- // TODO: move at the end of the struct - bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) - bool embeddings; // if true, extract embeddings (together with logits) - bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU - bool flash_attn; // whether to use flash attention [EXPERIMENTAL] - bool no_perf; // whether to measure performance timings - - // Abort callback - // if it returns true, execution of llama_decode() will be aborted - // currently works only with CPU execution - ggml_abort_callback abort_callback; - void * abort_callback_data; - }; - - // model quantization parameters - typedef struct llama_model_quantize_params { - int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() - enum llama_ftype ftype; // quantize to this llama_ftype - enum ggml_type output_tensor_type; // output tensor type - enum ggml_type token_embedding_type; // token embeddings tensor type - bool allow_requantize; // allow quantizing non-f32/f16 tensors - bool quantize_output_tensor; // quantize output.weight - bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored - bool pure; // quantize all tensors to the default type - bool keep_split; // quantize to the same number of shards - void * imatrix; // pointer to importance matrix data - void * kv_overrides; // pointer to vector containing overrides - } llama_model_quantize_params; - - typedef struct llama_logit_bias { - llama_token token; - float bias; - } llama_logit_bias; - - typedef struct llama_sampler_chain_params { - bool no_perf; // whether to measure performance timings - } llama_sampler_chain_params; - - // used in chat template - typedef struct llama_chat_message { - const char * role; - const char * content; - } llama_chat_message; - - // lora adapter - struct llama_lora_adapter; - - // Helpers for getting default parameters - // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172) - LLAMA_API struct llama_model_params llama_model_default_params(void); - LLAMA_API struct llama_context_params llama_context_default_params(void); - LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void); - LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); - - // Initialize the llama + ggml backend - // If numa is true, use NUMA optimizations - // Call once at the start of the program - LLAMA_API void llama_backend_init(void); - - //optional: - LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); - - // Optional: an auto threadpool gets created in ggml if not passed explicitly - LLAMA_API void llama_attach_threadpool( - struct llama_context * ctx, - ggml_threadpool_t threadpool, - ggml_threadpool_t threadpool_batch); - LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); - - // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(void); - - LLAMA_API struct llama_model * llama_load_model_from_file( - const char * path_model, - struct llama_model_params params); - - LLAMA_API void llama_free_model(struct llama_model * model); - - // TODO: rename to llama_init_from_model - LLAMA_API struct llama_context * llama_new_context_with_model( - struct llama_model * model, - struct llama_context_params params); - - // Frees all allocated memory - LLAMA_API 
void llama_free(struct llama_context * ctx); - - LLAMA_API int64_t llama_time_us(void); - - LLAMA_API size_t llama_max_devices(void); - - LLAMA_API bool llama_supports_mmap (void); - LLAMA_API bool llama_supports_mlock (void); - LLAMA_API bool llama_supports_gpu_offload(void); - LLAMA_API bool llama_supports_rpc (void); - - LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); - LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); - - LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); - LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); - LLAMA_API int32_t llama_n_embd (const struct llama_model * model); - LLAMA_API int32_t llama_n_layer (const struct llama_model * model); - LLAMA_API int32_t llama_n_head (const struct llama_model * model); - - LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); - - LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); - LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); - LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); - - // Get the model's RoPE frequency scaling factor - LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model); - - // Functions to access the model's GGUF metadata scalar values - // - The functions return the length of the string on success, or -1 on failure - // - The output string is always null-terminated and cleared on failure - // - When retrieving a string, an extra byte must be allocated to account for the null terminator - // - GGUF array values are not supported by these functions - - // Get metadata value as a string by key name - LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size); - - // Get the number of metadata key/value pairs - LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model); - - // Get metadata key name by index - LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); - - // Get metadata value as a string by index - LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); - - // Get a string describing the model type - LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); - - // Returns the total size of all the tensors in the model in bytes - LLAMA_API uint64_t llama_model_size(const struct llama_model * model); - - // Returns the total number of parameters in the model - LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); - - // Returns true if the model contains an encoder that requires llama_encode() call - LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); - - // Returns true if the model contains a decoder that requires llama_decode() call - LLAMA_API bool llama_model_has_decoder(const struct llama_model * model); - - // For encoder-decoder models, this function returns id of the token that must be provided - // to the decoder to start generating output sequence. For other models, it returns -1. 
- LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model); - - // Returns true if the model is recurrent (like Mamba, RWKV, etc.) - LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model); - - // Returns 0 on success - LLAMA_API uint32_t llama_model_quantize( - const char * fname_inp, - const char * fname_out, - const llama_model_quantize_params * params); - - // Load a LoRA adapter from file - // The loaded adapter will be associated to the given model, and will be free when the model is deleted - LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( - struct llama_model * model, - const char * path_lora); - - // Add a loaded LoRA adapter to given context - // This will not modify model's weight - LLAMA_API int32_t llama_lora_adapter_set( - struct llama_context * ctx, - struct llama_lora_adapter * adapter, - float scale); - - // Remove a specific LoRA adapter from given context - // Return -1 if the adapter is not present in the context - LLAMA_API int32_t llama_lora_adapter_remove( - struct llama_context * ctx, - struct llama_lora_adapter * adapter); - - // Remove all LoRA adapters from given context - LLAMA_API void llama_lora_adapter_clear( - struct llama_context * ctx); - - // Manually free a LoRA adapter - // Note: loaded adapters will be free when the associated model is deleted - LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); - - // Apply a loaded control vector to a llama_context, or if data is NULL, clear - // the currently loaded vector. - // n_embd should be the size of a single layer's control, and data should point - // to an n_embd x n_layers buffer starting from layer 1. - // il_start and il_end are the layer range the vector should apply to (both inclusive) - // See llama_control_vector_load in common to load a control vector. - LLAMA_API int32_t llama_control_vector_apply( - struct llama_context * lctx, - const float * data, - size_t len, - int32_t n_embd, - int32_t il_start, - int32_t il_end); - - // - // KV cache - // - - // Information associated with an individual cell in the KV cache view. - struct llama_kv_cache_view_cell { - // The position for this cell. Takes KV cache shifts into account. - // May be negative if the cell is not populated. - llama_pos pos; - }; - - // An updateable view of the KV cache. - struct llama_kv_cache_view { - // Number of KV cache cells. This will be the same as the context size. - int32_t n_cells; - - // Maximum number of sequences that can exist in a cell. It's not an error - // if there are more sequences in a cell than this value, however they will - // not be visible in the view cells_sequences. - int32_t n_seq_max; - - // Number of tokens in the cache. For example, if there are two populated - // cells, the first with 1 sequence id in it and the second with 2 sequence - // ids then you'll have 3 tokens. - int32_t token_count; - - // Number of populated cache cells. - int32_t used_cells; - - // Maximum contiguous empty slots in the cache. - int32_t max_contiguous; - - // Index to the start of the max_contiguous slot range. Can be negative - // when cache is full. - int32_t max_contiguous_idx; - - // Information for an individual cell. - struct llama_kv_cache_view_cell * cells; - - // The sequences for each cell. There will be n_seq_max items per cell. - llama_seq_id * cells_sequences; - }; - - // Create an empty KV cache view. 
(use only for debugging purposes) - LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max); - - // Free a KV cache view. (use only for debugging purposes) - LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); - - // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) - LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); - - // Returns the number of tokens in the KV cache (slow, use only for debug) - // If a KV cell has multiple sequences assigned to it, it will be counted multiple times - LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); - - // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) - LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); - - // Clear the KV cache - both cell info is erased and KV data is zeroed - LLAMA_API void llama_kv_cache_clear( - struct llama_context * ctx); - - // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) - // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails - // seq_id < 0 : match any sequence - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API bool llama_kv_cache_seq_rm( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1); - - // Copy all tokens that belong to the specified sequence to another sequence - // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_cp( - struct llama_context * ctx, - llama_seq_id seq_id_src, - llama_seq_id seq_id_dst, - llama_pos p0, - llama_pos p1); - - // Removes all tokens that do not belong to the specified sequence - LLAMA_API void llama_kv_cache_seq_keep( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) - // If the KV cache is RoPEd, the KV data is updated accordingly: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_add( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - llama_pos delta); - - // Integer division of the positions by factor of `d > 1` - // If the KV cache is RoPEd, the KV data is updated accordingly: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) - LLAMA_API void llama_kv_cache_seq_div( - struct llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - int d); - - // Returns the largest position present in the KV cache for the specified sequence - LLAMA_API llama_pos llama_kv_cache_seq_pos_max( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Defragment the KV cache - // This will be applied: - // - lazily on next llama_decode() - // - explicitly with llama_kv_cache_update() - LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); - - // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) 
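Editor's note: a hedged illustration of how the sequence-level KV cache calls above compose. The sequence ids, the assumption that sequence 0 already holds a decoded prompt, and the prompt length are placeholders, not part of the patch.

// illustrative only: assumes `ctx` already holds a decoded prompt on sequence 0
#include "llama.h"
#include <cstdio>

static void reuse_prompt_cache(llama_context * ctx, llama_pos n_prompt) {
    // fork the cached prompt into sequence 1 without re-decoding it
    llama_kv_cache_seq_cp(ctx, /*seq_id_src*/ 0, /*seq_id_dst*/ 1, 0, n_prompt);

    // drop everything past the prompt on sequence 0 (p1 < 0 means [p0, inf))
    llama_kv_cache_seq_rm(ctx, 0, n_prompt, -1);

    // optionally compact the cache; applied lazily or explicitly via llama_kv_cache_update()
    llama_kv_cache_defrag(ctx);
    llama_kv_cache_update(ctx);

    printf("used KV cells: %d\n", llama_get_kv_cache_used_cells(ctx));
}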
- LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); - - // Check if the context supports KV cache shifting - LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); - - // - // State / sessions - // - - // Returns the *actual* size in bytes of the state - // (logits, embedding and kv_cache) - // Only use when saving the state, not when restoring it, otherwise the size may be too small. - LLAMA_API size_t llama_state_get_size(struct llama_context * ctx); - LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx), - "use llama_state_get_size instead"); - - // Copies the state to the specified destination address. - // Destination needs to have allocated enough memory. - // Returns the number of bytes copied - LLAMA_API size_t llama_state_get_data( - struct llama_context * ctx, - uint8_t * dst, - size_t size); - LLAMA_API DEPRECATED(size_t llama_copy_state_data( - struct llama_context * ctx, - uint8_t * dst), - "use llama_state_get_data instead"); - - // Set the state reading from the specified address - // Returns the number of bytes read - LLAMA_API size_t llama_state_set_data( - struct llama_context * ctx, - const uint8_t * src, - size_t size); - LLAMA_API DEPRECATED(size_t llama_set_state_data( - struct llama_context * ctx, - const uint8_t * src), - "use llama_state_set_data instead"); - - // Save/load session file - LLAMA_API bool llama_state_load_file( - struct llama_context * ctx, - const char * path_session, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out); - LLAMA_API DEPRECATED(bool llama_load_session_file( - struct llama_context * ctx, - const char * path_session, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out), - "use llama_state_load_file instead"); - - LLAMA_API bool llama_state_save_file( - struct llama_context * ctx, - const char * path_session, - const llama_token * tokens, - size_t n_token_count); - LLAMA_API DEPRECATED(bool llama_save_session_file( - struct llama_context * ctx, - const char * path_session, - const llama_token * tokens, - size_t n_token_count), - "use llama_state_save_file instead"); - - // Get the exact size needed to copy the KV cache of a single sequence - LLAMA_API size_t llama_state_seq_get_size( - struct llama_context * ctx, - llama_seq_id seq_id); - - // Copy the KV cache of a single sequence into the specified buffer - LLAMA_API size_t llama_state_seq_get_data( - struct llama_context * ctx, - uint8_t * dst, - size_t size, - llama_seq_id seq_id); - - // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence - // Returns: - // - Positive: Ok - // - Zero: Failed to load - LLAMA_API size_t llama_state_seq_set_data( - struct llama_context * ctx, - const uint8_t * src, - size_t size, - llama_seq_id dest_seq_id); - - LLAMA_API size_t llama_state_seq_save_file( - struct llama_context * ctx, - const char * filepath, - llama_seq_id seq_id, - const llama_token * tokens, - size_t n_token_count); - - LLAMA_API size_t llama_state_seq_load_file( - struct llama_context * ctx, - const char * filepath, - llama_seq_id dest_seq_id, - llama_token * tokens_out, - size_t n_token_capacity, - size_t * n_token_count_out); - - // - // Decoding - // - - // Return batch for single sequence of tokens - // The sequence ID will be fixed to 0 - // The position of the tokens will be tracked automatically by llama_decode - // - // NOTE: this is a helper function to facilitate transition to the new batch API - avoid 
using it - // - LLAMA_API struct llama_batch llama_batch_get_one( - llama_token * tokens, - int32_t n_tokens); - - // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens - // Each token can be assigned up to n_seq_max sequence ids - // The batch has to be freed with llama_batch_free() - // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float) - // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token - // The rest of the llama_batch members are allocated with size n_tokens - // All members are left uninitialized - LLAMA_API struct llama_batch llama_batch_init( - int32_t n_tokens, - int32_t embd, - int32_t n_seq_max); - - // Frees a batch of tokens allocated with llama_batch_init() - LLAMA_API void llama_batch_free(struct llama_batch batch); - - // Processes a batch of tokens with the ecoder part of the encoder-decoder model. - // Stores the encoder output internally for later use by the decoder cross-attention layers. - // 0 - success - // < 0 - error. the KV cache state is restored to the state before this call - LLAMA_API int32_t llama_encode( - struct llama_context * ctx, - struct llama_batch batch); - - // Positive return values does not mean a fatal error, but rather a warning. - // 0 - success - // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) - // < 0 - error. the KV cache state is restored to the state before this call - LLAMA_API int32_t llama_decode( - struct llama_context * ctx, - struct llama_batch batch); - - // Set the number of threads used for decoding - // n_threads is the number of threads used for generation (single token) - // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens) - LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch); - - // Get the number of threads used for generation of a single token. - LLAMA_API int32_t llama_n_threads(struct llama_context * ctx); - - // Get the number of threads used for prompt and batch processing (multiple token). - LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); - - // Set whether the model is in embeddings mode or not - // If true, embeddings will be returned but logits will not - LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); - - // Set whether to use causal attention or not - // If set to true, the model will only attend to the past tokens - LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn); - - // Set abort callback - LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); - - // Wait until all computations are finished - // This is automatically done when using one of the functions below to obtain the computation results - // and is not necessary to call it explicitly in most cases - LLAMA_API void llama_synchronize(struct llama_context * ctx); - - // Token logits obtained from the last call to llama_decode() - // The logits for which llama_batch.logits[i] != 0 are stored contiguously - // in the order they have appeared in the batch. - // Rows: number of tokens for which llama_batch.logits[i] != 0 - // Cols: n_vocab - LLAMA_API float * llama_get_logits(struct llama_context * ctx); - - // Logits for the ith token. 
For positive indices, Equivalent to: - // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab - // Negative indicies can be used to access logits in reverse order, -1 is the last logit. - // returns NULL for invalid ids. - LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i); - - // Get all output token embeddings. - // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model, - // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously - // in the order they have appeared in the batch. - // shape: [n_outputs*n_embd] - // Otherwise, returns NULL. - LLAMA_API float * llama_get_embeddings(struct llama_context * ctx); - - // Get the embeddings for the ith token. For positive indices, Equivalent to: - // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd - // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding. - // shape: [n_embd] (1-dimensional) - // returns NULL for invalid ids. - LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i); - - // Get the embeddings for a sequence id - // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE - // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence - // otherwise: float[n_embd] (1-dimensional) - LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id); - - // - // Vocab - // - - LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token); - - LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token); - - LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token); - - // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) 
- LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); - - // Identify if Token Id is a control token or a render-able token - LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token); - - // Special tokens - LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence - LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence - LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn - LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification - LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator - LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line - LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding - - LLAMA_API bool llama_add_bos_token(const struct llama_model * model); - LLAMA_API bool llama_add_eos_token(const struct llama_model * model); - - // infill tokens - DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead"); - DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead"); - DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead"); - - LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model); - - // - // Tokenization - // - // The API is thread-safe. - // - - /// @details Convert the provided text into tokens. - /// @param tokens The tokens pointer must be large enough to hold the resulting tokens. - /// @return Returns the number of tokens on success, no more than n_tokens_max - /// @return Returns a negative number on failure - the number of tokens that would have been returned - /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so. - /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated - /// as plaintext. Does not insert a leading space. - LLAMA_API int32_t llama_tokenize( - const struct llama_model * model, - const char * text, - int32_t text_len, - llama_token * tokens, - int32_t n_tokens_max, - bool add_special, - bool parse_special); - - // Token Id -> Piece. - // Uses the vocabulary in the provided context. - // Does not write null terminator to the buffer. - // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') - // @param special If true, special tokens are rendered in the output. - LLAMA_API int32_t llama_token_to_piece( - const struct llama_model * model, - llama_token token, - char * buf, - int32_t length, - int32_t lstrip, - bool special); - - /// @details Convert the provided tokens into text (inverse of llama_tokenize()). - /// @param text The char pointer must be large enough to hold the resulting text. 
- /// @return Returns the number of chars/bytes on success, no more than text_len_max. - /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned. - /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. - /// @param unparse_special If true, special tokens are rendered in the output. - LLAMA_API int32_t llama_detokenize( - const struct llama_model * model, - const llama_token * tokens, - int32_t n_tokens, - char * text, - int32_t text_len_max, - bool remove_special, - bool unparse_special); - - // - // Chat templates - // - - /// Apply chat template. Inspired by hf apply_chat_template() on python. - /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" - /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template - /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead. - /// @param chat Pointer to a list of multiple llama_chat_message - /// @param n_msg Number of llama_chat_message in this chat - /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message. - /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages) - /// @param length The size of the allocated buffer - /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. - LLAMA_API int32_t llama_chat_apply_template( - const struct llama_model * model, - const char * tmpl, - const struct llama_chat_message * chat, - size_t n_msg, - bool add_ass, - char * buf, - int32_t length); - - // Get list of built-in chat templates - LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); - - // - // Sampling API - // - // Sample usage: - // - // // prepare the sampling chain at the start - // auto sparams = llama_sampler_chain_default_params(); - // - // llama_sampler * smpl = llama_sampler_chain_init(sparams); - // - // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50)); - // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1)); - // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8)); - // - // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat" - // // this sampler will be responsible to select the actual token - // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed)); - // - // ... - // - // // decoding loop: - // while (...) { - // ... - // - // llama_decode(ctx, batch); - // - // // sample from the logits of the last token in the batch - // const llama_token id = llama_sampler_sample(smpl, ctx, -1); - // - // // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.) - // llama_sampler_accept(smpl, id); - // ... - // } - // - // llama_sampler_free(smpl); - // - // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). 
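Editor's note: the usage outline in the comment block above can be turned into a small self-contained routine. This is only a hedged sketch against the API declared in this (now removed) header; the sampler settings, prompt handling and buffer sizes are illustrative placeholders, not the project's actual decoding path.

// illustrative only: assumes `ctx` was created from a text-generation model
#include "llama.h"
#include <cstdio>
#include <string>
#include <vector>

static void generate(llama_context * ctx, const std::string & prompt, int n_predict) {
    const llama_model * model = llama_get_model(ctx);

    // two-pass tokenization: a negative return value is the required token count
    const int n_tok = -llama_tokenize(model, prompt.c_str(), (int32_t) prompt.size(), nullptr, 0, true, true);
    std::vector<llama_token> tokens(n_tok);
    llama_tokenize(model, prompt.c_str(), (int32_t) prompt.size(), tokens.data(), n_tok, true, true);

    // build the sampler chain, ending with a token-selecting sampler
    llama_sampler * smpl = llama_sampler_chain_init(llama_sampler_chain_default_params());
    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9f, 1));
    llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.8f));
    llama_sampler_chain_add(smpl, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

    llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
    for (int i = 0; i < n_predict; ++i) {
        if (llama_decode(ctx, batch) != 0) {
            break;
        }

        // per the shorthand documented above, llama_sampler_sample applies the chain and accepts the token
        const llama_token id = llama_sampler_sample(smpl, ctx, -1);
        if (llama_token_is_eog(model, id)) {
            break;
        }

        char piece[128];
        const int n = llama_token_to_piece(model, id, piece, (int32_t) sizeof(piece), 0, true);
        if (n > 0) {
            fwrite(piece, 1, n, stdout);
        }

        // feed the sampled token back in as a single-token batch
        tokens[0] = id;
        batch = llama_batch_get_one(tokens.data(), 1);
    }

    llama_sampler_free(smpl);
}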
- // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab - // - - typedef void * llama_sampler_context_t; - - // user code can implement the interface below in order to create custom llama_sampler - struct llama_sampler_i { - const char * (*name) (const struct llama_sampler * smpl); // can be NULL - void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL - void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required - void (*reset) ( struct llama_sampler * smpl); // can be NULL - struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL - void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL - - // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph - //void (*apply_ggml) (struct llama_sampler * smpl, ...); - }; - - struct llama_sampler { - struct llama_sampler_i * iface; - llama_sampler_context_t ctx; - }; - - // mirror of llama_sampler_i: - LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); - LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token); - LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p); - LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl); - LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl); - // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add) - LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl); - - // llama_sampler_chain - // a type of llama_sampler that can chain multiple samplers one after another - - LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params); - - // important: takes ownership of the sampler object and will free it when llama_sampler_free is called - LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl); - LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i); - LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain); - - // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed - LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i); - - // available samplers: - - LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void); - LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed); - - /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. - /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first. 
- DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void), - "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)"); - - /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 - LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); - - /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 - LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep); - - /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 - LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep); - - /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. - LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep); - - /// #details Updates the logits l_i` = l_i/t. When t <= 0.0f, the maximum logit is kept at it's original value, the rest are set to -inf - LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t); - - /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772. - LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent); - - /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335 - LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed); - - /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. - /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. - /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. - /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. - /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. - /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. - LLAMA_API struct llama_sampler * llama_sampler_init_mirostat( - int32_t n_vocab, - uint32_t seed, - float tau, - float eta, - int32_t m); - - /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. - /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. 
- /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. - /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. - /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. - LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2( - uint32_t seed, - float tau, - float eta); - - LLAMA_API struct llama_sampler * llama_sampler_init_grammar( - const struct llama_model * model, - const char * grammar_str, - const char * grammar_root); - - /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. - LLAMA_API struct llama_sampler * llama_sampler_init_penalties( - int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) - float penalty_repeat, // 1.0 = disabled - float penalty_freq, // 0.0 = disabled - float penalty_present); // 0.0 = disabled - - /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 - LLAMA_API struct llama_sampler * llama_sampler_init_dry( - const struct llama_model * model, - float dry_multiplier, - float dry_base, - int32_t dry_allowed_length, - int32_t dry_penalty_last_n, - const char ** seq_breakers, - size_t num_breakers); - - LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( - int32_t n_vocab, - int32_t n_logit_bias, - const llama_logit_bias * logit_bias); - - // this sampler is meant to be used for fill-in-the-middle infilling - // it's supposed to be used after top_k + top_p sampling - // - // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG - // 2. combine probs of tokens that have the same prefix - // - // example: - // - // - before: - // "hel": 0.5 - // "hell": 0.2 - // "hello": 0.1 - // "dummy": 0.1 - // - // - after: - // "hel": 0.8 - // "dummy": 0.1 - // - // 3. discard non-EOG tokens with low prob - // 4. if no tokens are left -> pick EOT - // - LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model); - - // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise - LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); - - /// @details Sample and accept a token from the idx-th output of the last evaluation - // - // Shorthand for: - // const auto * logits = llama_get_logits_ith(ctx, idx); - // llama_token_data_array cur_p = { ... init from logits ... 
}; - // llama_sampler_apply(smpl, &cur_p); - // auto token = cur_p.data[cur_p.selected].id; - // llama_sampler_accept(smpl, token); - // return token; - // Returns the sampled token - LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx); - - // TODO: extend in the future - //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...); - - // - // Model split - // - - /// @details Build a split GGUF final path for this chunk. - /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf" - // Returns the split_path length. - LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count); - - /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match. - /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0" - // Returns the split_prefix length. - LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count); - - // Print system information - LLAMA_API const char * llama_print_system_info(void); - - // Set callback for all future logging events. - // If this is not called, or NULL is supplied, everything is output on stderr. - LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); - - // - // Performance utils - // - // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements. - // - - struct llama_perf_context_data { - double t_start_ms; - double t_load_ms; - double t_p_eval_ms; - double t_eval_ms; - - int32_t n_p_eval; - int32_t n_eval; - }; - - struct llama_perf_sampler_data { - double t_sample_ms; - - int32_t n_sample; - }; - - LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx); - LLAMA_API void llama_perf_context_print(const struct llama_context * ctx); - LLAMA_API void llama_perf_context_reset( struct llama_context * ctx); - - // NOTE: the following work only with samplers constructed via llama_sampler_chain_init - LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain); - LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain); - LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain); - -#ifdef __cplusplus -} -#endif - -#endif // LLAMA_H diff --git a/vall_e.cpp/include/llama_hack.h b/vall_e.cpp/include/llama_hack.h index e1a07cd..eb1b7bd 100644 --- a/vall_e.cpp/include/llama_hack.h +++ b/vall_e.cpp/include/llama_hack.h @@ -8,14 +8,20 @@ #define LLAMA_MAX_LAYERS 512 #define LLAMA_MAX_EXPERTS 160 // DeepSeekV2 -enum e_model { - MODEL_UNKNOWN, +enum llm_type { + LLM_TYPE_UNKNOWN, }; enum llm_arch { LLM_ARCH_UNKNOWN, }; +enum llama_expert_gating_func_type { + LLAMA_EXPERT_GATING_FUNC_TYPE_NONE = 0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1, + LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2, +}; + struct llama_hparams_posnet { uint32_t n_embd; uint32_t n_layer; @@ -28,116 +34,148 @@ struct llama_hparams_convnext { struct llama_hparams { bool vocab_only; - bool rope_finetuned; - bool use_par_res; - bool swin_norm; + bool rope_finetuned; + bool use_par_res; + bool swin_norm; - uint32_t n_vocab = 0; - uint32_t n_ctx_train; // context size 
the model was trained on - uint32_t n_embd; - uint32_t n_embd_features = 0; - uint32_t n_layer; - uint32_t n_rot; - uint32_t n_swa = 0; // sliding window attention (SWA) - uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads - uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head - uint32_t n_expert = 0; - uint32_t n_expert_used = 0; - uint32_t n_vocab_type = 0; // for BERT-style token types - uint32_t n_rel_attn_bkts = 0; + uint32_t n_ctx_train; // context size the model was trained on + uint32_t n_embd; + uint32_t n_embd_features = 0; + uint32_t n_layer; + uint32_t n_rot; + uint32_t n_swa = 0; // sliding window attention (SWA) + uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention + uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads + uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head + uint32_t n_expert = 0; + uint32_t n_expert_used = 0; + uint32_t n_rel_attn_bkts = 0; - // for WavTokenizer - struct llama_hparams_posnet posnet; - struct llama_hparams_convnext convnext; + // for WavTokenizer + struct llama_hparams_posnet posnet; + struct llama_hparams_convnext convnext; - std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr; - std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr; - std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr; + std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr; + std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr; + std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr; - uint32_t n_layer_dense_lead = 0; - uint32_t n_lora_q = 0; - uint32_t n_lora_kv = 0; - uint32_t n_ff_exp = 0; - uint32_t n_ff_shexp = 0; - uint32_t n_expert_shared = 0; - float expert_weights_scale = 0.0; + uint32_t n_layer_dense_lead = 0; + uint32_t n_lora_q = 0; + uint32_t n_lora_kv = 0; + uint32_t n_ff_exp = 0; + uint32_t n_ff_shexp = 0; + uint32_t n_expert_shared = 0; + uint32_t n_norm_groups = 0; - float f_norm_eps; - float f_norm_rms_eps; - float f_norm_group_eps; + float expert_weights_scale = 0.0; + bool expert_weights_norm = false; + uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE; - uint32_t n_norm_groups; + float f_norm_eps; + float f_norm_rms_eps; + float f_norm_group_eps; - float f_attn_logit_softcapping = 50.0f; - float f_final_logit_softcapping = 30.0f; + float f_attn_logit_softcapping = 50.0f; + float f_final_logit_softcapping = 30.0f; - // for RWKV - uint32_t rescale_every_n_layers = 0; - uint32_t time_mix_extra_dim = 0; - uint32_t time_decay_extra_dim = 0; - uint32_t wkv_head_size = 0; + // for RWKV + uint32_t rescale_every_n_layers = 0; + uint32_t time_mix_extra_dim = 0; + uint32_t time_decay_extra_dim = 0; + uint32_t wkv_head_size = 0; + uint32_t token_shift_count = 2; + uint32_t n_lora_decay = 0; + uint32_t n_lora_iclr = 0; + uint32_t n_lora_value_res_mix = 0; + uint32_t n_lora_gate = 0; - float rope_attn_factor = 1.0f; - float rope_freq_base_train; - float rope_freq_scale_train; - uint32_t n_ctx_orig_yarn; - float rope_yarn_log_mul; - int rope_sections[4]; + float rope_attn_factor = 1.0f; + float rope_freq_base_train; + float rope_freq_base_train_swa; + float rope_freq_scale_train; + float rope_freq_scale_train_swa; + uint32_t n_ctx_orig_yarn; + float rope_yarn_log_mul; - // for State Space Models - uint32_t ssm_d_conv = 0; - uint32_t ssm_d_inner = 0; - uint32_t ssm_d_state = 0; - uint32_t ssm_dt_rank = 0; - bool ssm_dt_b_c_rms = false; + std::array<int, 4> rope_sections; - float f_clamp_kqv = 0.0f; - float f_max_alibi_bias = 0.0f; - float f_logit_scale = 0.0f; + // for State Space Models + uint32_t 
ssm_d_conv = 0; + uint32_t ssm_d_inner = 0; + uint32_t ssm_d_state = 0; + uint32_t ssm_dt_rank = 0; - // Additional scale factors (Granite/Granite MoE) - float f_residual_scale = 0.0f; - float f_embedding_scale = 0.0f; - float f_attention_scale = 0.0f; + bool ssm_dt_b_c_rms = false; - bool causal_attn = true; - bool use_alibi = false; - bool attn_soft_cap = false; + float f_clamp_kqv = 0.0f; + float f_max_alibi_bias = 0.0f; + float f_logit_scale = 0.0f; - // needed by encoder-decoder models (e.g. T5, FLAN-T5) - // ref: https://github.com/ggerganov/llama.cpp/pull/8141 - llama_token dec_start_token_id = LLAMA_TOKEN_NULL; + // Additional scale factors (Granite/Granite MoE) + float f_residual_scale = 0.0f; + float f_embedding_scale = 0.0f; + float f_attention_scale = 0.0f; - enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE; - enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; - enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE; + bool causal_attn = true; + bool use_alibi = false; + bool attn_soft_cap = false; + + // needed by encoder-decoder models (e.g. T5, FLAN-T5) + // ref: https://github.com/ggerganov/llama.cpp/pull/8141 + llama_token dec_start_token_id = LLAMA_TOKEN_NULL; + + enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE; + enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; + enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE; + + uint32_t n_head(uint32_t il = 0) const; + + uint32_t n_head_kv(uint32_t il = 0) const; + + uint32_t n_ff(uint32_t il = 0) const; + + uint32_t n_gqa(uint32_t il = 0) const; + + // dimension of key embeddings across all k-v heads + uint32_t n_embd_k_gqa(uint32_t il = 0) const; + + // dimension of value embeddings across all k-v heads + uint32_t n_embd_v_gqa(uint32_t il = 0) const; + + // dimension of the rolling state embeddings + // corresponds to Mamba's conv_states size or RWKV's token_shift states size + uint32_t n_embd_k_s() const; + + // dimension of the recurrent state embeddings + uint32_t n_embd_v_s() const; + + bool is_swa(uint32_t il) const; }; struct llama_model { - e_model type = MODEL_UNKNOWN; - llm_arch arch = LLM_ARCH_UNKNOWN; - llama_ftype ftype = LLAMA_FTYPE_ALL_F32; + llm_type type = LLM_TYPE_UNKNOWN; + llm_arch arch = LLM_ARCH_UNKNOWN; - std::string name = "n/a"; + std::string name = "n/a"; - llama_hparams hparams = {}; - llama_vocab vocab; + llama_hparams hparams = {}; + llama_vocab vocab; - struct ggml_tensor * tok_embd = nullptr; - struct ggml_tensor * type_embd = nullptr; - struct ggml_tensor * pos_embd = nullptr; - struct ggml_tensor * tok_norm = nullptr; + struct ggml_tensor * tok_embd = nullptr; + struct ggml_tensor * type_embd = nullptr; + struct ggml_tensor * pos_embd = nullptr; + struct ggml_tensor * tok_norm = nullptr; struct ggml_tensor * tok_norm_b = nullptr; - struct ggml_tensor * output_norm = nullptr; - struct ggml_tensor * output_norm_b = nullptr; - struct ggml_tensor * output = nullptr; - struct ggml_tensor * output_b = nullptr; + struct ggml_tensor * output_norm = nullptr; + struct ggml_tensor * output_norm_b = nullptr; + struct ggml_tensor * output = nullptr; + struct ggml_tensor * output_b = nullptr; struct ggml_tensor * output_norm_enc = nullptr; // classifier - struct ggml_tensor * cls = nullptr; - struct ggml_tensor * cls_b = nullptr; + struct ggml_tensor * cls = nullptr; + struct ggml_tensor * cls_b = nullptr; struct ggml_tensor * cls_out = nullptr; struct ggml_tensor * cls_out_b = nullptr; @@ -145,6 +183,161 @@ 
struct llama_model { struct ggml_tensor * conv1d_b = nullptr; }; +struct llama_vocab_hack { + struct token_data { + std::string text; + float score; + llama_token_attr attr; + }; + + llama_vocab_hack(); + ~llama_vocab_hack(); + + void load(llama_model_loader & ml, const LLM_KV & kv); + + enum llama_vocab_type get_type() const; + enum llama_vocab_pre_type get_pre_type() const; + + uint32_t n_tokens() const; + uint32_t n_token_types() const; + + std::string type_name() const; + + bool is_normal (llama_token id) const; + bool is_unknown (llama_token id) const; + bool is_control (llama_token id) const; + bool is_byte (llama_token id) const; + bool is_user_defined(llama_token id) const; + bool is_unused (llama_token id) const; + bool is_eog (llama_token id) const; + + uint8_t token_to_byte(llama_token id) const; + llama_token byte_to_token(uint8_t ch) const; + + llama_token text_to_token(const std::string & text) const; + + const token_data & get_token_data(llama_token id) const; + + const char * token_get_text (llama_token id) const; + float token_get_score(llama_token id) const; + llama_token_attr token_get_attr (llama_token id) const; + + llama_token token_bos() const; + llama_token token_eos() const; + llama_token token_eot() const; + llama_token token_eom() const; + llama_token token_unk() const; + llama_token token_sep() const; + llama_token token_nl () const; + llama_token token_pad() const; + + llama_token token_prefix() const; + llama_token token_middle() const; + llama_token token_suffix() const; + + llama_token token_fim_pre() const; + llama_token token_fim_suf() const; + llama_token token_fim_mid() const; + llama_token token_fim_pad() const; + llama_token token_fim_rep() const; + llama_token token_fim_sep() const; + + bool get_add_space_prefix () const; + bool get_add_bos () const; + bool get_add_eos () const; + bool get_ignore_merges () const; + bool get_clean_spaces () const; + bool get_remove_extra_whitespaces () const; + bool get_escape_whitespaces () const; + bool get_treat_whitespace_as_suffix() const; + + int max_token_len() const; + + int find_bpe_rank(const std::string & token_left, const std::string & token_right) const; + + int32_t tokenize( + const char * text, + int32_t text_len, + llama_token * tokens, + int32_t n_tokens_max, + bool add_special, + bool parse_special) const; + + std::vector<llama_token> tokenize( + const std::string & raw_text, + bool add_special, + bool parse_special = false) const; + + // does not write null-terminator to buf + int32_t token_to_piece( + llama_token token, + char * buf, + int32_t length, + int32_t lstrip, + bool special) const; + + // use cached data + const std::string & token_to_piece(llama_token token) const; + + int32_t detokenize( + const llama_token * tokens, + int32_t n_tokens, + char * text, + int32_t text_len_max, + bool remove_special, + bool unparse_special) const; + + std::string detokenize( + const std::vector<llama_token> & tokens, + bool special) const; + + void print_info() const; + + struct impl { + uint32_t n_token_types = 0; // for BERT-style token types + + enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM; + enum llama_vocab_pre_type pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + + int max_token_len = 0; // used for optimizing longest token search + + // default LLaMA special tokens + // TODO: should we set all of these to LLAMA_TOKEN_NULL? 
+ llama_token special_bos_id = 1; + llama_token special_eos_id = 2; + llama_token special_eot_id = LLAMA_TOKEN_NULL; + llama_token special_eom_id = LLAMA_TOKEN_NULL; + llama_token special_unk_id = 0; + llama_token special_sep_id = LLAMA_TOKEN_NULL; + llama_token special_pad_id = LLAMA_TOKEN_NULL; + llama_token special_mask_id = LLAMA_TOKEN_NULL; + + llama_token linefeed_id = 13; + + // fim tokens + llama_token special_fim_pre_id = LLAMA_TOKEN_NULL; + llama_token special_fim_suf_id = LLAMA_TOKEN_NULL; + llama_token special_fim_mid_id = LLAMA_TOKEN_NULL; + llama_token special_fim_pad_id = LLAMA_TOKEN_NULL; + llama_token special_fim_rep_id = LLAMA_TOKEN_NULL; // repo + llama_token special_fim_sep_id = LLAMA_TOKEN_NULL; // file separator + + // tokenizer flags + bool add_space_prefix = false; + bool add_bos = false; + bool add_eos = false; + bool ignore_merges = false; + bool clean_spaces = false; // clean_up_tokenization_spaces + bool remove_extra_whitespaces = false; + bool escape_whitespaces = true; + bool treat_whitespace_as_suffix = false; + + std::unordered_map<std::string, llama_token> token_to_id; + std::vector<token_data> id_to_token; + }; + std::unique_ptr<impl> pimpl; +}; + /* BEGIN VALL-E SPECIFIC HELPERS */ struct ggml_tensor * llama_get_embedding_weights(struct llama_model * model) { return model->tok_embd; @@ -156,7 +349,9 @@ void llama_set_output_head(struct llama_model * model, struct ggml_tensor* tenso // set the output tensor model->output = tensor; // required to properly output logits - *const_cast<uint32_t*>(&model->hparams.n_vocab) = tensor->ne[1]; + llama_vocab_hack* vocab = (llama_vocab_hack*) const_cast<llama_vocab*>(llama_model_get_vocab( model )); + vocab->pimpl->id_to_token.resize( tensor->ne[1] ); + // *const_cast<uint32_t*>(&model->hparams.n_vocab) = tensor->ne[1]; } /* END VALL-E SPECIFIC HELPERS */ diff --git a/vall_e.cpp/vall_e.cpp b/vall_e.cpp/vall_e.cpp index 7a636fd..e30cf16 100644 --- a/vall_e.cpp/vall_e.cpp +++ b/vall_e.cpp/vall_e.cpp @@ -141,8 +141,9 @@ int32_t vall_e_inputs_map_get_classifier_idx( io_map_t& io_map, const std::strin } void vall_e_inputs_map_init( io_map_t& io_map, llama_model* model ) { - auto n_embd = llama_n_embd( model ); - auto n_vocab = llama_n_vocab( model ); + auto vocab = llama_model_get_vocab( model ); + auto n_embd = llama_model_n_embd( model ); + auto n_vocab = llama_vocab_n_tokens( vocab ); io_map.n_embd = n_embd; io_map.n_vocab = n_vocab; @@ -543,10 +544,14 @@ std::vector<llama_token> generate( vall_e_context_t* ctx, vall_e_inputs_t& inputs, i sparams.no_perf = false; llama_sampler * smpl = llama_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl, llama_sampler_init_top_k(0)); - llama_sampler_chain_add(smpl, llama_sampler_init_top_p(1.0, 1)); - llama_sampler_chain_add(smpl, llama_sampler_init_temp (1.0)); - llama_sampler_chain_add(smpl, llama_sampler_init_dist (LLAMA_DEFAULT_SEED)); + if ( mode == INFERENCE_MODE_LEN ) { + llama_sampler_chain_add(smpl, llama_sampler_init_greedy()); + } else { + llama_sampler_chain_add(smpl, llama_sampler_init_top_k(0)); + llama_sampler_chain_add(smpl, llama_sampler_init_top_p(1.0, 1)); + llama_sampler_chain_add(smpl, llama_sampler_init_temp (1.0)); + llama_sampler_chain_add(smpl, llama_sampler_init_dist (LLAMA_DEFAULT_SEED)); + } output_tokens.reserve(max_tokens); while ( output_tokens.size() < max_tokens ) { @@ -554,7 +559,7 @@ std::vector<llama_token> generate( vall_e_context_t* ctx, vall_e_inputs_t& inputs, i fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1); return output_tokens; } - llama_kv_cache_clear(ctx->llama.ctx); // necessary for many reasons + 
llama_kv_self_clear(ctx->llama.ctx); // necessary for many reasons // sample token auto t = llama_sampler_sample(smpl, ctx->llama.ctx, -1); @@ -650,7 +655,7 @@ std::vector<llama_token> generate( vall_e_context_t* ctx, vall_e_inputs_t& inputs, i fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1); return output_tokens; } - llama_kv_cache_clear(ctx->llama.ctx); // necessary for many reasons + llama_kv_self_clear(ctx->llama.ctx); // necessary for many reasons // copy null probabilities std::vector<float> null_logits(n_outputs * n_vocab, 0.0f); memcpy( null_logits.data(), llama_get_logits( ctx->llama.ctx ), sizeof(float) * n_vocab * n_outputs ); @@ -660,7 +665,7 @@ std::vector<llama_token> generate( vall_e_context_t* ctx, vall_e_inputs_t& inputs, i fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1); return output_tokens; } - llama_kv_cache_clear(ctx->llama.ctx); // necessary for many reasons + llama_kv_self_clear(ctx->llama.ctx); // necessary for many reasons auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; @@ -709,7 +714,7 @@ std::vector<llama_token> generate( vall_e_context_t* ctx, vall_e_inputs_t& inputs, i fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1); return output_tokens; } - llama_kv_cache_clear(ctx->llama.ctx); // necessary for many reasons + llama_kv_self_clear(ctx->llama.ctx); // necessary for many reasons auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; @@ -837,7 +842,7 @@ void vall_e_print_usage( char** argv, vall_e_context_params_t& params, vall_e_ar fprintf(stderr, " -l TEXT, --language TEXT\n"); fprintf(stderr, " Language for input text / output response (default: %s)\n", args.language.c_str()); fprintf(stderr, " -ts TASK, --task TASK\n"); - fprintf(stderr, " Inferencing task (default: %s, accepts ['tts', 'stt', 'ns', 'sr'])\n", args.task); + fprintf(stderr, " Inferencing task (default: %s, accepts ['tts', 'stt', 'ns', 'sr'])\n", args.task.c_str()); fprintf(stderr, " -mode MODE, --modality MODE\n"); fprintf(stderr, " Modality for inferencing (default: %s, accepts ['ar+nar', 'nar-len'])\n", args.modality == MODALITY_NAR_LEN ? "nar-len" : "ar+nar"); fprintf(stderr, " -ms N, --max-steps N\n"); @@ -873,7 +878,7 @@ bool vall_e_args_parse( int argc, char** argv, vall_e_context_params_t& params, } else if (arg == "-ts" || arg == "--task") { args.task = argv[++i]; } else if (arg == "-mode" || arg == "--modality") { - args.modality = argv[++i] == "ar+nar" ? MODALITY_AR_NAR : MODALITY_NAR_LEN; + args.modality = std::string(argv[++i]) == "ar+nar" ? 
MODALITY_AR_NAR : MODALITY_NAR_LEN; } else if (arg == "-ms" || arg == "--max-steps") { args.max_steps = std::stoi(argv[++i]); } else if (arg == "-md" || arg == "--max-duration") { @@ -908,7 +913,7 @@ vall_e_context_t* vall_e_load( const vall_e_context_params_t& params ) { llama_model_params model_params = llama_model_default_params(); model_params.n_gpu_layers = params.gpu_layers; - ctx->llama.model = llama_load_model_from_file(params.model_path.c_str(), model_params); + ctx->llama.model = llama_model_load_from_file(params.model_path.c_str(), model_params); if ( !ctx->llama.model ) { fprintf(stderr , "%s: error: unable to load model\n" , __func__); return ctx; @@ -924,7 +929,7 @@ vall_e_context_t* vall_e_load( const vall_e_context_params_t& params ) { ctx_params.no_perf = false; ctx_params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; - ctx->llama.ctx = llama_new_context_with_model(ctx->llama.model, ctx_params); + ctx->llama.ctx = llama_init_from_model(ctx->llama.model, ctx_params); if ( !ctx->llama.ctx ) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); return ctx; @@ -1022,7 +1027,7 @@ void vall_e_free( vall_e_context_t* ctx ) { espeak_Terminate(); encodec_free(ctx->encodec.ctx); llama_free(ctx->llama.ctx); - llama_free_model(ctx->llama.model); + llama_model_free(ctx->llama.model); ggml_free(ctx->io_map->ctx); delete ctx->io_map; delete ctx; diff --git a/vall_e/emb/qnt.py b/vall_e/emb/qnt.py index 845c0b1..0248fea 100755 --- a/vall_e/emb/qnt.py +++ b/vall_e/emb/qnt.py @@ -21,29 +21,34 @@ from tqdm import tqdm from torch.nn.utils.rnn import pad_sequence AVAILABLE_AUDIO_BACKENDS = [] +ERRORED_BACKENDS = {} try: from .codecs.encodec import * AVAILABLE_AUDIO_BACKENDS.append("encodec") except Exception as e: _logger.warning(str(e)) + ERRORED_BACKENDS["encodec"] = e try: from .codecs.vocos import * AVAILABLE_AUDIO_BACKENDS.append("vocos") except Exception as e: _logger.warning(str(e)) + ERRORED_BACKENDS["vocos"] = e try: from .codecs.dac import * AVAILABLE_AUDIO_BACKENDS.append("dac") except Exception as e: _logger.warning(str(e)) + ERRORED_BACKENDS["dac"] = e try: from .codecs.nemo import * AVAILABLE_AUDIO_BACKENDS.append("nemo") except Exception as e: _logger.warning(str(e)) + ERRORED_BACKENDS["nemo"] = e @cache def _load_encodec_model(device="cuda", dtype=None, levels=0): @@ -152,6 +157,9 @@ def _load_model(device="cuda", backend=None, dtype=None): if not backend: backend = cfg.audio_backend + if backend in ERRORED_BACKENDS: + raise ERRORED_BACKENDS[backend] + if cfg.inference.amp: dtype = None diff --git a/vall_e/export.py b/vall_e/export.py index 3508984..1b075b4 100644 --- a/vall_e/export.py +++ b/vall_e/export.py @@ -330,7 +330,7 @@ def main(): callback = None if args.hf: - callback = convert_to_hf_custom + callback = convert_to_hf_llama elif args.export_lora: callback = extract_lora elif args.split_classifiers:
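
Note on the llama.cpp API migration exercised by the vall_e.cpp hunks above: llama_load_model_from_file, llama_new_context_with_model, llama_n_embd/llama_n_vocab, llama_kv_cache_clear and llama_free_model are swapped for their renamed counterparts. A minimal, self-contained sketch of the renamed call sequence follows; the model path, the omitted llama_decode loop and the absent error handling are illustrative assumptions, not code taken from vall_e.cpp.

#include "llama.h"

int main() {
    // load the GGUF model (renamed from llama_load_model_from_file)
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_model_load_from_file("model.gguf", mparams);

    // the vocab size is now queried through the vocab object rather than the model
    const llama_vocab * vocab = llama_model_get_vocab(model);
    const int32_t n_vocab = llama_vocab_n_tokens(vocab);
    const int32_t n_embd  = llama_model_n_embd(model);

    // create the context (renamed from llama_new_context_with_model)
    llama_context_params cparams = llama_context_default_params();
    llama_context * lctx = llama_init_from_model(model, cparams);

    // ... call llama_decode() per step, then clear the self-attention KV cache
    // between passes (renamed from llama_kv_cache_clear)
    llama_kv_self_clear(lctx);

    // teardown (llama_free_model is now llama_model_free)
    llama_free(lctx);
    llama_model_free(model);

    return (n_embd > 0 && n_vocab > 0) ? 0 : 1;
}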