@phronmophobic
Created September 24, 2024 21:34
Large interface
[[:Struct_33023E32 [{:n-elems 1, :datatype :int32, :name :m} {:n-elems 1, :datatype :int32, :name :n_iter} {:n-elems 1, :datatype :int32, :name :max_linesearch} {:n-elems 1, :datatype :float32, :name :eps} {:n-elems 1, :datatype :float32, :name :ftol} {:n-elems 1, :datatype :float32, :name :wolfe} {:n-elems 1, :datatype :float32, :name :min_step} {:n-elems 1, :datatype :float32, :name :max_step} {:n-elems 1, :datatype :int32, :name :linesearch}]] [:ggml_scratch [{:n-elems 1, :datatype :int64, :name :offs} {:n-elems 1, :datatype :int64, :name :size} {:n-elems 1, :datatype :pointer, :name :data}]] [:ggml_cplan [{:n-elems 1, :datatype :int64, :name :work_size} {:n-elems 1, :datatype :pointer, :name :work_data} {:n-elems 1, :datatype :int32, :name :n_threads} {:n-elems 1, :datatype :pointer, :name :abort_callback} {:n-elems 1, :datatype :pointer, :name :abort_callback_data}]] [:Struct_B6B81675 [{:n-elems 1, :datatype :int32, :name :n_iter} {:n-elems 1, :datatype :float32, :name :sched} {:n-elems 1, :datatype :float32, :name :decay} {:n-elems 1, :datatype :int32, :name :decay_min_ndim} {:n-elems 1, :datatype :float32, :name :alpha} {:n-elems 1, :datatype :float32, :name :beta1} {:n-elems 1, :datatype :float32, :name :beta2} {:n-elems 1, :datatype :float32, :name :eps} {:n-elems 1, :datatype :float32, :name :eps_f} {:n-elems 1, :datatype :float32, :name :eps_g} {:n-elems 1, :datatype :float32, :name :gclip}]] [:ggml_bf16_t [{:n-elems 1, :datatype :int16, :name :bits}]] [:ggml_type_traits_t [{:n-elems 1, :datatype :pointer, :name :type_name} {:n-elems 1, :datatype :int32, :name :blck_size} {:n-elems 1, :datatype :int64, :name :type_size} {:n-elems 1, :datatype :int8, :name :is_quantized} {:n-elems 1, :datatype :pointer, :name :to_float} {:n-elems 1, :datatype :pointer, :name :from_float} {:n-elems 1, :datatype :pointer, :name :from_float_reference} {:n-elems 1, :datatype :pointer, :name :vec_dot} {:n-elems 1, :datatype :int32, :name :vec_dot_type} {:n-elems 1, :datatype :int64, :name :nrows}]] [:ggml_init_params [{:n-elems 1, :datatype :int64, :name :mem_size} {:n-elems 1, :datatype :pointer, :name :mem_buffer} {:n-elems 1, :datatype :int8, :name :no_alloc}]] [:ggml_vk_device [{:n-elems 1, :datatype :int32, :name :index} {:n-elems 1, :datatype :int32, :name :type} {:n-elems 1, :datatype :int64, :name :heapSize} {:n-elems 1, :datatype :pointer, :name :name} {:n-elems 1, :datatype :pointer, :name :vendor} {:n-elems 1, :datatype :int32, :name :subgroupSize} {:n-elems 1, :datatype :int64, :name :bufferAlignment} {:n-elems 1, :datatype :int64, :name :maxAlloc}]] [:ggml_object [{:n-elems 1, :datatype :int64, :name :offs} {:n-elems 1, :datatype :int64, :name :size} {:n-elems 1, :datatype :pointer, :name :next} {:n-elems 1, :datatype :int32, :name :type} {:n-elems 4, :datatype :int8, :name :padding}]] [:ggml_backend_graph_copy [{:n-elems 1, :datatype :pointer, :name :buffer} {:n-elems 1, :datatype :pointer, :name :ctx_allocated} {:n-elems 1, :datatype :pointer, :name :ctx_unallocated} {:n-elems 1, :datatype :pointer, :name :graph}]] [:ggml_tensor [{:n-elems 1, :datatype :int32, :name :type} {:n-elems 1, :datatype :int32, :name :backend} {:n-elems 1, :datatype :pointer, :name :buffer} {:n-elems 4, :datatype :int64, :name :ne} {:n-elems 4, :datatype :int64, :name :nb} {:n-elems 1, :datatype :int32, :name :op} {:n-elems 16, :datatype :int32, :name :op_params} {:n-elems 1, :datatype :int32, :name :flags} {:n-elems 1, :datatype :pointer, :name :grad} {:n-elems 10, :datatype :int64, :name :src} {:n-elems 1, 
:datatype :pointer, :name :view_src} {:n-elems 1, :datatype :int64, :name :view_offs} {:n-elems 1, :datatype :pointer, :name :data} {:n-elems 64, :datatype :int8, :name :name} {:n-elems 1, :datatype :pointer, :name :extra}]] [:ggml_opt_params [{:n-elems 1, :datatype :int32, :name :type} {:n-elems 1, :datatype :int64, :name :graph_size} {:n-elems 1, :datatype :int32, :name :n_threads} {:n-elems 1, :datatype :int32, :name :past} {:n-elems 1, :datatype :float32, :name :delta} {:n-elems 1, :datatype :int32, :name :max_no_improvement} {:n-elems 1, :datatype :int8, :name :print_forward_graph} {:n-elems 1, :datatype :int8, :name :print_backward_graph} {:n-elems 1, :datatype :int32, :name :n_gradient_accumulation} {:n-elems 1, :datatype :Struct_B6B81675, :name :adam} {:n-elems 1, :datatype :Struct_33023E32, :name :lbfgs}]] [:ggml_tallocr [{:n-elems 1, :datatype :pointer, :name :buffer} {:n-elems 1, :datatype :pointer, :name :base} {:n-elems 1, :datatype :int64, :name :alignment} {:n-elems 1, :datatype :int64, :name :offset}]] [:Struct_A696EC15 [{:n-elems 1, :datatype :pointer, :name :x} {:n-elems 1, :datatype :pointer, :name :xp} {:n-elems 1, :datatype :pointer, :name :g} {:n-elems 1, :datatype :pointer, :name :gp} {:n-elems 1, :datatype :pointer, :name :d} {:n-elems 1, :datatype :pointer, :name :pf} {:n-elems 1, :datatype :pointer, :name :lmal} {:n-elems 1, :datatype :pointer, :name :lmys} {:n-elems 1, :datatype :pointer, :name :lms} {:n-elems 1, :datatype :pointer, :name :lmy} {:n-elems 1, :datatype :float32, :name :fx_best} {:n-elems 1, :datatype :float32, :name :step} {:n-elems 1, :datatype :int32, :name :j} {:n-elems 1, :datatype :int32, :name :k} {:n-elems 1, :datatype :int32, :name :end} {:n-elems 1, :datatype :int32, :name :n_no_improvement}]] [:Struct_A6B1AD3E [{:n-elems 1, :datatype :pointer, :name :g} {:n-elems 1, :datatype :pointer, :name :m} {:n-elems 1, :datatype :pointer, :name :v} {:n-elems 1, :datatype :pointer, :name :pf} {:n-elems 1, :datatype :float32, :name :fx_best} {:n-elems 1, :datatype :float32, :name :fx_prev} {:n-elems 1, :datatype :int32, :name :n_no_improvement}]] [:ggml_opt_context [{:n-elems 1, :datatype :pointer, :name :ctx} {:n-elems 1, :datatype :ggml_opt_params, :name :params} {:n-elems 1, :datatype :int32, :name :iter} {:n-elems 1, :datatype :int64, :name :nx} {:n-elems 1, :datatype :int8, :name :just_initialized} {:n-elems 1, :datatype :float32, :name :loss_before} {:n-elems 1, :datatype :float32, :name :loss_after} {:n-elems 1, :datatype :Struct_A6B1AD3E, :name :adam} {:n-elems 1, :datatype :Struct_A696EC15, :name :lbfgs}]] [:ggml_hash_set [{:n-elems 1, :datatype :int64, :name :size} {:n-elems 1, :datatype :pointer, :name :keys}]] [:ggml_cgraph [{:n-elems 1, :datatype :int32, :name :size} {:n-elems 1, :datatype :int32, :name :n_nodes} {:n-elems 1, :datatype :int32, :name :n_leafs} {:n-elems 1, :datatype :pointer, :name :nodes} {:n-elems 1, :datatype :pointer, :name :grads} {:n-elems 1, :datatype :pointer, :name :leafs} {:n-elems 1, :datatype :ggml_hash_set, :name :visited_hash_table} {:n-elems 1, :datatype :int32, :name :order}]]]
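The vector above pairs each struct name with its field layout ({:name, :datatype, :n-elems} maps), which matches the shape accepted by dtype-next's tech.v3.datatype.struct/define-datatype!. A minimal sketch of registering those layouts, assuming the vector has been saved to a ggml-structs.edn file (the file name is an assumption):

(require '[tech.v3.datatype.struct :as dt-struct]
         '[clojure.edn :as edn])

;; Assumed: the struct vector above, read back from an EDN file.
(def struct-defs (edn/read-string (slurp "ggml-structs.edn")))

;; Register each layout under its keyword name. The vector is already in
;; dependency order, so composite fields such as :ggml_opt_params referring
;; to :Struct_B6B81675 and :Struct_33023E32 resolve against earlier entries.
(doseq [[struct-name fields] struct-defs]
  (dt-struct/define-datatype! struct-name fields))

;; Once registered, instances can be allocated and read like maps:
(def init-params (dt-struct/new-struct :ggml_init_params))
(get init-params :mem_size) ;; => 0

The function map that follows reuses these struct names for its (by-value ...) argument and return types.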
{:ggml_backend_reg_find_by_name {:rettype :int64, :argtypes [[name :pointer?]], :doc "unsigned long (const char * name)\n"}, :ggml_soft_max_ext {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [mask :pointer?] [scale :float32] [max_bias :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, float scale, float max_bias)\n"}, :ggml_is_quantized {:rettype :int8, :argtypes [[type :int32]], :doc "_Bool (enum ggml_type type)\n"}, :ggml_conv_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s0 :int32] [p0 :int32] [d0 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int p0, int d0)\n"}, :ggml_backend_event_synchronize {:rettype :void, :argtypes [[event :pointer?]], :doc "void (struct ggml_backend_event * event)\n"}, :ggml_backend_cuda_get_device_count {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_event_free {:rettype :void, :argtypes [[event :pointer?]], :doc "void (struct ggml_backend_event * event)\n"}, :ggml_unary {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [op :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op)\n"}, :ggml_gallocr_reserve {:rettype :int8, :argtypes [[galloc :pointer?] [graph :pointer?]], :doc "_Bool (struct ggml_gallocr * galloc, struct ggml_cgraph * graph)\n"}, :ggml_format_name {:rettype :pointer?, :argtypes [[tensor :pointer?] [fmt :pointer?]], :doc "struct ggml_tensor * (struct ggml_tensor * tensor, const char * fmt)\n"}, :ggml_rope_ext_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] [n_dims :int32] [mode :int32] [n_ctx_orig :int32] [freq_base :float32] [freq_scale :float32] [ext_factor :float32] [attn_factor :float32] [beta_fast :float32] [beta_slow :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow)\n"}, :ggml_fp16_to_fp32_row {:rettype :void, :argtypes [[__unnamed_arg_0 :pointer?] [__unnamed_arg_1 :pointer?] [__unnamed_arg_2 :int64]], :doc "void (const unsigned short * , float * , long long )\n"}, :ggml_backend_reg_get_name {:rettype :pointer?, :argtypes [[i :int64]], :doc "const char * (unsigned long i)\n"}, :ggml_map_custom2_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?] [n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_backend_sycl_split_buffer_type {:rettype :pointer?, :argtypes [[tensor_split :pointer?]], :doc "struct ggml_backend_buffer_type * (const float * tensor_split)\n"}, :ggml_status_to_string {:rettype :pointer?, :argtypes [[status :int32]], :doc "const char * (enum ggml_status status)\n"}, :ggml_hardswish {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_graph_dump_dot {:rettype :void, :argtypes [[gb :pointer?] [gf :pointer?] 
[filename :pointer?]], :doc "void (const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename)\n"}, :ggml_is_scalar {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_set_1d_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long offset)\n"}, :ggml_backend_sycl_buffer_type {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend_buffer_type * (int device)\n"}, :ggml_graph_dup {:rettype :pointer?, :argtypes [[ctx :pointer?] [cgraph :pointer?]], :doc "struct ggml_cgraph * (struct ggml_context * ctx, struct ggml_cgraph * cgraph)\n"}, :ggml_get_i32_1d {:rettype :int32, :argtypes [[tensor :pointer?] [i :int32]], :doc "int (const struct ggml_tensor * tensor, int i)\n"}, :ggml_elu_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_conv_2d_sk_p0 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_cpu_has_avx_vnni {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_init {:rettype :pointer?, :argtypes [[params (by-value :ggml_init_params)]], :doc "struct ggml_context * (struct ggml_init_params params)\n"}, :ggml_opt_resume_g {:rettype :int32, :argtypes [[ctx :pointer?] [opt :pointer?] [f :pointer?] [gf :pointer?] [gb :pointer?] [callback :pointer?] [callback_data :pointer?]], :doc "enum ggml_opt_result (struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_tensor * f, struct ggml_cgraph * gf, struct ggml_cgraph * gb, void (*)(void *, int, float *, _Bool *) callback, void * callback_data)\n"}, :ggml_bf16_to_fp32_row {:rettype :void, :argtypes [[__unnamed_arg_0 :pointer?] [__unnamed_arg_1 :pointer?] [__unnamed_arg_2 :int64]], :doc "void (const ggml_bf16_t * , float * , long long )\n"}, :ggml_type_size {:rettype :int64, :argtypes [[type :int32]], :doc "unsigned long (enum ggml_type type)\n"}, :ggml_backend_blas_init {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend * ()\n"}, :ggml_dup_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?] [src :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, const struct ggml_tensor * src)\n"}, :ggml_cast {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [type :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_type type)\n"}, :ggml_backend_buffer_name {:rettype :pointer?, :argtypes [[buffer :pointer?]], :doc "const char * (struct ggml_backend_buffer * buffer)\n"}, :ggml_backend_view_init {:rettype :void, :argtypes [[tensor :pointer?]], :doc "void (struct ggml_tensor * tensor)\n"}, :ggml_silu {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_kompute_init {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend * (int device)\n"}, :ggml_map_custom1_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] 
[fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_ssm_scan {:rettype :pointer?, :argtypes [[ctx :pointer?] [s :pointer?] [x :pointer?] [dt :pointer?] [A :pointer?] [B :pointer?] [C :pointer?] [sq :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * s, struct ggml_tensor * x, struct ggml_tensor * dt, struct ggml_tensor * A, struct ggml_tensor * B, struct ggml_tensor * C, struct ggml_tensor * sq)\n"}, :ggml_opt_init {:rettype :void, :argtypes [[ctx :pointer?] [opt :pointer?] [params (by-value :ggml_opt_params)] [nx :int64]], :doc "void (struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_opt_params params, long long nx)\n"}, :ggml_backend_kompute_buffer_type {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend_buffer_type * (int device)\n"}, :ggml_quantize_chunk {:rettype :int64, :argtypes [[type :int32] [src :pointer?] [dst :pointer?] [start :int64] [nrows :int64] [n_per_row :int64] [imatrix :pointer?]], :doc "unsigned long (enum ggml_type type, const float * src, void * dst, long long start, long long nrows, long long n_per_row, const float * imatrix)\n"}, :ggml_backend_sched_get_n_splits {:rettype :int32, :argtypes [[sched :pointer?]], :doc "int (struct ggml_backend_sched * sched)\n"}, :ggml_get_rows_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c)\n"}, :ggml_print_objects {:rettype :void, :argtypes [[ctx :pointer?]], :doc "void (const struct ggml_context * ctx)\n"}, :ggml_backend_graph_copy_free {:rettype :void, :argtypes [[copy (by-value :ggml_backend_graph_copy)]], :doc "void (struct ggml_backend_graph_copy copy)\n"}, :ggml_time_ms {:rettype :int64, :argtypes [], :doc "long long ()\n"}, :ggml_backend_sched_get_buffer_size {:rettype :int64, :argtypes [[sched :pointer?] [backend :pointer?]], :doc "unsigned long (struct ggml_backend_sched * sched, struct ggml_backend * backend)\n"}, :ggml_add_rel_pos_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [pw :pointer?] [ph :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * pw, struct ggml_tensor * ph)\n"}, :ggml_sqr_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_map_binary_inplace_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(int, float *, const float *, const float *) fun)\n"}, :ggml_mul_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_mul_mat_id {:rettype :pointer?, :argtypes [[ctx :pointer?] [as :pointer?] [b :pointer?] 
[ids :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * as, struct ggml_tensor * b, struct ggml_tensor * ids)\n"}, :ggml_backend_cuda_buffer_type {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend_buffer_type * (int device)\n"}, :ggml_cpu_has_arm_fma {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_conv_depthwise_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s0 :int32] [s1 :int32] [p0 :int32] [p1 :int32] [d0 :int32] [d1 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int s1, int p0, int p1, int d0, int d1)\n"}, :ggml_cont {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_map_unary_inplace_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(int, float *, const float *) fun)\n"}, :ggml_soft_max_back_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_cuda_split_buffer_type {:rettype :pointer?, :argtypes [[tensor_split :pointer?]], :doc "struct ggml_backend_buffer_type * (const float * tensor_split)\n"}, :ggml_backend_cpu_buffer_type {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend_buffer_type * ()\n"}, :ggml_set_param {:rettype :void, :argtypes [[ctx :pointer?] [tensor :pointer?]], :doc "void (struct ggml_context * ctx, struct ggml_tensor * tensor)\n"}, :ggml_group_norm_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_groups :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_groups)\n"}, :ggml_sgn_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_is_numa {:rettype :int8, :argtypes [], :doc "_Bool ()\n"}, :ggml_log {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_tensor_copy {:rettype :void, :argtypes [[src :pointer?] [dst :pointer?]], :doc "void (struct ggml_tensor * src, struct ggml_tensor * dst)\n"}, :ggml_backend_tensor_get {:rettype :void, :argtypes [[tensor :pointer?] [data :pointer?] [offset :int64] [size :int64]], :doc "void (const struct ggml_tensor * tensor, void * data, unsigned long offset, unsigned long size)\n"}, :ggml_flash_attn_ext_set_prec {:rettype :void, :argtypes [[a :pointer?] [prec :int32]], :doc "void (struct ggml_tensor * a, enum ggml_prec prec)\n"}, :ggml_add {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_is_blas {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_backend_vk_host_buffer_type {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend_buffer_type * ()\n"}, :ggml_backend_graph_compute {:rettype :int32, :argtypes [[backend :pointer?] 
[cgraph :pointer?]], :doc "enum ggml_status (struct ggml_backend * backend, struct ggml_cgraph * cgraph)\n"}, :ggml_fp32_to_bf16_row {:rettype :void, :argtypes [[__unnamed_arg_0 :pointer?] [__unnamed_arg_1 :pointer?] [__unnamed_arg_2 :int64]], :doc "void (const float * , ggml_bf16_t * , long long )\n"}, :ggml_is_contiguous {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_scale_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [s :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float s)\n"}, :ggml_backend_supports_op {:rettype :int8, :argtypes [[backend :pointer?] [op :pointer?]], :doc "_Bool (struct ggml_backend * backend, const struct ggml_tensor * op)\n"}, :ggml_rope {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [n_dims :int32] [mode :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode)\n"}, :ggml_opt_resume {:rettype :int32, :argtypes [[ctx :pointer?] [opt :pointer?] [f :pointer?]], :doc "enum ggml_opt_result (struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_tensor * f)\n"}, :ggml_timestep_embedding {:rettype :pointer?, :argtypes [[ctx :pointer?] [timesteps :pointer?] [dim :int32] [max_period :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * timesteps, int dim, int max_period)\n"}, :ggml_get_first_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?]], :doc "struct ggml_tensor * (const struct ggml_context * ctx)\n"}, :ggml_norm {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [eps :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float eps)\n"}, :ggml_cpu_has_ssse3 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_sched_set_tensor_backend {:rettype :void, :argtypes [[sched :pointer?] [node :pointer?] [backend :pointer?]], :doc "void (struct ggml_backend_sched * sched, struct ggml_tensor * node, struct ggml_backend * backend)\n"}, :ggml_cpu_has_avx2 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_are_same_shape {:rettype :int8, :argtypes [[t0 :pointer?] [t1 :pointer?]], :doc "_Bool (const struct ggml_tensor * t0, const struct ggml_tensor * t1)\n"}, :ggml_view_4d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64] [ne2 :int64] [ne3 :int64] [nb1 :int64] [nb2 :int64] [nb3 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2, long long ne3, unsigned long nb1, unsigned long nb2, unsigned long nb3, unsigned long offset)\n"}, :ggml_cpy {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_used_mem {:rettype :int64, :argtypes [[ctx :pointer?]], :doc "unsigned long (const struct ggml_context * ctx)\n"}, :ggml_map_custom2 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?] 
[n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_backend_sched_get_n_copies {:rettype :int32, :argtypes [[sched :pointer?]], :doc "int (struct ggml_backend_sched * sched)\n"}, :ggml_soft_max_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_sycl_get_device_description {:rettype :void, :argtypes [[device :int32] [description :pointer?] [description_size :int64]], :doc "void (int device, char * description, unsigned long description_size)\n"}, :ggml_get_no_alloc {:rettype :int8, :argtypes [[ctx :pointer?]], :doc "_Bool (struct ggml_context * ctx)\n"}, :ggml_soft_max {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_pool_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [op :int32] [k0 :int32] [s0 :int32] [p0 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, int s0, int p0)\n"}, :ggml_backend_vk_get_device_count {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_leaky_relu {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [negative_slope :float32] [inplace :int8]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float negative_slope, _Bool inplace)\n"}, :ggml_backend_buffer_get_alignment {:rettype :int64, :argtypes [[buffer :pointer?]], :doc "unsigned long (struct ggml_backend_buffer * buffer)\n"}, :ggml_set_f32_nd {:rettype :void, :argtypes [[tensor :pointer?] [i0 :int32] [i1 :int32] [i2 :int32] [i3 :int32] [value :float32]], :doc "void (const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value)\n"}, :ggml_backend_sched_synchronize {:rettype :void, :argtypes [[sched :pointer?]], :doc "void (struct ggml_backend_sched * sched)\n"}, :ggml_sub {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_reshape_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0)\n"}, :ggml_graph_compute_with_ctx {:rettype :int32, :argtypes [[ctx :pointer?] [cgraph :pointer?] [n_threads :int32]], :doc "enum ggml_status (struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads)\n"}, :ggml_new_graph {:rettype :pointer?, :argtypes [[ctx :pointer?]], :doc "struct ggml_cgraph * (struct ggml_context * ctx)\n"}, :ggml_blck_size {:rettype :int32, :argtypes [[type :int32]], :doc "int (enum ggml_type type)\n"}, :ggml_mul_mat_set_prec {:rettype :void, :argtypes [[a :pointer?] [prec :int32]], :doc "void (struct ggml_tensor * a, enum ggml_prec prec)\n"}, :ggml_map_binary_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(int, float *, const float *, const float *) fun)\n"}, :ggml_abs_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] 
[a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_get_f32_1d {:rettype :float32, :argtypes [[tensor :pointer?] [i :int32]], :doc "float (const struct ggml_tensor * tensor, int i)\n"}, :ggml_gelu {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_is_vk {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_rms_norm_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [eps :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, float eps)\n"}, :ggml_flash_attn_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [q :pointer?] [k :pointer?] [v :pointer?] [d :pointer?] [masked :int8]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * d, _Bool masked)\n"}, :ggml_acc_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [nb1 :int64] [nb2 :int64] [nb3 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long nb2, unsigned long nb3, unsigned long offset)\n"}, :ggml_vk_current_device {:rettype (by-value :ggml_vk_device), :argtypes [], :doc "struct ggml_vk_device ()\n"}, :ggml_backend_reg_init_backend_from_str {:rettype :pointer?, :argtypes [[backend_str :pointer?]], :doc "struct ggml_backend * (const char * backend_str)\n"}, :ggml_backend_get_max_size {:rettype :int64, :argtypes [[backend :pointer?]], :doc "unsigned long (struct ggml_backend * backend)\n"}, :ggml_set_no_alloc {:rettype :void, :argtypes [[ctx :pointer?] [no_alloc :int8]], :doc "void (struct ggml_context * ctx, _Bool no_alloc)\n"}, :ggml_cpu_has_kompute {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_abs {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_buft_name {:rettype :pointer?, :argtypes [[buft :pointer?]], :doc "const char * (struct ggml_backend_buffer_type * buft)\n"}, :ggml_backend_metal_capture_next_compute {:rettype :void, :argtypes [[backend :pointer?]], :doc "void (struct ggml_backend * backend)\n"}, :ggml_view_3d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64] [ne2 :int64] [nb1 :int64] [nb2 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2, unsigned long nb1, unsigned long nb2, unsigned long offset)\n"}, :ggml_type_sizef {:rettype :float64, :argtypes [[type :int32]], :doc "double (enum ggml_type type)\n"}, :ggml_map_custom2_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_view_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] 
[ne0 :int64] [ne1 :int64] [nb1 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, unsigned long nb1, unsigned long offset)\n"}, :ggml_permute {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [axis0 :int32] [axis1 :int32] [axis2 :int32] [axis3 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int axis0, int axis1, int axis2, int axis3)\n"}, :ggml_backend_rpc_get_device_memory {:rettype :void, :argtypes [[endpoint :pointer?] [free :pointer?] [total :pointer?]], :doc "void (const char * endpoint, unsigned long * free, unsigned long * total)\n"}, :ggml_graph_overhead_custom {:rettype :int64, :argtypes [[size :int64] [grads :int8]], :doc "unsigned long (unsigned long size, _Bool grads)\n"}, :ggml_backend_buffer_get_base {:rettype :pointer?, :argtypes [[buffer :pointer?]], :doc "void * (struct ggml_backend_buffer * buffer)\n"}, :ggml_internal_get_type_traits {:rettype (by-value :ggml_type_traits_t), :argtypes [[type :int32]], :doc "ggml_type_traits_t (enum ggml_type type)\n"}, :ggml_concat {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [dim :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int dim)\n"}, :ggml_sqrt_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_new_tensor_3d {:rettype :pointer?, :argtypes [[ctx :pointer?] [type :int32] [ne0 :int64] [ne1 :int64] [ne2 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, enum ggml_type type, long long ne0, long long ne1, long long ne2)\n"}, :ggml_backend_name {:rettype :pointer?, :argtypes [[backend :pointer?]], :doc "const char * (struct ggml_backend * backend)\n"}, :ggml_log_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_bf16_to_fp32 {:rettype :float32, :argtypes [[__unnamed_arg_0 (by-value :ggml_bf16_t)]], :doc "float (ggml_bf16_t )\n"}, :ggml_is_matrix {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_step {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_set_2d_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [nb1 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long offset)\n"}, :ggml_op_name {:rettype :pointer?, :argtypes [[op :int32]], :doc "const char * (enum ggml_op op)\n"}, :ggml_backend_tensor_get_async {:rettype :void, :argtypes [[backend :pointer?] [tensor :pointer?] [data :pointer?] [offset :int64] [size :int64]], :doc "void (struct ggml_backend * backend, const struct ggml_tensor * tensor, void * data, unsigned long offset, unsigned long size)\n"}, :ggml_backend_sched_graph_compute_async {:rettype :int32, :argtypes [[sched :pointer?] 
[graph :pointer?]], :doc "enum ggml_status (struct ggml_backend_sched * sched, struct ggml_cgraph * graph)\n"}, :ggml_backend_reg_get_default_buffer_type {:rettype :pointer?, :argtypes [[i :int64]], :doc "struct ggml_backend_buffer_type * (unsigned long i)\n"}, :ggml_backend_sycl_get_device_memory {:rettype :void, :argtypes [[device :int32] [free :pointer?] [total :pointer?]], :doc "void (int device, unsigned long * free, unsigned long * total)\n"}, :ggml_im2col {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s0 :int32] [s1 :int32] [p0 :int32] [p1 :int32] [d0 :int32] [d1 :int32] [is_2D :int8] [dst_type :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int s1, int p0, int p1, int d0, int d1, _Bool is_2D, enum ggml_type dst_type)\n"}, :ggml_gelu_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_tanh {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_div_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_graph_reset {:rettype :void, :argtypes [[cgraph :pointer?]], :doc "void (struct ggml_cgraph * cgraph)\n"}, :ggml_rope_custom_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [n_dims :int32] [mode :int32] [n_ctx_orig :int32] [freq_base :float32] [freq_scale :float32] [ext_factor :float32] [attn_factor :float32] [beta_fast :float32] [beta_slow :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow)\n"}, :ggml_rms_norm {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [eps :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float eps)\n"}, :ggml_set_zero {:rettype :pointer?, :argtypes [[tensor :pointer?]], :doc "struct ggml_tensor * (struct ggml_tensor * tensor)\n"}, :ggml_gallocr_reserve_n {:rettype :int8, :argtypes [[galloc :pointer?] [graph :pointer?] [node_buffer_ids :pointer?] [leaf_buffer_ids :pointer?]], :doc "_Bool (struct ggml_gallocr * galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids)\n"}, :ggml_opt {:rettype :int32, :argtypes [[ctx :pointer?] [params (by-value :ggml_opt_params)] [f :pointer?]], :doc "enum ggml_opt_result (struct ggml_context * ctx, struct ggml_opt_params params, struct ggml_tensor * f)\n"}, :ggml_backend_buffer_is_host {:rettype :int8, :argtypes [[buffer :pointer?]], :doc "_Bool (struct ggml_backend_buffer * buffer)\n"}, :ggml_repeat_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_reg_get_count {:rettype :int64, :argtypes [], :doc "unsigned long ()\n"}, :ggml_backend_sycl_print_sycl_devices {:rettype :void, :argtypes [], :doc "void ()\n"}, :ggml_backend_rpc_init {:rettype :pointer?, :argtypes [[endpoint :pointer?]], :doc "struct ggml_backend * (const char * endpoint)\n"}, :ggml_mul_mat {:rettype :pointer?, :argtypes [[ctx :pointer?] 
[a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_sum {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_get_i32_nd {:rettype :int32, :argtypes [[tensor :pointer?] [i0 :int32] [i1 :int32] [i2 :int32] [i3 :int32]], :doc "int (const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3)\n"}, :ggml_rms_norm_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [eps :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float eps)\n"}, :ggml_fp32_to_fp16_row {:rettype :void, :argtypes [[__unnamed_arg_0 :pointer?] [__unnamed_arg_1 :pointer?] [__unnamed_arg_2 :int64]], :doc "void (const float * , unsigned short * , long long )\n"}, :ggml_backend_graph_plan_create {:rettype :pointer?, :argtypes [[backend :pointer?] [cgraph :pointer?]], :doc "void * (struct ggml_backend * backend, struct ggml_cgraph * cgraph)\n"}, :ggml_cross_entropy_loss_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c)\n"}, :ggml_new_tensor_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [type :int32] [ne0 :int64] [ne1 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, enum ggml_type type, long long ne0, long long ne1)\n"}, :ggml_unravel_index {:rettype :void, :argtypes [[tensor :pointer?] [i :int64] [i0 :pointer?] [i1 :pointer?] [i2 :pointer?] [i3 :pointer?]], :doc "void (const struct ggml_tensor * tensor, long long i, long long * i0, long long * i1, long long * i2, long long * i3)\n"}, :ggml_map_custom2_inplace_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_diag_mask_inf_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_past :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_past)\n"}, :ggml_set_i32 {:rettype :pointer?, :argtypes [[tensor :pointer?] [value :int32]], :doc "struct ggml_tensor * (struct ggml_tensor * tensor, int value)\n"}, :ggml_backend_cuda_register_host_buffer {:rettype :int8, :argtypes [[buffer :pointer?] [size :int64]], :doc "_Bool (void * buffer, unsigned long size)\n"}, :ggml_backend_cpu_buffer_from_ptr {:rettype :pointer?, :argtypes [[ptr :pointer?] [size :int64]], :doc "struct ggml_backend_buffer * (void * ptr, unsigned long size)\n"}, :ggml_get_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?] [name :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, const char * name)\n"}, :ggml_reshape_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1)\n"}, :ggml_relu {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_relu_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] 
[a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_metal_log_set_callback {:rettype :void, :argtypes [[log_callback :pointer?] [user_data :pointer?]], :doc "void (void (*)(enum ggml_log_level, const char *, void *) log_callback, void * user_data)\n"}, :ggml_backend_cuda_log_set_callback {:rettype :void, :argtypes [[log_callback :pointer?] [user_data :pointer?]], :doc "void (void (*)(enum ggml_log_level, const char *, void *) log_callback, void * user_data)\n"}, :ggml_out_prod {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_unary_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [op :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op)\n"}, :ggml_get_unary_op {:rettype :int32, :argtypes [[tensor :pointer?]], :doc "enum ggml_unary_op (const struct ggml_tensor * tensor)\n"}, :ggml_cont_4d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64] [ne2 :int64] [ne3 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2, long long ne3)\n"}, :ggml_backend_sched_get_n_backends {:rettype :int32, :argtypes [[sched :pointer?]], :doc "int (struct ggml_backend_sched * sched)\n"}, :ggml_backend_alloc_ctx_tensors {:rettype :pointer?, :argtypes [[ctx :pointer?] [backend :pointer?]], :doc "struct ggml_backend_buffer * (struct ggml_context * ctx, struct ggml_backend * backend)\n"}, :ggml_flash_attn_ext {:rettype :pointer?, :argtypes [[ctx :pointer?] [q :pointer?] [k :pointer?] [v :pointer?] [mask :pointer?] [scale :float32] [max_bias :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * mask, float scale, float max_bias)\n"}, :ggml_add_rel_pos {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [pw :pointer?] [ph :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * pw, struct ggml_tensor * ph)\n"}, :ggml_backend_sched_graph_compute {:rettype :int32, :argtypes [[sched :pointer?] [graph :pointer?]], :doc "enum ggml_status (struct ggml_backend_sched * sched, struct ggml_cgraph * graph)\n"}, :ggml_print_backtrace {:rettype :void, :argtypes [], :doc "void ()\n"}, :ggml_add_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_sched_free {:rettype :void, :argtypes [[sched :pointer?]], :doc "void (struct ggml_backend_sched * sched)\n"}, :ggml_backend_buffer_get_max_size {:rettype :int64, :argtypes [[buffer :pointer?]], :doc "unsigned long (struct ggml_backend_buffer * buffer)\n"}, :ggml_mul {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_set_i32_nd {:rettype :void, :argtypes [[tensor :pointer?] [i0 :int32] [i1 :int32] [i2 :int32] [i3 :int32] [value :int32]], :doc "void (const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int value)\n"}, :ggml_add1 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] 
[b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_diag {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_is_kompute {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_backend_vk_buffer_type {:rettype :pointer?, :argtypes [[dev_num :int64]], :doc "struct ggml_backend_buffer_type * (unsigned long dev_num)\n"}, :ggml_nrows {:rettype :int64, :argtypes [[tensor :pointer?]], :doc "long long (const struct ggml_tensor * tensor)\n"}, :ggml_reshape_4d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64] [ne2 :int64] [ne3 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2, long long ne3)\n"}, :ggml_backend_sched_reserve {:rettype :int8, :argtypes [[sched :pointer?] [measure_graph :pointer?]], :doc "_Bool (struct ggml_backend_sched * sched, struct ggml_cgraph * measure_graph)\n"}, :ggml_gallocr_alloc_graph {:rettype :int8, :argtypes [[galloc :pointer?] [graph :pointer?]], :doc "_Bool (struct ggml_gallocr * galloc, struct ggml_cgraph * graph)\n"}, :ggml_time_us {:rettype :int64, :argtypes [], :doc "long long ()\n"}, :ggml_free {:rettype :void, :argtypes [[ctx :pointer?]], :doc "void (struct ggml_context * ctx)\n"}, :ggml_backend_graph_compute_async {:rettype :int32, :argtypes [[backend :pointer?] [cgraph :pointer?]], :doc "enum ggml_status (struct ggml_backend * backend, struct ggml_cgraph * cgraph)\n"}, :ggml_backend_sycl_init {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend * (int device)\n"}, :ggml_reshape {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_vk_has_vulkan {:rettype :int8, :argtypes [], :doc "_Bool ()\n"}, :ggml_backend_buft_get_alignment {:rettype :int64, :argtypes [[buft :pointer?]], :doc "unsigned long (struct ggml_backend_buffer_type * buft)\n"}, :ggml_add1_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_cpu_set_abort_callback {:rettype :void, :argtypes [[backend_cpu :pointer?] [abort_callback :pointer?] [abort_callback_data :pointer?]], :doc "void (struct ggml_backend * backend_cpu, _Bool (*)(void *) abort_callback, void * abort_callback_data)\n"}, :ggml_get_data {:rettype :pointer?, :argtypes [[tensor :pointer?]], :doc "void * (const struct ggml_tensor * tensor)\n"}, :ggml_soft_max_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_rope_custom {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] 
[n_dims :int32] [mode :int32] [n_ctx_orig :int32] [freq_base :float32] [freq_scale :float32] [ext_factor :float32] [attn_factor :float32] [beta_fast :float32] [beta_slow :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow)\n"}, :ggml_conv_transpose_2d_p0 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [stride :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int stride)\n"}, :ggml_backend_cuda_get_device_description {:rettype :void, :argtypes [[device :int32] [description :pointer?] [description_size :int64]], :doc "void (int device, char * description, unsigned long description_size)\n"}, :ggml_backend_buffer_get_type {:rettype :pointer?, :argtypes [[buffer :pointer?]], :doc "struct ggml_backend_buffer_type * (struct ggml_backend_buffer * buffer)\n"}, :ggml_backend_vk_init {:rettype :pointer?, :argtypes [[dev_num :int64]], :doc "struct ggml_backend * (unsigned long dev_num)\n"}, :ggml_vk_has_device {:rettype :int8, :argtypes [], :doc "_Bool ()\n"}, :ggml_get_max_tensor_size {:rettype :int64, :argtypes [[ctx :pointer?]], :doc "unsigned long (const struct ggml_context * ctx)\n"}, :ggml_nelements {:rettype :int64, :argtypes [[tensor :pointer?]], :doc "long long (const struct ggml_tensor * tensor)\n"}, :ggml_gallocr_free {:rettype :void, :argtypes [[galloc :pointer?]], :doc "void (struct ggml_gallocr * galloc)\n"}, :ggml_cpu_has_avx {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_diag_mask_inf {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_past :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_past)\n"}, :ggml_norm_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [eps :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float eps)\n"}, :ggml_backend_tensor_set_async {:rettype :void, :argtypes [[backend :pointer?] [tensor :pointer?] [data :pointer?] [offset :int64] [size :int64]], :doc "void (struct ggml_backend * backend, struct ggml_tensor * tensor, const void * data, unsigned long offset, unsigned long size)\n"}, :ggml_backend_metal_buffer_from_ptr {:rettype :pointer?, :argtypes [[data :pointer?] [size :int64] [max_size :int64]], :doc "struct ggml_backend_buffer * (void * data, unsigned long size, unsigned long max_size)\n"}, :ggml_fp32_to_bf16 {:rettype (by-value :ggml_bf16_t), :argtypes [[__unnamed_arg_0 :float32]], :doc "ggml_bf16_t (float )\n"}, :ggml_gelu_quick {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_graph_import {:rettype :pointer?, :argtypes [[fname :pointer?] [ctx_data :pointer?] [ctx_eval :pointer?]], :doc "struct ggml_cgraph * (const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval)\n"}, :ggml_backend_offload_op {:rettype :int8, :argtypes [[backend :pointer?] [op :pointer?]], :doc "_Bool (struct ggml_backend * backend, const struct ggml_tensor * op)\n"}, :ggml_cpu_has_neon {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_silu_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] 
[a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_get_mem_buffer {:rettype :pointer?, :argtypes [[ctx :pointer?]], :doc "void * (const struct ggml_context * ctx)\n"}, :ggml_backend_graph_plan_free {:rettype :void, :argtypes [[backend :pointer?] [plan :pointer?]], :doc "void (struct ggml_backend * backend, void * plan)\n"}, :ggml_backend_supports_buft {:rettype :int8, :argtypes [[backend :pointer?] [buft :pointer?]], :doc "_Bool (struct ggml_backend * backend, struct ggml_backend_buffer_type * buft)\n"}, :ggml_backend_graph_copy {:rettype (by-value :ggml_backend_graph_copy), :argtypes [[backend :pointer?] [graph :pointer?]], :doc "struct ggml_backend_graph_copy (struct ggml_backend * backend, struct ggml_cgraph * graph)\n"}, :ggml_diag_mask_zero {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_past :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_past)\n"}, :ggml_set_i32_1d {:rettype :void, :argtypes [[tensor :pointer?] [i :int32] [value :int32]], :doc "void (const struct ggml_tensor * tensor, int i, int value)\n"}, :ggml_map_custom3_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] [fun :pointer?] [n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_backend_sched_reset {:rettype :void, :argtypes [[sched :pointer?]], :doc "void (struct ggml_backend_sched * sched)\n"}, :ggml_cpu_has_avx512_vbmi {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_graph_overhead {:rettype :int64, :argtypes [], :doc "unsigned long ()\n"}, :ggml_validate_row_data {:rettype :int8, :argtypes [[type :int32] [data :pointer?] [nbytes :int64]], :doc "_Bool (enum ggml_type type, const void * data, unsigned long nbytes)\n"}, :ggml_cpu_has_sycl {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_get_default_buffer_type {:rettype :pointer?, :argtypes [[backend :pointer?]], :doc "struct ggml_backend_buffer_type * (struct ggml_backend * backend)\n"}, :ggml_dup_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_free {:rettype :void, :argtypes [[backend :pointer?]], :doc "void (struct ggml_backend * backend)\n"}, :ggml_map_unary_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(int, float *, const float *) fun)\n"}, :ggml_sum_rows {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_sqr {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_metal_set_n_cb {:rettype :void, :argtypes [[backend :pointer?] [n_cb :int32]], :doc "void (struct ggml_backend * backend, int n_cb)\n"}, :ggml_graph_clear {:rettype :void, :argtypes [[cgraph :pointer?]], :doc "void (struct ggml_cgraph * cgraph)\n"}, :ggml_acc {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] 
[nb1 :int64] [nb2 :int64] [nb3 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long nb2, unsigned long nb3, unsigned long offset)\n"}, :ggml_backend_alloc_buffer {:rettype :pointer?, :argtypes [[backend :pointer?] [size :int64]], :doc "struct ggml_backend_buffer * (struct ggml_backend * backend, unsigned long size)\n"}, :ggml_is_contiguous_2 {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_backend_compare_graph_backend {:rettype :int8, :argtypes [[backend1 :pointer?] [backend2 :pointer?] [graph :pointer?] [callback :pointer?] [user_data :pointer?]], :doc "_Bool (struct ggml_backend * backend1, struct ggml_backend * backend2, struct ggml_cgraph * graph, _Bool (*)(int, struct ggml_tensor *, struct ggml_tensor *, void *) callback, void * user_data)\n"}, :ggml_backend_tensor_set {:rettype :void, :argtypes [[tensor :pointer?] [data :pointer?] [offset :int64] [size :int64]], :doc "void (struct ggml_tensor * tensor, const void * data, unsigned long offset, unsigned long size)\n"}, :ggml_backend_event_new {:rettype :pointer?, :argtypes [[backend :pointer?]], :doc "struct ggml_backend_event * (struct ggml_backend * backend)\n"}, :ggml_set_f32_1d {:rettype :void, :argtypes [[tensor :pointer?] [i :int32] [value :float32]], :doc "void (const struct ggml_tensor * tensor, int i, float value)\n"}, :ggml_cont_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1)\n"}, :ggml_n_dims {:rettype :int32, :argtypes [[tensor :pointer?]], :doc "int (const struct ggml_tensor * tensor)\n"}, :ggml_backend_synchronize {:rettype :void, :argtypes [[backend :pointer?]], :doc "void (struct ggml_backend * backend)\n"}, :ggml_backend_reg_alloc_buffer {:rettype :pointer?, :argtypes [[i :int64] [size :int64]], :doc "struct ggml_backend_buffer * (unsigned long i, unsigned long size)\n"}, :ggml_is_permuted {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_gelu_quick_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_graph_plan_compute {:rettype :int32, :argtypes [[backend :pointer?] [plan :pointer?]], :doc "enum ggml_status (struct ggml_backend * backend, void * plan)\n"}, :ggml_sigmoid {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_upscale_ext {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int32] [ne1 :int32] [ne2 :int32] [ne3 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2, int ne3)\n"}, :ggml_ssm_conv {:rettype :pointer?, :argtypes [[ctx :pointer?] [s :pointer?] [x :pointer?] [c :pointer?] [sq :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * s, struct ggml_tensor * x, struct ggml_tensor * c, struct ggml_tensor * sq)\n"}, :ggml_get_f32_nd {:rettype :float32, :argtypes [[tensor :pointer?] 
[i0 :int32] [i1 :int32] [i2 :int32] [i3 :int32]], :doc "float (const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3)\n"}, :ggml_cycles {:rettype :int64, :argtypes [], :doc "long long ()\n"}, :ggml_are_same_stride {:rettype :int8, :argtypes [[t0 :pointer?] [t1 :pointer?]], :doc "_Bool (const struct ggml_tensor * t0, const struct ggml_tensor * t1)\n"}, :ggml_backend_sycl_get_device_count {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_quantize_init {:rettype :void, :argtypes [[type :int32]], :doc "void (enum ggml_type type)\n"}, :ggml_backend_tensor_copy_async {:rettype :void, :argtypes [[backend_src :pointer?] [backend_dst :pointer?] [src :pointer?] [dst :pointer?]], :doc "void (struct ggml_backend * backend_src, struct ggml_backend * backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst)\n"}, :ggml_cpu_has_metal {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_is_rpc {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_sigmoid_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_metal_init {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend * ()\n"}, :ggml_backend_blas_set_n_threads {:rettype :void, :argtypes [[backend_blas :pointer?] [n_threads :int32]], :doc "void (struct ggml_backend * backend_blas, int n_threads)\n"}, :ggml_gallocr_get_buffer_size {:rettype :int64, :argtypes [[galloc :pointer?] [buffer_id :int32]], :doc "unsigned long (struct ggml_gallocr * galloc, int buffer_id)\n"}, :ggml_new_graph_custom {:rettype :pointer?, :argtypes [[ctx :pointer?] [size :int64] [grads :int8]], :doc "struct ggml_cgraph * (struct ggml_context * ctx, unsigned long size, _Bool grads)\n"}, :ggml_tallocr_new {:rettype (by-value :ggml_tallocr), :argtypes [[buffer :pointer?]], :doc "struct ggml_tallocr (struct ggml_backend_buffer * buffer)\n"}, :ggml_map_custom1_inplace_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_backend_sycl_host_buffer_type {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend_buffer_type * ()\n"}, :ggml_is_contiguous_1 {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_unary_op_name {:rettype :pointer?, :argtypes [[op :int32]], :doc "const char * (enum ggml_unary_op op)\n"}, :ggml_op_desc {:rettype :pointer?, :argtypes [[t :pointer?]], :doc "const char * (const struct ggml_tensor * t)\n"}, :ggml_backend_sched_set_eval_callback {:rettype :void, :argtypes [[sched :pointer?] [callback :pointer?] [user_data :pointer?]], :doc "void (struct ggml_backend_sched * sched, _Bool (*)(struct ggml_tensor *, _Bool, void *) callback, void * user_data)\n"}, :ggml_elu {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_transpose {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_reshape_3d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] 
[ne0 :int64] [ne1 :int64] [ne2 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2)\n"}, :ggml_add_cast {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [type :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, enum ggml_type type)\n"}, :ggml_vk_available_devices {:rettype :pointer?, :argtypes [[memoryRequired :int64] [count :pointer?]], :doc "struct ggml_vk_device * (unsigned long memoryRequired, unsigned long * count)\n"}, :ggml_argmax {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_cont_3d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [ne1 :int64] [ne2 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, long long ne1, long long ne2)\n"}, :ggml_cpu_has_gpublas {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_pool_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [op :int32] [k0 :int32] [k1 :int32] [s0 :int32] [s1 :int32] [p0 :float32] [p1 :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, int k1, int s0, int s1, float p0, float p1)\n"}, :ggml_neg {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_opt_default_params {:rettype (by-value :ggml_opt_params), :argtypes [[type :int32]], :doc "struct ggml_opt_params (enum ggml_opt_type type)\n"}, :ggml_conv_1d_ph {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s :int32] [d :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s, int d)\n"}, :ggml_is_contiguous_0 {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_cpu_has_fma {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_build_backward_gradient_checkpointing {:rettype :void, :argtypes [[ctx :pointer?] [gf :pointer?] [gb :pointer?] [gb_tmp :pointer?] [checkpoints :pointer?] [n_checkpoints :int32]], :doc "void (struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, struct ggml_cgraph * gb_tmp, struct ggml_tensor ** checkpoints, int n_checkpoints)\n"}, :ggml_set_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long offset)\n"}, :ggml_cpu_has_vulkan {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_buft_is_host {:rettype :int8, :argtypes [[buft :pointer?]], :doc "_Bool (struct ggml_backend_buffer_type * buft)\n"}, :ggml_get_mem_size {:rettype :int64, :argtypes [[ctx :pointer?]], :doc "unsigned long (const struct ggml_context * ctx)\n"}, :ggml_cpu_has_avx512_vnni {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_quantize_free {:rettype :void, :argtypes [], :doc "void ()\n"}, :ggml_repeat {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_build_forward_expand {:rettype :void, :argtypes [[cgraph :pointer?] 
[tensor :pointer?]], :doc "void (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor)\n"}, :ggml_backend_metal_buffer_type {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend_buffer_type * ()\n"}, :ggml_backend_tensor_alloc {:rettype :void, :argtypes [[buffer :pointer?] [tensor :pointer?] [addr :pointer?]], :doc "void (struct ggml_backend_buffer * buffer, struct ggml_tensor * tensor, void * addr)\n"}, :ggml_map_custom3_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_set_input {:rettype :void, :argtypes [[tensor :pointer?]], :doc "void (struct ggml_tensor * tensor)\n"}, :ggml_set_scratch {:rettype :int64, :argtypes [[ctx :pointer?] [scratch (by-value :ggml_scratch)]], :doc "unsigned long (struct ggml_context * ctx, struct ggml_scratch scratch)\n"}, :ggml_get_next_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?] [tensor :pointer?]], :doc "struct ggml_tensor * (const struct ggml_context * ctx, struct ggml_tensor * tensor)\n"}, :ggml_backend_buft_get_max_size {:rettype :int64, :argtypes [[buft :pointer?]], :doc "unsigned long (struct ggml_backend_buffer_type * buft)\n"}, :ggml_set_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [nb1 :int64] [nb2 :int64] [nb3 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long nb2, unsigned long nb3, unsigned long offset)\n"}, :ggml_backend_cpu_init {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend * ()\n"}, :ggml_backend_vk_get_device_memory {:rettype :void, :argtypes [[device :int32] [free :pointer?] [total :pointer?]], :doc "void (int device, unsigned long * free, unsigned long * total)\n"}, :ggml_gallocr_new_n {:rettype :pointer?, :argtypes [[bufts :pointer?] [n_bufs :int32]], :doc "struct ggml_gallocr * (struct ggml_backend_buffer_type ** bufts, int n_bufs)\n"}, :ggml_graph_print {:rettype :void, :argtypes [[cgraph :pointer?]], :doc "void (const struct ggml_cgraph * cgraph)\n"}, :ggml_get_rows {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_cont_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0)\n"}, :ggml_cpu_has_avx512 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_set_f32 {:rettype :pointer?, :argtypes [[tensor :pointer?] [value :float32]], :doc "struct ggml_tensor * (struct ggml_tensor * tensor, float value)\n"}, :ggml_cpu_has_blas {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_rope_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] 
[n_dims :int32] [mode :int32] [n_ctx_orig :int32] [freq_base :float32] [freq_scale :float32] [ext_factor :float32] [attn_factor :float32] [beta_fast :float32] [beta_slow :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow)\n"}, :ggml_backend_cpu_set_n_threads {:rettype :void, :argtypes [[backend_cpu :pointer?] [n_threads :int32]], :doc "void (struct ggml_backend * backend_cpu, int n_threads)\n"}, :ggml_graph_view {:rettype (by-value :ggml_cgraph), :argtypes [[cgraph :pointer?] [i0 :int32] [i1 :int32]], :doc "struct ggml_cgraph (struct ggml_cgraph * cgraph, int i0, int i1)\n"}, :ggml_argsort {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [order :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_sort_order order)\n"}, :ggml_cpu_has_f16c {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_map_custom3 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] [fun :pointer?] [n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_conv_2d_s1_ph {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_backend_buffer_clear {:rettype :void, :argtypes [[buffer :pointer?] [value :int8]], :doc "void (struct ggml_backend_buffer * buffer, unsigned char value)\n"}, :ggml_cycles_per_ms {:rettype :int64, :argtypes [], :doc "long long ()\n"}, :ggml_backend_is_cuda {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_op_symbol {:rettype :pointer?, :argtypes [[op :int32]], :doc "const char * (enum ggml_op op)\n"}, :ggml_rope_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [n_dims :int32] [mode :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode)\n"}, :ggml_ftype_to_ggml_type {:rettype :int32, :argtypes [[ftype :int32]], :doc "enum ggml_type (enum ggml_ftype ftype)\n"}, :ggml_is_transposed {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_element_size {:rettype :int64, :argtypes [[tensor :pointer?]], :doc "unsigned long (const struct ggml_tensor * tensor)\n"}, :ggml_win_unpart {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [w0 :int32] [h0 :int32] [w :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int w0, int h0, int w)\n"}, :ggml_view_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?] [src :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * src)\n"}, :ggml_rope_ext {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] 
[n_dims :int32] [mode :int32] [n_ctx_orig :int32] [freq_base :float32] [freq_scale :float32] [ext_factor :float32] [attn_factor :float32] [beta_fast :float32] [beta_slow :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow)\n"}, :ggml_cpu_has_fp16_va {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_tensor_overhead {:rettype :int64, :argtypes [], :doc "unsigned long ()\n"}, :ggml_backend_buffer_free {:rettype :void, :argtypes [[buffer :pointer?]], :doc "void (struct ggml_backend_buffer * buffer)\n"}, :ggml_set_name {:rettype :pointer?, :argtypes [[tensor :pointer?] [name :pointer?]], :doc "struct ggml_tensor * (struct ggml_tensor * tensor, const char * name)\n"}, :ggml_backend_rpc_buffer_type {:rettype :pointer?, :argtypes [[endpoint :pointer?]], :doc "struct ggml_backend_buffer_type * (const char * endpoint)\n"}, :ggml_vk_instance_init {:rettype :void, :argtypes [], :doc "void ()\n"}, :ggml_backend_alloc_ctx_tensors_from_buft {:rettype :pointer?, :argtypes [[ctx :pointer?] [buft :pointer?]], :doc "struct ggml_backend_buffer * (struct ggml_context * ctx, struct ggml_backend_buffer_type * buft)\n"}, :ggml_is_3d {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_backend_sched_get_backend {:rettype :pointer?, :argtypes [[sched :pointer?] [i :int32]], :doc "struct ggml_backend * (struct ggml_backend_sched * sched, int i)\n"}, :ggml_backend_vk_get_device_description {:rettype :void, :argtypes [[device :int32] [description :pointer?] [description_size :int64]], :doc "void (int device, char * description, unsigned long description_size)\n"}, :ggml_map_custom1 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [fun :pointer?] [n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_type_name {:rettype :pointer?, :argtypes [[type :int32]], :doc "const char * (enum ggml_type type)\n"}, :ggml_is_empty {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_backend_metal_supports_family {:rettype :int8, :argtypes [[backend :pointer?] [family :int32]], :doc "_Bool (struct ggml_backend * backend, int family)\n"}, :ggml_backend_buft_alloc_buffer {:rettype :pointer?, :argtypes [[buft :pointer?] [size :int64]], :doc "struct ggml_backend_buffer * (struct ggml_backend_buffer_type * buft, unsigned long size)\n"}, :ggml_cpu_has_rpc {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_sched_alloc_graph {:rettype :int8, :argtypes [[sched :pointer?] [graph :pointer?]], :doc "_Bool (struct ggml_backend_sched * sched, struct ggml_cgraph * graph)\n"}, :ggml_backend_buffer_set_usage {:rettype :void, :argtypes [[buffer :pointer?] [usage :int32]], :doc "void (struct ggml_backend_buffer * buffer, enum ggml_backend_buffer_usage usage)\n"}, :ggml_graph_export {:rettype :void, :argtypes [[cgraph :pointer?] [fname :pointer?]], :doc "void (const struct ggml_cgraph * cgraph, const char * fname)\n"}, :ggml_new_tensor {:rettype :pointer?, :argtypes [[ctx :pointer?] 
[type :int32] [n_dims :int32] [ne :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, enum ggml_type type, int n_dims, const long long * ne)\n"}, :ggml_backend_buffer_init_tensor {:rettype :void, :argtypes [[buffer :pointer?] [tensor :pointer?]], :doc "void (struct ggml_backend_buffer * buffer, struct ggml_tensor * tensor)\n"}, :ggml_pad {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [p0 :int32] [p1 :int32] [p2 :int32] [p3 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int p0, int p1, int p2, int p3)\n"}, :ggml_get_data_f32 {:rettype :pointer?, :argtypes [[tensor :pointer?]], :doc "float * (const struct ggml_tensor * tensor)\n"}, :ggml_sqrt {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_top_k {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [k :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int k)\n"}, :ggml_sub_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_nbytes {:rettype :int64, :argtypes [[tensor :pointer?]], :doc "unsigned long (const struct ggml_tensor * tensor)\n"}, :ggml_vk_get_device {:rettype :int8, :argtypes [[device :pointer?] [memoryRequired :int64] [name :pointer?]], :doc "_Bool (struct ggml_vk_device * device, unsigned long memoryRequired, const char * name)\n"}, :ggml_print_object {:rettype :void, :argtypes [[obj :pointer?]], :doc "void (const struct ggml_object * obj)\n"}, :ggml_clamp {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [min :float32] [max :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float min, float max)\n"}, :ggml_new_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [value :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, float value)\n"}, :ggml_backend_sched_get_tensor_backend {:rettype :pointer?, :argtypes [[sched :pointer?] [node :pointer?]], :doc "struct ggml_backend * (struct ggml_backend_sched * sched, struct ggml_tensor * node)\n"}, :ggml_backend_buffer_get_alloc_size {:rettype :int64, :argtypes [[buffer :pointer?] [tensor :pointer?]], :doc "unsigned long (struct ggml_backend_buffer * buffer, struct ggml_tensor * tensor)\n"}, :ggml_is_vector {:rettype :int8, :argtypes [[tensor :pointer?]], :doc "_Bool (const struct ggml_tensor * tensor)\n"}, :ggml_dup {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_new_i32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [value :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, int value)\n"}, :ggml_view_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [ne0 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, long long ne0, unsigned long offset)\n"}, :ggml_scale {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] 
[s :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, float s)\n"}, :ggml_fp32_to_fp16 {:rettype :int16, :argtypes [[__unnamed_arg_0 :float32]], :doc "unsigned short (float )\n"}, :ggml_nbytes_pad {:rettype :int64, :argtypes [[tensor :pointer?]], :doc "unsigned long (const struct ggml_tensor * tensor)\n"}, :ggml_cpu_has_sve {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_cross_entropy_loss {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_time_init {:rettype :void, :argtypes [], :doc "void ()\n"}, :ggml_group_norm {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_groups :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_groups)\n"}, :ggml_set_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [nb1 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long offset)\n"}, :ggml_win_part {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [w :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int w)\n"}, :ggml_quantize_requires_imatrix {:rettype :int8, :argtypes [[type :int32]], :doc "_Bool (enum ggml_type type)\n"}, :ggml_sycl_get_gpu_list {:rettype :void, :argtypes [[id_list :pointer?] [max_len :int32]], :doc "void (int * id_list, int max_len)\n"}, :ggml_graph_compute {:rettype :int32, :argtypes [[cgraph :pointer?] [cplan :pointer?]], :doc "enum ggml_status (struct ggml_cgraph * cgraph, struct ggml_cplan * cplan)\n"}, :ggml_fp16_to_fp32 {:rettype :float32, :argtypes [[__unnamed_arg_0 :int16]], :doc "float (unsigned short )\n"}, :ggml_backend_buffer_reset {:rettype :void, :argtypes [[buffer :pointer?]], :doc "void (struct ggml_backend_buffer * buffer)\n"}, :ggml_cpu_has_matmul_int8 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_hardsigmoid {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_gallocr_new {:rettype :pointer?, :argtypes [[buft :pointer?]], :doc "struct ggml_gallocr * (struct ggml_backend_buffer_type * buft)\n"}, :ggml_backend_cuda_get_device_memory {:rettype :void, :argtypes [[device :int32] [free :pointer?] [total :pointer?]], :doc "void (int device, unsigned long * free, unsigned long * total)\n"}, :ggml_get_name {:rettype :pointer?, :argtypes [[tensor :pointer?]], :doc "const char * (const struct ggml_tensor * tensor)\n"}, :ggml_cpu_has_cuda {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_new_tensor_4d {:rettype :pointer?, :argtypes [[ctx :pointer?] [type :int32] [ne0 :int64] [ne1 :int64] [ne2 :int64] [ne3 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, enum ggml_type type, long long ne0, long long ne1, long long ne2, long long ne3)\n"}, :ggml_cpu_has_wasm_simd {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_backend_buffer_get_size {:rettype :int64, :argtypes [[buffer :pointer?]], :doc "unsigned long (struct ggml_backend_buffer * buffer)\n"}, :ggml_map_custom1_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [fun :pointer?] 
[n_tasks :int32] [userdata :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, void (*)(struct ggml_tensor *, const struct ggml_tensor *, int, int, void *) fun, int n_tasks, void * userdata)\n"}, :ggml_backend_cuda_init {:rettype :pointer?, :argtypes [[device :int32]], :doc "struct ggml_backend * (int device)\n"}, :ggml_row_size {:rettype :int64, :argtypes [[type :int32] [ne :int64]], :doc "unsigned long (enum ggml_type type, long long ne)\n"}, :ggml_backend_reg_init_backend {:rettype :pointer?, :argtypes [[i :int64] [params :pointer?]], :doc "struct ggml_backend * (unsigned long i, const char * params)\n"}, :ggml_graph_plan {:rettype (by-value :ggml_cplan), :argtypes [[cgraph :pointer?] [n_threads :int32]], :doc "struct ggml_cplan (const struct ggml_cgraph * cgraph, int n_threads)\n"}, :ggml_tanh_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_backend_event_wait {:rettype :void, :argtypes [[backend :pointer?] [event :pointer?]], :doc "void (struct ggml_backend * backend, struct ggml_backend_event * event)\n"}, :ggml_tallocr_alloc {:rettype :void, :argtypes [[talloc :pointer?] [tensor :pointer?]], :doc "void (struct ggml_tallocr * talloc, struct ggml_tensor * tensor)\n"}, :ggml_build_backward_expand {:rettype :void, :argtypes [[ctx :pointer?] [gf :pointer?] [gb :pointer?] [keep :int8]], :doc "void (struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, _Bool keep)\n"}, :ggml_new_tensor_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [type :int32] [ne0 :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, enum ggml_type type, long long ne0)\n"}, :ggml_mean {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_get_rel_pos {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [qh :int32] [kh :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int qh, int kh)\n"}, :ggml_graph_get_tensor {:rettype :pointer?, :argtypes [[cgraph :pointer?] [name :pointer?]], :doc "struct ggml_tensor * (struct ggml_cgraph * cgraph, const char * name)\n"}, :ggml_backend_is_cpu {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_backend_get_alignment {:rettype :int64, :argtypes [[backend :pointer?]], :doc "unsigned long (struct ggml_backend * backend)\n"}, :ggml_backend_cuda_host_buffer_type {:rettype :pointer?, :argtypes [], :doc "struct ggml_backend_buffer_type * ()\n"}, :ggml_neg_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_conv_2d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s0 :int32] [s1 :int32] [p0 :int32] [p1 :int32] [d0 :int32] [d1 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int s1, int p0, int p1, int d0, int d1)\n"}, :ggml_backend_event_record {:rettype :void, :argtypes [[event :pointer?]], :doc "void (struct ggml_backend_event * event)\n"}, :ggml_backend_is_metal {:rettype :int8, :argtypes [[backend :pointer?]], :doc "_Bool (struct ggml_backend * backend)\n"}, :ggml_backend_buft_get_alloc_size {:rettype :int64, :argtypes [[buft :pointer?] 
[tensor :pointer?]], :doc "unsigned long (struct ggml_backend_buffer_type * buft, struct ggml_tensor * tensor)\n"}, :ggml_backend_cuda_unregister_host_buffer {:rettype :void, :argtypes [[buffer :pointer?]], :doc "void (void * buffer)\n"}, :ggml_diag_mask_zero_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [n_past :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int n_past)\n"}, :ggml_step_inplace {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_div {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_numa_init {:rettype :void, :argtypes [[numa :int32]], :doc "void (enum ggml_numa_strategy numa)\n"}, :ggml_fopen {:rettype :pointer?, :argtypes [[fname :pointer?] [mode :pointer?]], :doc "struct __sFILE * (const char * fname, const char * mode)\n"}, :ggml_set_output {:rettype :void, :argtypes [[tensor :pointer?]], :doc "void (struct ggml_tensor * tensor)\n"}, :ggml_set {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [nb1 :int64] [nb2 :int64] [nb3 :int64] [offset :int64]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, unsigned long nb1, unsigned long nb2, unsigned long nb3, unsigned long offset)\n"}, :ggml_cpu_has_avx512_bf16 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_upscale {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [scale_factor :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, int scale_factor)\n"}, :ggml_sgn {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a)\n"}, :ggml_graph_cpy {:rettype :void, :argtypes [[src :pointer?] [dst :pointer?]], :doc "void (struct ggml_cgraph * src, struct ggml_cgraph * dst)\n"}, :ggml_cpu_has_vsx {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_cpu_has_sse3 {:rettype :int32, :argtypes [], :doc "int ()\n"}, :ggml_arange {:rettype :pointer?, :argtypes [[ctx :pointer?] [start :float32] [stop :float32] [step :float32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, float start, float stop, float step)\n"}, :ggml_silu_back {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b)\n"}, :ggml_conv_transpose_1d {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [s0 :int32] [p0 :int32] [d0 :int32]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int p0, int d0)\n"}, :ggml_map_custom3_inplace_f32 {:rettype :pointer?, :argtypes [[ctx :pointer?] [a :pointer?] [b :pointer?] [c :pointer?] [fun :pointer?]], :doc "struct ggml_tensor * (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, void (*)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *) fun)\n"}, :ggml_backend_sched_new {:rettype :pointer?, :argtypes [[backends :pointer?] [bufts :pointer?] 
[n_backends :int32] [graph_size :int64] [parallel :int8]], :doc "struct ggml_backend_sched * (struct ggml_backend ** backends, struct ggml_backend_buffer_type ** bufts, int n_backends, unsigned long graph_size, _Bool parallel)\n"}}
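The map that ends above is plain EDN: each key is a ggml function name and each value records its return type (`:rettype`), argument list (`:argtypes`), and the original C prototype (`:doc`), so the interface can be loaded and queried directly. Below is a minimal sketch of doing that, assuming the gist's two top-level forms (the struct definitions and this function map) have been saved to a file; the filename "ggml-interface.edn" and the names `interface-forms`, `struct-defs`, and `fn-defs` are hypothetical and not part of the gist.

(require '[clojure.edn :as edn]
         '[clojure.java.io :as io])

(def interface-forms
  ;; Read every top-level EDN form from the saved gist file.
  (with-open [r (java.io.PushbackReader. (io/reader "ggml-interface.edn"))]
    (into [] (take-while some?) (repeatedly #(edn/read {:eof nil} r)))))

(def struct-defs (first interface-forms))  ;; e.g. [[:ggml_scratch [{:name :offs ...} ...]] ...]
(def fn-defs     (second interface-forms)) ;; e.g. {:ggml_sqr {:rettype :pointer? :argtypes [...] :doc "..."} ...}

;; How many ggml functions does the binding surface describe?
(count fn-defs)

;; Entries whose :rettype is a (by-value ...) form return a struct by value
;; rather than a pointer -- e.g. :ggml_graph_plan and :ggml_opt_default_params.
(->> fn-defs
     (filter (fn [[_ {:keys [rettype]}]] (seq? rettype)))
     (map key))

;; The original C prototype for any entry is preserved verbatim in its :doc string.
(get-in fn-defs [:ggml_graph_compute :doc])
;; => "enum ggml_status (struct ggml_cgraph * cgraph, struct ggml_cplan * cplan)\n"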