@@ -35,7 +35,7 @@ void sigint_handler(int signo) {
 
 int get_embeddings(void* params_ptr, void* state_pr, float * res_embeddings) {
     gpt_params* params_p = (gpt_params*) params_ptr;
-    llama_state* state = (llama_state*) state_pr;
+    llama_binding_state* state = (llama_binding_state*) state_pr;
     llama_context* ctx = state->ctx;
     gpt_params params = *params_p;
 
@@ -77,7 +77,7 @@ int get_embeddings(void* params_ptr, void* state_pr, float * res_embeddings) {
 
 int get_token_embeddings(void* params_ptr, void* state_pr, int *tokens, int tokenSize, float * res_embeddings) {
     gpt_params* params_p = (gpt_params*) params_ptr;
-    llama_state* state = (llama_state*) state_pr;
+    llama_binding_state* state = (llama_binding_state*) state_pr;
     llama_context* ctx = state->ctx;
     gpt_params params = *params_p;
 
@@ -96,7 +96,7 @@ int get_token_embeddings(void* params_ptr, void* state_pr, int *tokens, int tok
 
 int eval(void* params_ptr,void* state_pr,char *text) {
     gpt_params* params_p = (gpt_params*) params_ptr;
-    llama_state* state = (llama_state*) state_pr;
+    llama_binding_state* state = (llama_binding_state*) state_pr;
     llama_context* ctx = state->ctx;
 
     auto n_past = 0;
@@ -117,7 +117,7 @@ static llama_context ** g_ctx;
 
 int llama_predict(void* params_ptr, void* state_pr, char* result, bool debug) {
     gpt_params* params_p = (gpt_params*) params_ptr;
-    llama_state* state = (llama_state*) state_pr;
+    llama_binding_state* state = (llama_binding_state*) state_pr;
     llama_context* ctx = state->ctx;
 
     gpt_params params = *params_p;
@@ -608,7 +608,7 @@ int llama_predict(void* params_ptr, void* state_pr, char* result, bool debug) {
 }
 
 void llama_binding_free_model(void *state_ptr) {
-    llama_state* ctx = (llama_state*) state_ptr;
+    llama_binding_state* ctx = (llama_binding_state*) state_ptr;
     llama_free(ctx->ctx);
     delete ctx->model;
 }
@@ -620,7 +620,7 @@ void llama_free_params(void* params_ptr) {
 
 int llama_tokenize_string(void* params_ptr, void* state_pr, int* result) {
     gpt_params* params_p = (gpt_params*) params_ptr;
-    llama_state* state = (llama_state*) state_pr;
+    llama_binding_state* state = (llama_binding_state*) state_pr;
     llama_context* ctx = state->ctx;
 
     // TODO: add_bos
@@ -773,7 +773,7 @@ Keeping them here in sync to generate again patches if needed.
 
 common.h:
 
-struct llama_state {
+struct llama_binding_state {
     llama_context * ctx;
     llama_model * model;
 };
@@ -796,8 +796,8 @@ void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f
     // load the model
     gpt_params * lparams = create_gpt_params(fname);
     llama_model * model;
-    llama_state * state;
-    state = new llama_state;
+    llama_binding_state * state;
+    state = new llama_binding_state;
     llama_context * ctx;
     lparams->n_ctx = n_ctx;
     lparams->seed = n_seed;
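
For context, every binding entry point touched above follows the same opaque-pointer pattern: the caller on the other side of the FFI boundary holds only a void*, and each C function casts it back to the concrete state struct before using ctx. That is also why the llama_state -> llama_binding_state rename is invisible to callers. A minimal self-contained sketch of that pattern (the fake_* stand-in types and the load_state/eval_like/free_state names are hypothetical, not part of this repo):

#include <cstdio>

// Hypothetical stand-ins for llama_context / llama_model so the sketch
// compiles on its own; the real binding uses llama.cpp's types.
struct fake_context { int n_ctx; };
struct fake_model { int n_params; };

// Same shape as the llama_binding_state struct kept in sync above.
struct binding_state {
    fake_context * ctx;
    fake_model * model;
};

// The binding hands the state out as an opaque void*, so the caller
// never needs the C++ type definitions.
void * load_state() {
    binding_state * state = new binding_state;
    state->ctx = new fake_context{512};
    state->model = new fake_model{7};
    return state;
}

// Every entry point starts with the same cast, exactly like
// get_embeddings / eval / llama_predict / llama_tokenize_string above.
int eval_like(void * state_pr) {
    binding_state * state = (binding_state *) state_pr;
    return state->ctx->n_ctx;
}

// Mirrors llama_binding_free_model: release what the state owns.
void free_state(void * state_ptr) {
    binding_state * state = (binding_state *) state_ptr;
    delete state->ctx;
    delete state->model;
    delete state;
}

int main() {
    void * s = load_state();
    printf("n_ctx = %d\n", eval_like(s));
    free_state(s);
    return 0;
}

Since only the cast sites name the struct, the rename is a pure source-level change on the C++ side; the exported function signatures, and therefore the callers, stay identical.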