| | #include "arg.h" |
| | #include "base64.hpp" |
| | #include "log.h" |
| | #include "common.h" |
| | #include "sampling.h" |
| | #include "clip.h" |
| | #include "llava.h" |
| | #include "llama.h" |
| | #include "ggml.h" |
| |
|
| | #include <cstdio> |
| | #include <cstdlib> |
| | #include <cstring> |
| | #include <vector> |
| |
|
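// evaluate the given tokens, splitting the work into chunks of at most n_batch tokens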
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

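// evaluate a single token id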
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

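// tokenize a string and evaluate the resulting tokens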
static bool eval_string(struct llama_context * ctx_llama, const char * str, int n_batch, int * n_past, bool add_bos) {
    std::string str2 = str;
    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

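// sample the next token, accept it into the sampler state and evaluate it;
// returns a pointer to a static buffer that is only valid until the next call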
static const char * sample(struct common_sampler * smpl,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
    common_sampler_accept(smpl, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = common_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}

static const char * IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const char * IMG_BASE64_TAG_END   = "\">";

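// find the positions of the opening and closing base64 image tags in the prompt (std::string::npos if not found)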
static void find_image_tag_in_prompt(const std::string & prompt, size_t & begin_out, size_t & end_out) {
    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
    end_out   = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
}

static bool prompt_contains_image(const std::string & prompt) {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    return (begin != std::string::npos);
}

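// decode the base64 image embedded in the prompt and build an image embedding from it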
static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string & prompt) {
    size_t img_base64_str_start, img_base64_str_end;
    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
        LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
        return NULL;
    }

    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count);

    // note: required_encode_size sizes the buffer for an encode, which is larger than the decoded payload needs
    auto required_bytes = base64::required_encode_size(base64_str.size());
    auto img_bytes = std::vector<unsigned char>(required_bytes);
    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());

    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
    if (!embed) {
        LOG_ERR("%s: could not load image from base64 string.\n", __func__);
        return NULL;
    }

    return embed;
}

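// replace the base64 image tag in the prompt with `replacement` (empty by default)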
static std::string remove_image_from_prompt(const std::string & prompt, const char * replacement = "") {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    if (begin == std::string::npos || end == std::string::npos) {
        return prompt;
    }
    auto pre  = prompt.substr(0, begin);
    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
    return pre + replacement + post;
}

struct llava_context {
    struct clip_ctx      * ctx_clip  = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model   * model     = NULL;
};

static void print_usage(int, char ** argv) {
    LOG("\n example usage:\n");
    LOG("\n %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {

    // load and preprocess the image
    llava_image_embed * embed = NULL;
    auto prompt = params->prompt;
    if (prompt_contains_image(prompt)) {
        if (!params->image.empty()) {
            LOG_INF("using base64 encoded image instead of command line image path\n");
        }
        embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
        if (!embed) {
            LOG_ERR("%s: can't load image from prompt\n", __func__);
            return NULL;
        }
        params->prompt = remove_image_from_prompt(prompt);
    } else {
        embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
        if (!embed) {
            LOG_ERR("%s: is %s really an image file?\n", __func__, fname.c_str());
            return NULL;
        }
    }

    return embed;
}

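// evaluate the system prompt, the image embedding and the user prompt, then sample and print the response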
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
    int n_past = 0;

    const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;

    std::string system_prompt, user_prompt;
    size_t image_pos = prompt.find("<image>");
    if (image_pos != std::string::npos) {
        // templating mode: the full prompt is provided, with <image> as a placeholder for the image embeddings
        system_prompt = prompt.substr(0, image_pos);
        user_prompt   = prompt.substr(image_pos + std::string("<image>").length());
        LOG_INF("system_prompt: %s\n", system_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
        LOG_INF("user_prompt: %s\n", user_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    } else {
        // llava-1.5 native mode
        system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
        user_prompt   = prompt + "\nASSISTANT:";
        if (params->verbose_prompt) {
            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    }

    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
    llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);

    // generate the response

    LOG("\n");

    struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
    if (!smpl) {
        LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

    std::string response = "";
    for (int i = 0; i < max_tgt_len; i++) {
        const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
        response += tmp;
        if (strcmp(tmp, "</s>") == 0) break;
        if (strstr(tmp, "###")) break; // some models emit "###" as a turn separator
        LOG("%s", tmp);
        // stop on chat-template markers that some models emit as plain text
        if (strstr(response.c_str(), "<|im_end|>")) break;
        if (strstr(response.c_str(), "<|im_start|>")) break;
        if (strstr(response.c_str(), "USER:")) break;

        fflush(stdout);
    }

    common_sampler_free(smpl);
    LOG("\n");
}

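// initialize the llama backend and load the language model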
static struct llama_model * llava_init(common_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = common_model_params_to_llama(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return NULL;
    }
    return model;
}

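// load the CLIP model (multimodal projector) and create the llama context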
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    if (ctx_clip == NULL) {
        LOG_ERR("%s: failed to load clip model %s\n", __func__, clip_path);
        return NULL;
    }

    llama_context_params ctx_params = common_context_params_to_llama(*params);
    ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_ERR("%s: failed to create the llama_context\n", __func__);
        return NULL;
    }

    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->ctx_clip  = ctx_clip;
    ctx_llava->model     = model;
    return ctx_llava;
}

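// free the clip context, the llama context and the model (if set), then shut down the backend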
static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
    free(ctx_llava); // the struct itself was malloc'd in llava_init_context
}

int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
        return 1;
    }

    common_init();

    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
        print_usage(argc, argv);
        return 1;
    }

    auto * model = llava_init(&params);
    if (model == NULL) {
        LOG_ERR("%s: error: failed to init llava model\n", __func__);
        return 1;
    }

    if (prompt_contains_image(params.prompt)) {
        auto * ctx_llava = llava_init_context(&params, model);
        if (!ctx_llava) {
            LOG_ERR("%s: failed to init llava context\n", __func__);
            return 1;
        }

        auto * image_embed = load_image(ctx_llava, &params, "");
        if (!image_embed) {
            LOG_ERR("%s: failed to load image from prompt. Terminating\n\n", __func__);
            return 1;
        }

        // process the prompt
        process_prompt(ctx_llava, image_embed, &params, params.prompt);

        llama_perf_context_print(ctx_llava->ctx_llama);
        llava_image_embed_free(image_embed);
        ctx_llava->model = NULL; // the model is shared; it is freed once below
        llava_free(ctx_llava);
    } else {
        for (auto & image : params.image) {
            auto * ctx_llava = llava_init_context(&params, model);
            if (!ctx_llava) {
                LOG_ERR("%s: failed to init llava context\n", __func__);
                return 1;
            }

            auto * image_embed = load_image(ctx_llava, &params, image);
            if (!image_embed) {
                LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
                return 1;
            }

            // process the prompt
            process_prompt(ctx_llava, image_embed, &params, params.prompt);

            llama_perf_context_print(ctx_llava->ctx_llama);
            llava_image_embed_free(image_embed);
            ctx_llava->model = NULL; // the model is shared; it is freed once below
            llava_free(ctx_llava);
        }
    }

    llama_free_model(model);

    return 0;
}