{"id":680,"date":"2024-05-29T23:36:59","date_gmt":"2024-05-29T14:36:59","guid":{"rendered":"https:\/\/blog.gurees.net\/?p=680"},"modified":"2024-05-29T23:36:59","modified_gmt":"2024-05-29T14:36:59","slug":"centos-stream9%e3%81%a8llama-cpp%e3%81%a7%e3%83%ad%e3%83%bc%e3%82%ab%e3%83%abllm%e5%8b%95%e3%81%8b%e3%81%97%e3%81%a6%e3%81%bf%e3%82%8b","status":"publish","type":"post","link":"https:\/\/blog.gurees.net\/?p=680","title":{"rendered":"CentOS Stream9\u3068llama.cpp\u3067\u30ed\u30fc\u30ab\u30ebLLM\u52d5\u304b\u3057\u3066\u307f\u308b"},"content":{"rendered":"\n<p>DeskMini A300(Ryzen5 2400G)\u304c\u4f59\u3063\u3066\u3044\u305f\u306e\u3067\u3001CentOS Stream 9\u3092\u30a4\u30f3\u30b9\u30c8\u30fc\u30ebllama.cpp\u3067LLM\u3092\u52d5\u304b\u3057\u3066\u307f\u305f\u8a18\u9332\u3067\u3059\u3002<\/p>\n\n\n\n<p>A300\u306fAPU\u306eVRAM\u309216G\u306b\u6307\u5b9a\u3067\u304d\u308b\u306e\u3067\u3001ROCm\u304c\u3064\u304b\u308c\u3070\u3088\u304b\u3063\u305f\u306e\u3067\u3059\u304c\u3001\u3069\u3046\u306b\u3082\u3046\u307e\u304f\u52d5\u304b\u305b\u306a\u304b\u3063\u305f\u306e\u3067\u3001\u3068\u308a\u3042\u3048\u305a\u306fCPU\u3067\u30ed\u30fc\u30ab\u30ebLLM\u3092\u52d5\u304b\u3057\u3066\u307f\u307e\u3059\u3002<\/p>\n\n\n\n<!--more-->\n\n\n\n<p>LLM\u3092\u52d5\u304b\u3059\u305f\u3081\u306b\u3001llama.cpp\u3092\u30b3\u30f3\u30d1\u30a4\u30eb\u3057\u307e\u3059\u3002-j 4\u306f\u30b3\u30a2\u306b\u5fdc\u3058\u3066\u304a\u597d\u307f\u3067\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\"># git clone https:\/\/github.com\/ggerganov\/llama.cpp\n# cd llama.cpp\n# make -j 4<\/pre>\n\n\n\n<p>GGUF\u5f62\u5f0f\u306e\u30e2\u30c7\u30eb\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u307e\u3059\u3002\u4eca\u56de\u306f\u4ee5\u4e0b\u3092\u4f7f\u308f\u305b\u3066\u3044\u305f\u3060\u304d\u307e\u3057\u305f\u3002<br><a href=\"https:\/\/huggingface.co\/mmnga\/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf\">https:\/\/huggingface.co\/mmnga\/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf<\/a><\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\"># wget https:\/\/huggingface.co\/mmnga\/ELYZA-japanese-Llama-2-7b-instruct-gguf\/resolve\/main\/ELYZA-japanese-Llama-2-7b-instruct-q4_K_M.gguf<\/pre>\n\n\n\n<p>-m\u3067\u30e2\u30c7\u30eb\u3092\u6307\u5b9a\u3057\u3066\u3001 -p\u3067\u30d7\u30ed\u30f3\u30d7\u30c8\u3092\u6e21\u3057\u307e\u3059\u3002<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\"># .\/main -m '\/home\/root\/ELYZA-japanese-Llama-2-7b-instruct-q4_K_M.gguf' -p '[INST] &lt;&lt;SYS>>\u3042\u306a\u305f\u306f\u8aa0\u5b9f\u3067\u512a\u79c0\u306a\u65e5\u672c\u4eba\u306e\u30a2\u30b7\u30b9\u30bf\u30f3\u30c8\u3067\u3059\u3002&lt;&lt;\/SYS>>Python\u3067mysql\u304b\u3089select\u3059\u308b\u30b5\u30f3\u30d7\u30eb\u3092\u5b9f\u88c5\u3057\u3066[\/INST]'\nLog start\nmain: build = 3022 (edc29433)\nmain: built with cc (GCC) 11.4.1 20231218 (Red Hat 11.4.1-3) for x86_64-redhat-linux\nmain: seed  = 
1716989694\nllama_model_loader: loaded meta data with 21 key-value pairs and 291 tensors from \/home\/root\/ELYZA-japanese-Llama-2-7b-instruct-q4_K_M.gguf (version GGUF V2)\nllama_model_loader: Dumping metadata keys\/values. Note: KV overrides do not apply in this output.\nllama_model_loader: - kv   0:                       general.architecture str              = llama\nllama_model_loader: - kv   1:                               general.name str              = ELYZA-japanese-Llama-2-7b-instruct\nllama_model_loader: - kv   2:       general.source.hugginface.repository str              = elyza\/ELYZA-japanese-Llama-2-7b-instruct\nllama_model_loader: - kv   3:                   llama.tensor_data_layout str              = Meta AI original pth\nllama_model_loader: - kv   4:                       llama.context_length u32              = 4096\nllama_model_loader: - kv   5:                     llama.embedding_length u32              = 4096\nllama_model_loader: - kv   6:                          llama.block_count u32              = 32\nllama_model_loader: - kv   7:                  llama.feed_forward_length u32              = 11008\nllama_model_loader: - kv   8:                 llama.rope.dimension_count u32              = 128\nllama_model_loader: - kv   9:                 llama.attention.head_count u32              = 32\nllama_model_loader: - kv  10:              llama.attention.head_count_kv u32              = 32\nllama_model_loader: - kv  11:     llama.attention.layer_norm_rms_epsilon f32              = 0.000001\nllama_model_loader: - kv  12:                       tokenizer.ggml.model str              = llama\nllama_model_loader: - kv  13:                      tokenizer.ggml.tokens arr[str,32000]   = [\"&lt;unk>\", \"&lt;s>\", \"&lt;\/s>\", \"&lt;0x00>\", \"&lt;...\nllama_model_loader: - kv  14:                      tokenizer.ggml.scores arr[f32,32000]   = [0.000000, 0.000000, 0.000000, 0.0000...\nllama_model_loader: - kv  15:                  tokenizer.ggml.token_type arr[i32,32000]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\nllama_model_loader: - kv  16:                tokenizer.ggml.bos_token_id u32              = 1\nllama_model_loader: - kv  17:                tokenizer.ggml.eos_token_id u32              = 2\nllama_model_loader: - kv  18:            tokenizer.ggml.unknown_token_id u32              = 0\nllama_model_loader: - kv  19:               general.quantization_version u32              = 2\nllama_model_loader: - kv  20:                          general.file_type u32              = 15\nllama_model_loader: - type  f32:   65 tensors\nllama_model_loader: - type q4_K:  193 tensors\nllama_model_loader: - type q6_K:   33 tensors\nllm_load_vocab: special tokens definition check successful ( 259\/32000 ).\nllm_load_print_meta: format           = GGUF V2\nllm_load_print_meta: arch             = llama\nllm_load_print_meta: vocab type       = SPM\nllm_load_print_meta: n_vocab          = 32000\nllm_load_print_meta: n_merges         = 0\nllm_load_print_meta: n_ctx_train      = 4096\nllm_load_print_meta: n_embd           = 4096\nllm_load_print_meta: n_head           = 32\nllm_load_print_meta: n_head_kv        = 32\nllm_load_print_meta: n_layer          = 32\nllm_load_print_meta: n_rot            = 128\nllm_load_print_meta: n_embd_head_k    = 128\nllm_load_print_meta: n_embd_head_v    = 128\nllm_load_print_meta: n_gqa            = 1\nllm_load_print_meta: n_embd_k_gqa     = 4096\nllm_load_print_meta: n_embd_v_gqa     = 4096\nllm_load_print_meta: f_norm_eps       = 0.0e+00\nllm_load_print_meta: 
f_norm_rms_eps   = 1.0e-06\nllm_load_print_meta: f_clamp_kqv      = 0.0e+00\nllm_load_print_meta: f_max_alibi_bias = 0.0e+00\nllm_load_print_meta: f_logit_scale    = 0.0e+00\nllm_load_print_meta: n_ff             = 11008\nllm_load_print_meta: n_expert         = 0\nllm_load_print_meta: n_expert_used    = 0\nllm_load_print_meta: causal attn      = 1\nllm_load_print_meta: pooling type     = 0\nllm_load_print_meta: rope type        = 0\nllm_load_print_meta: rope scaling     = linear\nllm_load_print_meta: freq_base_train  = 10000.0\nllm_load_print_meta: freq_scale_train = 1\nllm_load_print_meta: n_yarn_orig_ctx  = 4096\nllm_load_print_meta: rope_finetuned   = unknown\nllm_load_print_meta: ssm_d_conv       = 0\nllm_load_print_meta: ssm_d_inner      = 0\nllm_load_print_meta: ssm_d_state      = 0\nllm_load_print_meta: ssm_dt_rank      = 0\nllm_load_print_meta: model type       = 7B\nllm_load_print_meta: model ftype      = Q4_K - Medium\nllm_load_print_meta: model params     = 6.74 B\nllm_load_print_meta: model size       = 3.80 GiB (4.84 BPW)\nllm_load_print_meta: general.name     = ELYZA-japanese-Llama-2-7b-instruct\nllm_load_print_meta: BOS token        = 1 '&lt;s>'\nllm_load_print_meta: EOS token        = 2 '&lt;\/s>'\nllm_load_print_meta: UNK token        = 0 '&lt;unk>'\nllm_load_print_meta: LF token         = 13 '&lt;0x0A>'\nllm_load_tensors: ggml ctx size =    0.15 MiB\nllm_load_tensors:        CPU buffer size =  3891.24 MiB\n..................................................................................................\nllama_new_context_with_model: n_ctx      = 512\nllama_new_context_with_model: n_batch    = 512\nllama_new_context_with_model: n_ubatch   = 512\nllama_new_context_with_model: flash_attn = 0\nllama_new_context_with_model: freq_base  = 10000.0\nllama_new_context_with_model: freq_scale = 1\nllama_kv_cache_init:        CPU KV buffer size =   256.00 MiB\nllama_new_context_with_model: KV self size  =  256.00 MiB, K (f16):  128.00 MiB, V (f16):  128.00 MiB\nllama_new_context_with_model:        CPU  output buffer size =     0.12 MiB\nllama_new_context_with_model:        CPU compute buffer size =    70.50 MiB\nllama_new_context_with_model: graph nodes  = 1030\nllama_new_context_with_model: graph splits = 1\n\nsystem_info: n_threads = 4 \/ 8 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |\nsampling:\n        repeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000\n        top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800\n        mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\nsampling order:\nCFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature\ngenerate: n_ctx = 512, n_batch = 2048, n_predict = -1, n_keep = 1\n\n\n [INST] &lt;&lt;SYS>>\u3042\u306a\u305f\u306f\u8aa0\u5b9f\u3067\u512a\u79c0\u306a\u65e5\u672c\u4eba\u306e\u30a2\u30b7\u30b9\u30bf\u30f3\u30c8\u3067\u3059\u3002&lt;&lt;\/SYS>>Python\u3067mysql\u304b\u3089select\u3059\u308b\u30b5\u30f3\u30d7\u30eb\u3092\u5b9f\u88c5\u3057\u3066[\/INST]  ```\n# MySQLdb \u3092import\nfrom mysql.connector import connect\n\n# \u63a5\u7d9a\u5148\u306ehost,user,password,db\u540d\u3092\u6307\u5b9a\nhost = 'localhost'\nuser = 'root'\npassword = 'your_password_here'\ndb_name = 'your_db_name_here'\n\n# 
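<p>Incidentally, the system_info line in the log shows n_threads = 4 / 8, i.e. inference ran on the four physical cores out of eight hardware threads. llama.cpp's main has a -t option for the thread count, though I have not checked whether using all eight SMT threads actually helps on the 2400G.</p>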
<p>The code from the output is the following, and it looks fairly plausible.</p>

<pre class="EnlighterJSRAW" data-enlighter-language="generic"># MySQLdb をimport
from mysql.connector import connect

# 接続先のhost,user,password,db名を指定
host = 'localhost'
user = 'root'
password = 'your_password_here'
db_name = 'your_db_name_here'

# 接続を確立
conn = connect(host=host, user=user, passwd=password, dbname=db_name)

# SQLを実行
cur = conn.cursor()
cur.execute("SELECT * FROM your_table_name")

# 結果を取得
results = cur.fetchall()

# 結果をprint
for row in results:
    print(row)</pre>
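<p>Two small issues in that output are worth flagging: the first comment talks about importing MySQLdb while the code actually imports mysql.connector, and mysql.connector.connect() does not accept a dbname keyword (the parameter is database; password is also the usual spelling rather than the legacy passwd). A corrected sketch, keeping the same placeholder credentials and table name:</p>

<pre class="EnlighterJSRAW" data-enlighter-language="python"># Corrected version of the generated sample.
# Credentials and table name are placeholders, as in the model output.
from mysql.connector import connect

# Open the connection; mysql.connector expects password= and database=,
# not passwd= / dbname= as the model wrote.
conn = connect(
    host='localhost',
    user='root',
    password='your_password_here',
    database='your_db_name_here',
)

# Run the SELECT and print each result row (rows come back as tuples)
cur = conn.cursor()
cur.execute("SELECT * FROM your_table_name")
for row in cur.fetchall():
    print(row)

# Clean up
cur.close()
conn.close()</pre>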
<p>Speed-wise it managed 7.01 tokens per second, which felt reasonably quick. Memory was also modest: the CPU buffer size was 3891.24 MiB, so about 4 GB sufficed.</p>

<p>If a 7B model fits in about 4 GB, a somewhat larger 13B model is worth trying as well.</p>

<pre class="EnlighterJSRAW" data-enlighter-language="generic"># wget https://huggingface.co/mmnga/ELYZA-japanese-Llama-2-13b-fast-instruct-gguf/resolve/main/ELYZA-japanese-Llama-2-13b-fast-instruct-q4_K_M.gguf?download=true
# ./main -m '/home/root/ELYZA-japanese-Llama-2-13b-fast-instruct-q4_K_M.gguf' -p '[INST] &lt;&lt;SYS>>あなたは誠実で優秀な日本人のアシスタントです。&lt;&lt;/SYS>>Pythonでmysqlからselectするサンプルを実装して[/INST]'
Log start
main: build = 3022 (edc29433)
main: built with cc (GCC) 11.4.1 20231218 (Red Hat 11.4.1-3) for x86_64-redhat-linux
main: seed  = 1716991570
llama_model_loader: loaded meta data with 22 key-value pairs and 363 tensors from /home/root/ELYZA-japanese-Llama-2-13b-fast-instruct-q4_K_M.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.name str              = LLaMA v2
llama_model_loader: - kv   2:                       llama.context_length u32              = 4096
llama_model_loader: - kv   3:                     llama.embedding_length u32              = 5120
llama_model_loader: - kv   4:                          llama.block_count u32              = 40
llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 13824
llama_model_loader: - kv   6:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv   7:                 llama.attention.head_count u32              = 40
llama_model_loader: - kv   8:              llama.attention.head_count_kv u32              = 40
llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  10:                       llama.rope.freq_base f32              = 10000.000000
llama_model_loader: - kv  11:                          general.file_type u32              = 15
llama_model_loader: - kv  12:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  13:                      tokenizer.ggml.tokens arr[str,44581]   = ["&lt;unk>", "&lt;s>", "&lt;/s>", "&lt;0x00>", "&lt;...
llama_model_loader: - kv  14:                      tokenizer.ggml.scores arr[f32,44581]   = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv  15:                  tokenizer.ggml.token_type arr[i32,44581]   = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv  16:                      tokenizer.ggml.merges arr[str,73695]   = ["て い", "し た", "ま す", "祟..
llama_model_loader: - kv  17:                tokenizer.ggml.bos_token_id u32              = 1
llama_model_loader: - kv  18:                tokenizer.ggml.eos_token_id u32              = 2
llama_model_loader: - kv  19:            tokenizer.ggml.unknown_token_id u32              = 0
llama_model_loader: - kv  20:            tokenizer.ggml.padding_token_id u32              = 2
llama_model_loader: - kv  21:               general.quantization_version u32              = 2
llama_model_loader: - type  f32:   81 tensors
llama_model_loader: - type q4_K:  241 tensors
llama_model_loader: - type q6_K:   41 tensors
llm_load_vocab: special tokens definition check successful ( 259/44581 ).
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 44581
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: n_ctx_train      = 4096
llm_load_print_meta: n_embd           = 5120
llm_load_print_meta: n_head           = 40
llm_load_print_meta: n_head_kv        = 40
llm_load_print_meta: n_layer          = 40
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 5120
llm_load_print_meta: n_embd_v_gqa     = 5120
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 13824
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx  = 4096
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: model type       = 13B
llm_load_print_meta: model ftype      = Q4_K - Medium
llm_load_print_meta: model params     = 13.14 B
llm_load_print_meta: model size       = 7.41 GiB (4.84 BPW)
llm_load_print_meta: general.name     = LLaMA v2
llm_load_print_meta: BOS token        = 1 '&lt;s>'
llm_load_print_meta: EOS token        = 2 '&lt;/s>'
llm_load_print_meta: UNK token        = 0 '&lt;unk>'
llm_load_print_meta: PAD token        = 2 '&lt;/s>'
llm_load_print_meta: LF token         = 13 '&lt;0x0A>'
llm_load_tensors: ggml ctx size =    0.18 MiB
llm_load_tensors:        CPU buffer size =  7585.80 MiB
...................................................................................................
llama_new_context_with_model: n_ctx      = 512
llama_new_context_with_model: n_batch    = 512
llama_new_context_with_model: n_ubatch   = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init:        CPU KV buffer size =   400.00 MiB
llama_new_context_with_model: KV self size  =  400.00 MiB, K (f16):  200.00 MiB, V (f16):  200.00 MiB
llama_new_context_with_model:        CPU  output buffer size =     0.17 MiB
llama_new_context_with_model:        CPU compute buffer size =    97.07 MiB
llama_new_context_with_model: graph nodes  = 1286
llama_new_context_with_model: graph splits = 1

system_info: n_threads = 4 / 8 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
sampling:
        repeat_last_n = 64, repeat_penalty = 1.000, frequency_penalty = 0.000, presence_penalty = 0.000
        top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.800
        mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000
sampling order:
CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature
generate: n_ctx = 512, n_batch = 2048, n_predict = -1, n_keep = 1


 [INST] &lt;&lt;SYS>>あなたは誠実で優秀な日本人のアシスタントです。&lt;&lt;/SYS>>Pythonでmysqlからselectするサンプルを実装して[/INST] Python でMySQLからデータを取得するサンプルコードを作成します。
```
import mysql.connector

# データベース接続
cnx = mysql.connector.connect(
    host='&lt;サーバー名>',
    user='&lt;ユーザー名>',
    password='&lt;パスワード>',
    database='&lt;データベース名>'
)

# SQL文を設定
sql = "SELECT * FROM items"

# 結果をデータフレームとして取得
df = mysql.connector.cursor(cnx)
df.execute(sql)
results = df.fetchall()

# 結果の表示
for row in results:
    print(row)

# データベースの接続を切断
cnx.close()
```
上記のコードでは、PythonのMySQL Driverを用いてデータベースに接続し、SELECT文でデータを取得して結果をデータフレームとして取得、表示しています。 [end of text]

llama_print_timings:        load time =    1067.76 ms
llama_print_timings:      sample time =      13.68 ms /   197 runs   (    0.07 ms per token, 14404.80 tokens per second)
llama_print_timings: prompt eval time =    6825.69 ms /    37 tokens (  184.48 ms per token,     5.42 tokens per second)
llama_print_timings:        eval time =   50811.35 ms /   196 runs   (  259.24 ms per token,     3.86 tokens per second)
llama_print_timings:       total time =   57740.04 ms /   233 tokens
Log end</pre>
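<p>Readability aside, the 13B output has a real bug: mysql.connector has no module-level cursor() function, and what fetchall() returns is a list of tuples, not a dataframe as the comment claims. The cursor has to come from the connection object; a minimal fix of that part, keeping the generated sample's placeholder values:</p>

<pre class="EnlighterJSRAW" data-enlighter-language="python">import mysql.connector

# Connect with the same placeholder values used in the generated sample
cnx = mysql.connector.connect(
    host='&lt;server>',
    user='&lt;user>',
    password='&lt;password>',
    database='&lt;database>',
)

# The cursor is created from the connection object,
# not via mysql.connector.cursor(cnx) as the model wrote
cur = cnx.cursor()
cur.execute("SELECT * FROM items")

# fetchall() returns a list of tuples (not a dataframe)
for row in cur.fetchall():
    print(row)

cnx.close()</pre>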
<p>Memory use roughly doubled and speed roughly halved. In exchange, the output felt more natural to me than the 7B model's.</p>

<p>Next, I try the CodeLlama-based 7B.</p>

<pre class="EnlighterJSRAW" data-enlighter-language="generic"># wget https://huggingface.co/mmnga/ELYZA-japanese-CodeLlama-7b-instruct-gguf/resolve/main/ELYZA-japanese-CodeLlama-7b-instruct-q4_K_M.gguf

# ./main -m '/home/root/ELYZA-japanese-CodeLlama-7b-instruct-q4_K_M.gguf' -p '[INST] &lt;&lt;SYS>>あなたは誠実で優秀な日本人のアシスタントです。&lt;&lt;/SYS>>Pythonでmysqlからselectするサンプルを実装して[/INST]'
~~~~~~~~~~~~~ (snip) ~~~~~~~~~~~~~
 [INST] &lt;&lt;SYS>>あなたは誠実で優秀な日本人のアシスタントです。&lt;&lt;/SYS>>Pythonでmysqlからselectするサンプルを実装して[/INST]  MySQL からデータを取得するサンプルを実装します。

以下は、`sample.py`というファイルを作成して実装するサンプルです。

```python
import pymysql

def select():
    db = pymysql.connect("localhost", "root", "password", "mydatabase")
    cursor = db.cursor()

    cursor.execute("SELECT * FROM mytable")

    results = cursor.fetchall()

    print(results)

    cursor.close()
    db.close()

if __name__ == '__main__':
    select()
```

このサンプルでは、`pymysql`を使用してMySQLデータベースに接続し、`mytable`テーブルからすべてのデータを取得します。`fetchall()`メソッドを使用して、データをすべて取得します。 [end of text]

llama_print_timings:        load time =     563.36 ms
llama_print_timings:      sample time =      15.64 ms /   253 runs   (    0.06 ms per token, 16171.30 tokens per second)
llama_print_timings: prompt eval time =    5658.46 ms /    61 tokens (   92.76 ms per token,    10.78 tokens per second)
llama_print_timings:        eval time =   35243.45 ms /   252 runs   (  139.85 ms per token,     7.15 tokens per second)
llama_print_timings:       total time =   41003.83 ms /   313 tokens
Log end</pre>
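<p>One caveat about the CodeLlama sample: as far as I know, pymysql.connect() stopped accepting positional arguments in PyMySQL 1.0, so on a current install the call needs keyword arguments. A sketch of the adjusted function, keeping the generated sample's placeholder values:</p>

<pre class="EnlighterJSRAW" data-enlighter-language="python">import pymysql

def select():
    # Keyword arguments are required on PyMySQL 1.0+;
    # host/user/password/database are placeholders from the generated sample
    db = pymysql.connect(host="localhost", user="root",
                         password="password", database="mydatabase")
    cursor = db.cursor()
    cursor.execute("SELECT * FROM mytable")
    print(cursor.fetchall())
    cursor.close()
    db.close()

if __name__ == '__main__':
    select()</pre>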
<p>The output does feel like it comes from a code-specialized model.</p>
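<p>For reference, the three runs came out at: 7B at 7.01 tokens per second with a 3891.24 MiB CPU buffer, 13B at 3.86 tokens per second with 7585.80 MiB, and the CodeLlama-based 7B at 7.15 tokens per second, in line with the plain 7B model.</p>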
<p>I wrote an article using Cerebras-GPT a while back, and at that time a 13B model seemed to need about 30 GB, so quantization has cut the required resources to roughly a quarter. With around 64 GB of RAM, it feels like a 70B-class model ought to run.</p>
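<p>As a rough sanity check on that guess: llama.cpp reported 4.84 bits per weight for these Q4_K_M files, so file size can be estimated from the parameter count alone. A back-of-the-envelope sketch (ignoring the KV cache and compute buffers):</p>

<pre class="EnlighterJSRAW" data-enlighter-language="python"># Estimate Q4_K_M model size from parameter count and bits per weight.
# 4.84 BPW is the figure llama.cpp printed for both ELYZA models above.
def q4km_size_gib(params_billion, bpw=4.84):
    return params_billion * 1e9 * bpw / 8 / 2**30

for label, params in [("7B", 6.74), ("13B", 13.14), ("70B", 70.0)]:
    print(f"{label}: ~{q4km_size_gib(params):.2f} GiB")

# 7B: ~3.80 GiB and 13B: ~7.40 GiB essentially match the logged sizes;
# 70B lands around 39 GiB, which plausibly fits in 64 GB of RAM.</pre>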