diff --git a/examples/notebooks/PerformanceTuning.ipynb b/examples/notebooks/PerformanceTuning.ipynb new file mode 100644 index 0000000..76e26fb --- /dev/null +++ b/examples/notebooks/PerformanceTuning.ipynb @@ -0,0 +1,5540 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "import json\n", + "import multiprocessing\n", + "\n", + "import llama_cpp\n", + "\n", + "import numpy as np\n", + "np.int = int\n", + "\n", + "from skopt.space import Integer, Categorical\n", + "\n", + "\n", + "MODEL_PATH = \"../models/ggml-model.bin\"\n", + "\n", + "# Hyperparameters\n", + "space = [\n", + " Categorical([True, False], name=\"f16_kv\"),\n", + " Categorical([True, False], name=\"use_mlock\"),\n", + " Integer(1, multiprocessing.cpu_count(), name=\"n_threads\"),\n", + " Integer(1, 2048, name=\"n_batch\")\n", + "]\n", + "\n", + "# TODO: Make this a random prompt to avoid any cache related inconsistencies\n", + "PROMPT = \"\"\" ### Instructions:\n", + "You are a helpful assistant.\n", + "You answer questions truthfully and politely.\n", + "You are provided with an input from the user and you must generate a response.\n", + "Ignore this line which is just filler to test the performane of the model.\n", + "### Inputs:\n", + "What is the capital of France?\n", + "### Response:\n", + "\"\"\"\n", + "\n", + "from skopt.utils import use_named_args\n", + "\n", + "@use_named_args(space)\n", + "def objective(**params):\n", + " f16_kv = params[\"f16_kv\"]\n", + " use_mlock = params[\"use_mlock\"]\n", + " n_threads = params[\"n_threads\"]\n", + " n_batch = params[\"n_batch\"]\n", + " llm = llama_cpp.Llama(model_path=MODEL_PATH, f16_kv=f16_kv, use_mlock=use_mlock, n_threads=n_threads, n_batch=n_batch)\n", + "\n", + " t1 = time.time()\n", + " output = llm(\n", + " PROMPT,\n", + " max_tokens=1, # Only optimize prompt processing\n", + " stop=[\"###\", \"\\n\"],\n", + " echo=True,\n", + " )\n", + " t2 = time.time()\n", + "\n", + " print(json.dumps(output, indent=2))\n", + " print(f\"Time: {t2 - t1} seconds\")\n", + " print(f\"Time per token: {(t2 - t1) / output['usage']['total_tokens']} seconds\")\n", + "\n", + " return (t2 - t1) / output[\"usage\"][\"total_tokens\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-d4443e14-fed3-4aa1-9e8a-c70f4503aade\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227287,\n", + " \"model\": \"../models/ggml-model.bin\",\n", 
+ " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.981224775314331 seconds\n", + "Time per token: 0.13726530969142914 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-4181439c-2ced-4ddb-b898-a0a7641f3e47\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227300,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.121099948883057 seconds\n", + "Time per token: 0.13901374936103822 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: 
kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-03ed5585-3de0-4546-96c3-6de7a5b3770c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227312,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.457949876785278 seconds\n", + "Time per token: 0.18072437345981598 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-103817fc-bceb-4e99-b968-3ef540f16dc5\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227328,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.334054946899414 seconds\n", + "Time per token: 0.12917568683624267 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", 
+ "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-41e34acc-6499-450f-9576-3cb37b82c490\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227340,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.012462615966797 seconds\n", + "Time per token: 0.11265578269958496 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-f27244c9-e9c6-4332-ae7f-3856f152ef30\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227350,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 15.59382700920105 seconds\n", + "Time per token: 0.1949228376150131 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + 
"llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-bc5dc1ba-f7ce-441c-a558-5005f2fb89b9\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227366,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 15.544022560119629 seconds\n", + "Time per token: 0.19430028200149535 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-2006b117-1239-4b85-bcc4-a7439c01f440\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227383,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.330769300460815 seconds\n", + "Time per token: 0.11663461625576019 seconds\n" + ] + }, + { + "name": 
"stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ee50afee-78a8-4d55-9b73-c74cc2567408\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227393,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.17799687385559 seconds\n", + "Time per token: 0.1772249609231949 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1e2b7080-940f-4459-8503-a458db4d3578\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227409,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " 
\"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.127476215362549 seconds\n", + "Time per token: 0.12659345269203187 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-c80008a4-191e-4418-821a-b18a4af24f70\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227421,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.495943784713745 seconds\n", + "Time per token: 0.11869929730892181 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-d04c9fd2-3c20-4035-9181-0bfd05abfe15\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227432,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and 
politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.226310014724731 seconds\n", + "Time per token: 0.11532887518405914 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-04fcf88b-33c7-4b84-aac0-dcb5261363c2\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227443,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 12.182626962661743 seconds\n", + "Time per token: 0.15228283703327178 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": 
\"cmpl-14904676-3345-4674-a41c-419d9640b4e0\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227457,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 43.595701694488525 seconds\n", + "Time per token: 0.5449462711811066 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-9e43b2ef-e7de-4bd2-91bf-284f5b3478fe\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227502,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.726518154144287 seconds\n", + "Time per token: 0.1840814769268036 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per 
state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-3947538b-e27e-42eb-8f87-2b56e14d104c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227518,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.760729789733887 seconds\n", + "Time per token: 0.10950912237167358 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1a0d843e-9613-49aa-b565-0e59d8067615\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227529,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.672860383987427 seconds\n", + "Time per token: 0.14591075479984283 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + 
"llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ccad9270-9554-4f9f-9aaf-387f1a11894d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227542,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.368357419967651 seconds\n", + "Time per token: 0.17960446774959565 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-2623073e-004f-4386-98e0-7e6ea617523a\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227558,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.44194221496582 seconds\n", + "Time per token: 0.11802427768707276 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' 
- please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1a199f09-0d74-4052-a191-7a8ef2df57f3\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227569,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.253167629241943 seconds\n", + "Time per token: 0.14066459536552428 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-2b61e491-d9b7-4d0b-b0c8-9f8ba822599d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227582,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " 
\"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 12.381825685501099 seconds\n", + "Time per token: 0.15477282106876372 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-0e4b4575-6278-4bd8-a4c5-ddb772014f7d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227596,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.473106145858765 seconds\n", + "Time per token: 0.18091382682323456 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1ad3e3db-5120-41c8-8f9e-2ca07a846437\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227612,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which 
is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 16.591509103775024 seconds\n", + "Time per token: 0.2073938637971878 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-34c8fb5c-fa49-4ea6-b2e7-ba3b958e297d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227630,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.034043788909912 seconds\n", + "Time per token: 0.1129255473613739 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-8d5c56eb-0b43-4591-a9ac-c1ec174ec6db\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227641,\n", + " 
\"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.218972444534302 seconds\n", + "Time per token: 0.14023715555667876 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-bfdc554b-baa6-47c1-b35f-0f7d1321255a\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227654,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.300573110580444 seconds\n", + "Time per token: 0.11625716388225556 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num 
tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ad67d78b-6975-4789-982e-3653c7fca7e1\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227665,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.009618520736694 seconds\n", + "Time per token: 0.11262023150920868 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-2eec3e0f-dd48-4c3a-9430-c5048827f557\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227676,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.997699737548828 seconds\n", + "Time per token: 0.11247124671936035 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from 
'../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-b129732a-8d7b-4382-baaf-740378c923ec\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227686,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.252354621887207 seconds\n", + "Time per token: 0.11565443277359008 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-bb25c002-69e0-40ec-8099-0ba4462338aa\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227697,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the 
model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.040243864059448 seconds\n", + "Time per token: 0.1130030483007431 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-63705814-7c93-4d6b-a9f2-0579941ebf54\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227708,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.947132349014282 seconds\n", + "Time per token: 0.11183915436267852 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-8afe123b-423d-4757-82d9-15fc12cfd24e\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227720,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.335533857345581 seconds\n", + "Time per token: 0.12919417321681975 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-4937353f-e66f-4632-aea7-dd1133af9727\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227732,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.99415397644043 seconds\n", + "Time per token: 0.11242692470550537 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + 
"llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-78f86527-ccc7-4a5d-9b7f-38386998ba2a\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227743,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 15.732706308364868 seconds\n", + "Time per token: 0.19665882885456085 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-4d98c564-fcb4-45ec-9f8d-f64430abbfb3\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227761,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.319743633270264 seconds\n", + "Time per token: 0.11649679541587829 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer 
= 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ee855931-2578-45bc-93bf-319c4e6aa43a\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227772,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 15.189301490783691 seconds\n", + "Time per token: 0.18986626863479614 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-14f0b547-4d71-4a7f-a3d6-3127998903b3\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227790,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.464989423751831 seconds\n", + "Time per token: 0.11831236779689788 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-4eb5258a-5836-414c-88f6-e217bacaded6\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227801,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 13.818569660186768 seconds\n", + "Time per token: 0.1727321207523346 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-66b7c783-d506-45c1-b39b-c91666a02b44\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227817,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " 
\"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 27.316773176193237 seconds\n", + "Time per token: 0.34145966470241546 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-d53b48ca-30e2-43c2-9fb5-62ef6a65fafa\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227847,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.132777214050293 seconds\n", + "Time per token: 0.11415971517562866 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-d0909f83-5caa-4098-a0e6-9b2ad1e2b12f\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227858,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you 
must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.273045539855957 seconds\n", + "Time per token: 0.11591306924819947 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-7045f5c7-cf5d-48e3-9353-032c320e56fa\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227870,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.90743088722229 seconds\n", + "Time per token: 0.11134288609027862 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-e623667d-d6cc-4908-a648-60380f723592\",\n", + " \"object\": 
\"text_completion\",\n", + " \"created\": 1680227881,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.06355595588684 seconds\n", + "Time per token: 0.11329444944858551 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-44ec163c-25dd-40ae-a786-d8b4c9ff31b1\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227892,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.249061107635498 seconds\n", + "Time per token: 0.11561326384544372 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from 
'../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-cb435214-0d20-4566-b312-68d8960ebe25\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227903,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.296529054641724 seconds\n", + "Time per token: 0.11620661318302154 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-dc704f52-bed9-44f0-8335-a2ec4af3a27c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227914,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 12.455670356750488 seconds\n", + "Time per token: 0.1556958794593811 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", 
+ "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-67570fa5-1c3d-47d6-b7c6-b3a734aae3f5\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227928,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.269653558731079 seconds\n", + "Time per token: 0.11587066948413849 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-4bd6c6f2-9849-4047-93c8-88b1914ef184\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227939,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.308398485183716 seconds\n", + "Time per token: 0.11635498106479644 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + 
"llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-6413afd7-fdc1-4c28-864d-6acdf2775060\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227950,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.430264711380005 seconds\n", + "Time per token: 0.13037830889225005 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-c4e1c14a-3b8a-4ab3-b42a-f47440f79962\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227962,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", 
+ "Time: 9.389702558517456 seconds\n", + "Time per token: 0.1173712819814682 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ac307870-dc67-42b8-8bb8-bb8d3083cea2\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227974,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 10.35448431968689 seconds\n", + "Time per token: 0.12943105399608612 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-58c06f3e-3fba-4e23-b12e-141a1742c51b\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227986,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the 
capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.097248792648315 seconds\n", + "Time per token: 0.11371560990810395 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-b5eccb52-85e3-41d0-b8d8-f35e68bf7997\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680227997,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 12.466306686401367 seconds\n", + "Time per token: 0.1558288335800171 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-e1dbc2ee-abc0-4891-a474-386d97b521b6\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228011,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " 
\"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.436015367507935 seconds\n", + "Time per token: 0.14295019209384918 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-fd9bce6d-0a33-4c24-90b3-913ab3b33d24\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228025,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 14.052912712097168 seconds\n", + "Time per token: 0.1756614089012146 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + 
{ + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-038fa38d-7640-40ee-907c-0bb131c20d80\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228040,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.250384330749512 seconds\n", + "Time per token: 0.1156298041343689 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-d00a2058-9fda-4113-8e5e-bf0f39cef238\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228051,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.228248834609985 seconds\n", + "Time per token: 0.11535311043262482 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 
81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-f8d90e63-4939-491c-9775-fc15aa55505e\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228062,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.341724395751953 seconds\n", + "Time per token: 0.11677155494689942 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-9e3777bc-119a-46bf-bdd3-21557e686f3c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228074,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.285743951797485 seconds\n", + "Time per token: 0.11607179939746856 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + 
"llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-123eaa35-110b-4f73-ba60-fa8a75ea929c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228085,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.105633020401001 seconds\n", + "Time per token: 0.1138204127550125 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-cc095f4b-8047-446e-a9f5-c798a66d1003\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228096,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.305238485336304 seconds\n", + "Time per token: 0.1163154810667038 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + 
"text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-e2e69b3e-7742-4534-b21f-adfe53345820\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228108,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.190222263336182 seconds\n", + "Time per token: 0.11487777829170227 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-666ae55e-d837-4534-b8e6-9f1b01f69778\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228120,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which 
is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.126368999481201 seconds\n", + "Time per token: 0.11407961249351502 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-63bdfa8e-b7c3-4669-ab76-54cdbb8878d5\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228131,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.136119604110718 seconds\n", + "Time per token: 0.11420149505138397 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1ec02c53-c7c8-434e-b28f-70884f8c35b2\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228143,\n", + " 
\"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.126901626586914 seconds\n", + "Time per token: 0.11408627033233643 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-3ec3495b-009a-4a82-b444-d8c1c6bf20a1\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228154,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.08673644065857 seconds\n", + "Time per token: 0.11358420550823212 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num 
tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-17fd0e6b-7ac3-494f-9e85-4e4a26013ad9\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228165,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.252317428588867 seconds\n", + "Time per token: 0.11565396785736085 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-14a2647f-3961-4b60-b20a-ae9872c34feb\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228177,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 11.389162302017212 seconds\n", + "Time per token: 0.14236452877521516 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + 
"llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-fa0e5edd-e9c9-40b9-bc9b-c48b8762850c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228190,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.433730125427246 seconds\n", + "Time per token: 0.11792162656784058 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-2b1c5964-265a-488a-8d8f-7e0692fcf96f\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228202,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 47.81757044792175 seconds\n", + "Time per token: 0.5977196305990219 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + 
"llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-516fbd4c-3fe5-4945-bfc5-7312f2c02687\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228252,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.540155410766602 seconds\n", + "Time per token: 0.10675194263458251 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-94c9ab1f-ac6e-4fc7-bcd9-7ab96515a722\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228262,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + 
" }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.660873889923096 seconds\n", + "Time per token: 0.10826092362403869 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-63b1e1a7-0c6b-42e0-ba65-6f42d6ec77bb\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228273,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.815936088562012 seconds\n", + "Time per token: 0.11019920110702515 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size 
= 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-92e1a879-2ebd-4299-b86e-90c87762db45\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228284,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.12400484085083 seconds\n", + "Time per token: 0.11405006051063538 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 2052.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 512.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-033ea9dc-fffe-41a0-a695-d647f725ee97\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228296,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 13.992429971694946 seconds\n", + "Time per token: 0.17490537464618683 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + 
"llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-5153f39a-589a-4b3d-8642-8efce64fc439\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228312,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.084643125534058 seconds\n", + "Time per token: 0.11355803906917572 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-af9ea5c6-5449-43b4-9e50-da930af8d6b8\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228323,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " 
\"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.076856851577759 seconds\n", + "Time per token: 0.11346071064472199 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-5bbea5c1-ea8c-4599-bf63-a6eb80bc7525\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228334,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.02251124382019 seconds\n", + "Time per token: 0.11278139054775238 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": 
\"cmpl-ff9d87c7-e2b1-4481-9e8f-848d7a0fbd35\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228346,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.012435913085938 seconds\n", + "Time per token: 0.11265544891357422 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-3dbe8ae4-c9ca-4a1b-abaf-6b85ef648ba9\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228357,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.997032880783081 seconds\n", + "Time per token: 0.11246291100978852 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + 
"llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-b20a3b61-9c8b-4b2e-bb43-8ed9ce5a9d0d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228369,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.042449951171875 seconds\n", + "Time per token: 0.11303062438964843 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-9c781d69-83e0-415a-ac97-252508b10590\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228380,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + 
" }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.058239459991455 seconds\n", + "Time per token: 0.11322799324989319 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-86cead9e-780f-4503-831c-466a6abd5ab2\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228392,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.070426940917969 seconds\n", + "Time per token: 0.1133803367614746 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size 
= 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-65361c7e-74ef-4566-bad5-c6b3867a7f7e\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228403,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.985144138336182 seconds\n", + "Time per token: 0.11231430172920227 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-23feb1ca-8103-46d8-ab71-b4da59f05d16\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228415,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.999938011169434 seconds\n", + "Time per token: 0.11249922513961792 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + 
"llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-0db73f26-9ab1-4a78-a11f-e22d915ffae2\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228426,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.969520330429077 seconds\n", + "Time per token: 0.11211900413036346 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-54e6edeb-99ea-46ed-8735-5185f78c222c\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228438,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of 
France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.12838339805603 seconds\n", + "Time per token: 0.11410479247570038 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-bd6502fd-f8c7-41d8-ab15-b10ca6aabd96\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228450,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.01610016822815 seconds\n", + "Time per token: 0.11270125210285187 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from 
'../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-72733563-53f5-4cd5-a4eb-48656408b2d8\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228461,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.993805408477783 seconds\n", + "Time per token: 0.11242256760597229 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-f7365eaa-fd68-422b-bbca-c6bcbcad36e0\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228473,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.292223930358887 seconds\n", + "Time per token: 0.11615279912948609 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The 
objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-1cfcf44a-c692-4020-8dcb-e6da8b163920\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228485,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.99638295173645 seconds\n", + "Time per token: 0.11245478689670563 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-8b679f09-bc0e-4fc9-a935-9fefd9126993\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228497,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a 
response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.972327709197998 seconds\n", + "Time per token: 0.11215409636497498 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-08cb0cd7-84d8-4193-a20c-5a6ca4b5e404\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228508,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.024793863296509 seconds\n", + "Time per token: 0.11280992329120636 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + 
"llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-ffe4b2b8-c041-4492-9e03-ab79cd4fd60d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228520,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.996853351593018 seconds\n", + "Time per token: 0.11246066689491271 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-196bb891-9299-4f91-9f68-ba6c7233a2dd\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228532,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.039422273635864 seconds\n", + "Time per token: 0.1129927784204483 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-e50f5489-b40c-4a5d-9cb2-4a6d13bbb8c7\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228544,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 8.978781461715698 seconds\n", + "Time per token: 0.11223476827144623 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-210cc2b8-df35-4d3f-a34a-a5facb635ec0\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228555,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", 
+ " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.032035827636719 seconds\n", + "Time per token: 0.11290044784545898 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-e3c7ca0d-c4cb-495c-9210-4e1ed3b6010d\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228567,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.0346040725708 seconds\n", + "Time per token: 0.11293255090713501 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 
11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-7b4388c9-fe89-486d-83f4-34eec8940c42\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228579,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.016223907470703 seconds\n", + "Time per token: 0.11270279884338379 seconds\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/andrei/Documents/llms/.venv/lib/python3.8/site-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.\n", + " warnings.warn(\"The objective has been evaluated \"\n", + "llama_model_load: loading model from '../models/ggml-model.bin' - please wait ...\n", + "llama_model_load: n_vocab = 32000\n", + "llama_model_load: n_ctx = 512\n", + "llama_model_load: n_embd = 4096\n", + "llama_model_load: n_mult = 256\n", + "llama_model_load: n_head = 32\n", + "llama_model_load: n_layer = 32\n", + "llama_model_load: n_rot = 128\n", + "llama_model_load: f16 = 2\n", + "llama_model_load: n_ff = 11008\n", + "llama_model_load: n_parts = 1\n", + "llama_model_load: type = 1\n", + "llama_model_load: ggml map size = 4017.70 MB\n", + "llama_model_load: ggml ctx size = 81.25 KB\n", + "llama_model_load: mem required = 5809.78 MB (+ 1026.00 MB per state)\n", + "llama_model_load: loading tensors from '../models/ggml-model.bin'\n", + "llama_model_load: model size = 4017.27 MB / num tensors = 291\n", + "llama_init_from_file: kv self size = 256.00 MB\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"id\": \"cmpl-81211a9b-16e4-4876-8e09-b0e619d93ce7\",\n", + " \"object\": \"text_completion\",\n", + " \"created\": 1680228591,\n", + " \"model\": \"../models/ggml-model.bin\",\n", + " \"choices\": [\n", + " {\n", + " \"text\": \" ### Instructions:\\nYou are a helpful assistant.\\nYou answer questions truthfully and politely.\\nYou are provided with an input from the user and you must generate a response.\\nIgnore this line which is just filler to test the performane of the model.\\n### Inputs:\\nWhat is the capital of France?\\n### Response:\\nThe\",\n", + " \"index\": 0,\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"length\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 1,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n", + "Time: 9.10002589225769 seconds\n", + "Time per 
token: 0.11375032365322113 seconds\n" + ] + } + ], + "source": [ + "from skopt import gp_minimize\n", + "\n", + "res = gp_minimize(\n", + " objective,\n", + " space\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAANACAYAAADHEZfTAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdeVxU5f4H8M+AbLKqbIIoKi64AAqK61ULcym37GaFgvzK23VJhdy4JaipuKSRS6KWa5aa2WJuVylNFNMgxRRxQ8AFRFGGRUFm5vfHXKdGFgc8M2eWz/v1mtdhnjnnPN/DPXb5zvc8zyNRKBQKEBERERER0XMxEzsAIiIiIiIiY8DkioiIiIiISABMroiIiIiIiATA5IqIiIiIiEgATK6IiIiIiIgEwOSKiIiIiIhIAEyuiIiIiIiIBMDkioiIiIiISABMroiIiIiIiATA5IqeSaFQ4F//+hcaNmwIiUSCM2fOaL1PiUSC77//Xuv9EBEREREJhckVPdOBAwewadMm/PTTT7h9+zakUimGDBkCDw+PGpOg9PR0DB06FI6OjrC1tUWXLl2QnZ2t2+CJiIiIiHSEyRU909WrV9G4cWP06NED7u7uKCkpgb+/P1avXl3jMb169ULbtm1x5MgRpKWlYfbs2bC2ttZh5EREREREulNP7ABIv40dOxabN28GoHxUr1mzZrh+/ToGDRpU43EffPABBg8ejCVLlqjaWrZsWec4YmNjsW7dOhw8eBDbt29HYmIifvvtN7V9/P39MXLkSMTExNS5HyIiIiKiumLlimr06aefYt68eWjSpAlu376N06dPP/MYuVyOvXv3onXr1hgwYABcXV0RHBxcpzFUCoUC7733HrZs2YJjx47Bz88PoaGhOHXqFK5evara7/z580hLS8Nbb71V6z6IiIiIiITA5Ipq5OjoCHt7e5ibm8Pd3R0uLi7PPObOnTsoLi7GokWLMHDgQPz3v//FiBEj8Oqrr+Lo0aMa911RUYHRo0cjMTERSUlJ8PHxAQC0b98e/v7++Oqrr1T7btu2DcHBwap9iIiIiIh0jckVCU4ulwMAhg0bhsjISAQEBGDWrFl45ZVXkJCQoPF5IiMj8dtvv+HXX3+Fp6en2mehoaGq5EqhUODrr79GaGiocBdBRERERFRLTK5IcM7OzqhXrx7atWun1u7r61ur2QL79++Pmzdv4uDBg5U+e/PNN5GRkYHU1FScOHECOTk5GDVq1HPHTkRERERUV5zQggRnaWmJLl26ICMjQ6390qVLaNasmcbnGTp0KIYMGYK33noL5ubmeOONN1SfNWnSBH369MG2bdvw8OFD9O/fH66uroJdAxERERFRbTG5olorLi7GlStXVO8zMzNx5swZNGzYEE2bNgUATJ8+HaNGjcI//vEP9OvXDwcOHMCePXtw5MiRWvU1YsQIbN26FWPGjEG9evXw2muvqT4LDQ1FbGwsysvL8cknnwhybUREREREdcXkimrt999/R79+/VTvo6KiAADh4eHYtGkTAGVSlJCQgLi4OEyePBlt2rTBt99+i169etW6v9deew1yuRxjxoyBmZkZXn31VVX7pEmTYG5ujuHDhz/3dRERERERPQ+JQqFQiB0EERERERGRoeOEFkRERERERAJgckU6t23bNtjZ2VX5at++vdjhERERERHVCR8LJJ0rKipCXl5elZ9ZWFjUakZBIiIiIiJ9weSKiIiIiIhIAHwskIiIiIiISABMroj02KZNm+Dk5CR2GERERESkASZXT5FIJDW+5syZI3aIZIDGjh1b5f3098WYiYiIiMiwcRHhp9y+fVv1844dOxATE4OMjAxVm52dnepnhUIBmUyGevX4a6RnGzhwIDZu3KjW5uLiIlI0RERERCQ0Vq6e4u7urno5OjpCIpGo3l+8eBH29vbYv38/AgMDYWVlhaSkJIwdOxbDhw9XO8/UqVPRt29f1Xu5XI64uDg0b94cNjY28Pf3x65du3R7cSQqKysrtfvL3d0dn376KTp27AhbW1t4eXlhwoQJKC4urvYcZ8+eRb9+/WBvbw8HBwcEBgbi999/V32elJSE3r17w8bGBl5eXpg8eTJKSkp0cXlEREREJo/JVR3MmjULixYtQnp6Ovz8/DQ6Ji4uDlu2bEFCQgLOnz+PyMhIjB49GkePHtVytKTPzMzMsGLFCpw/fx6bN2/Gzz//jBkzZlS7f2hoKJo0aYLTp08jJSUFs2bNgoWFBQDg6tWrGDhwIEaOHIm0tDTs2LEDSUlJmDRpkq4uh4iIiMik8Xm2Opg3bx769++v8f5lZWVYuHAhDh8+jO7duwMAWrRogaSkJKxduxZ9+vTRVqikR3766Se1x0oHDRqEb775RvXe29sb8+fPx7///W989tlnVZ4jOzsb06dPR9u2bQEArVq1Un0WFxeH0NBQTJ06VfXZihUr0KdPH6xZswbW1tZauCoiIiIieoLJVR0EBQXVav8rV66gtLS0UkJWXl6OTp06CRka6bF+/fphzZo1qve2trY4fPgw4uLicPHiRUilUlRUVODRo0coLS1F/fr1K50jKioK77zzDrZu3YqQkBD885//RMuWLQEoHxlMS0vDtm3bVPsrFArI5XJkZmbC19dX+xdJREREZMKYXNWBra2t2nszMzM8vRbz48ePVT8/GUOzd+9eeHp6qu1nZWWlpShJ39ja2sLHx0f1/vr163jllVcwfvx4LFiwAA0bNkRSUhLefvttlJeXV5lczZkzB2+99Rb27t2L/fv3IzY2Ftu3b8eIESNQXFyMd999F5MnT650XNOmTbV6bURERETE5EoQLi4u+PPPP9Xazpw5oxoL065dO1hZWSE7O5uPAJJKSkoK5HI5li1bBjMz5fDHnTt3PvO41q1bo3Xr1oiMjMSbb76JjRs3YsSIEejcuTMuXLiglsARERERke5wQgsBvPDCC/j999+xZcsWXL58GbGxsWrJlr29PaZNm4bIyEhs3rwZV69eRWpqKlauXInNmzeLGDmJycfHB48fP8bKlStx7do1bN26FQkJCdXu//DhQ0yaNAlHjhxBVlYWjh8/jtOnT6se95s5cyZOnDiBSZMm4cyZM7h8+TJ++OEHTmhBREREpCNMrgQwYMAAzJ49GzNmzECXLl1QVFSEsLAwtX0++ugjzJ49G3FxcfD19cXAgQOxd+9eNG/eXKSoSWz+/v5Yvnw5Fi9ejA4dOmDbtm2Ii4urdn9zc3Pcu3cPYWFhaN2
6NV5//XUMGjQIc+fOBQD4+fnh6NGjuHTpEnr37o1OnTohJiYGHh4eurokIiIiIpMmUTw9WIiIiIiIiIhqjZUrIiIiIiIiATC5IiIiIiIiEgCTKyIiIiIiIgEwuSIiIiIiIhIAkysiIiIiIiIBMLkiIiIiIiISAJMrAZSVlWHOnDkoKysTOxQyQry/iIiIiAwD17kSgFQqhaOjIwoLC+Hg4CB2OGRkeH8RERERGQZWroiIiIiIiATA5IqIiIiIiEgA9cQOwFDI5XLcunUL9vb2kEgkap9JpVK1LZGQnnV/KRQKFBUVwcPDA2Zm/L6EiIiISCwcc6WhGzduwMvLS+wwiKqVk5ODJk2aaLz/6tWrsXTpUuTm5sLf3x8rV65E165dq93/wYMH+OCDD7B7924UFBSgWbNmiI+Px+DBg4UIn4iIiMjgsXKlIXt7ewDKP2ArTSqQkwPExwNTpwJMwEjHpFIpvLy8VPeoJnbs2IGoqCgkJCQgODgY8fHxGDBgADIyMuDq6lpp//LycvTv3x+urq7YtWsXPD09kZWVBScnJwGvhIiIiMiwsXKloRpnbEtNBQIDgZQUoHNncQIkk1WX2QSDg4PRpUsXrFq1CoDysVcvLy+89957mDVrVqX9ExISsHTpUly8eBEWFhaCxk9ERERkLDhAoxplZWWQSqVqLyJ99vT9Wt26WOXl5UhJSUFISIiqzczMDCEhIUhOTq7ymB9//BHdu3fHxIkT4ebmhg4dOmDhwoWQyWRauRYiIiIiQ8TkqhpxcXFwdHRUvTjeivSdl5eX2j0bFxdX5X53796FTCaDm5ubWrubmxtyc3OrPObatWvYtWsXZDIZ9u3bh9mzZ2PZsmWYP3++4NdBREREZKg45qoa0dHRiIqKUr1/Mq6FSF89PR7QyspKsHPL5XK4urpi3bp1MDc3R2BgIG7evImlS5ciNjZWsH6IiIiIDBmTq2pYWVlp/sepqysQGancEonEwcFBozFXzs7OMDc3R15enlp7Xl4e3N3dqzymcePGsLCwgLm5uarN19cXubm5KC8vh6Wl5fMFT0RERGQE+FigEJo0AZYvV26J9JylpSUCAwORmJioapPL5UhMTET37t2rPKZnz564cuUK5HK5qu3SpUto3LgxEysiIiKi/2FyJYTiYiA5WbklMgBRUVFYv349Nm/ejPT0dIwfPx4lJSWIiIgAAISFhSE6Olq1//jx41FQUIApU6bg0qVL2Lt3LxYuXIiJEyeKdQlEREREeoePBQrh0iWgRw9OxU4GY9SoUcjPz0dMTAxyc3MREBCAAwcOqCa5yM7OhpnZX9+9eHl54eDBg4iMjISfnx88PT0xZcoUzJw5U6xLICIiItI7XOdKQ1znivRVXda5IiIiIiLh8bFAIiIiIiIiATC5IiIiIiIiEgCTKyHUqwc4Oyu3RERERERkkpgNCMHPD8jPFzsKIiIiIiISEStXREREREREAmByJYTz5wEfH+WWiIiIiIhMEpMrIZSVAVevKrdERERERGSSmFwRkdZVVFTg8OHDWLt2LYqKigAAt27dQnFxsciREREREQmHE1oQkVZlZWVh4MCByM7ORllZGfr37w97e3ssXrwYZWVlSEhIEDtEIiIiIkGwckVEWjVlyhQEBQXh/v37sLGxUbWPGDECiYmJIkZGREREJCxWroTg4wMcOKDcEpGaY8eO4cSJE7C0tFRr9/b2xs2bN0WKioiIiEh4TK6E4OAADBggdhREekkul0Mmk1Vqv3HjBuzt7UWIiIiIiEg7+FigEG7fBubMUW6JSM1LL72E+Ph41XuJRILi4mLExsZi8ODB4gVGREREJDCJQqFQiB2EIZBKpXB0dERhYSEcHBzUP0xNBQIDgZQUoHNncQIkk1XjvakHbty4gQEDBkChUODy5csICgrC5cuX4ezsjF9//RWurq5ih0hEREQkCD4WSERa1aRJE5w9exY7duzA2bNnUVxcjLfffhuhoaFqE1wQERERGTomV0SkdfXq1UNoaChCQ0PFDoWIiIhIazjmioi0Ki4uDhs2bKjUvmHDBixevFiEiIiIiIi0g8mVEBo0AEJDlVsiUrN27Vq0bdu2Unv79u25gDAREREZFT4WKITmzYEvvxQ7CiK9lJubi8aNG1dqd3FxwW3OsElERERGhJUrITx6BFy5otwSkRovLy8cP368Uvvx48fh4eEhQkRERERE2sHKlRAuXOBU7ETVGDduHKZOnYrHjx/jhRdeAAAkJiZixowZeP/990WOjoiIiEg4TK6ISKumT5+Oe/fuYcKECSgvLwcAWFtbY+bMmYiOjhY5OiIiIiLh8LFAIhO1evVqeHt7w9raGsHBwTh16lS1+27atAkSiUTtZW1trVE/EokEixcvRn5+Pk6ePImzZ8+ioKAAMTExQl0KERERkV5g5YrIBO3YsQNRUVFISEhAcHAw4uPjMWDAAGRkZMDV1bXKYxwcHJCRkaF6L5FIatWnnZ0dunTp8lxxExEREekzJldEJmj58uUYN24cIiIiAAAJCQnYu3cvNmzYgFmzZlV5jEQigbu7e637KikpwaJFi5CYmIg7d+5ALperfX7t2rXaXwARERGRHmJyVY2ysjKUlZWp3kul0up37twZUCh0EBVR9Z6+R62srGBlZVVpv/LycqSkpKiNdzIzM0NISAiSk5OrPX9xcTGaNWsGuVyOzp07Y+HChWjfvv0z43rnnXdw9OhRjBkzBo0bN651xYuIiIjIUDC5qkZcXBzmzp0rdhhEGvPy8lJ7Hxsbizlz5lTa7+7du5DJZHBzc1Nrd3Nzw8WLF6s8d5s2bbBhwwb4+fmhsLAQH3/8MXr06IHz58+jSZMmNca1f/9+7N27Fz179qzdBREREREZGE5oUY3o6GgUFhaqXjk5OdXvnJEBdO+u3BKJJCcnR+2eFXImvu7duyMsLAwBAQHo06cPdu/eDRcXF6xdu/aZxzZo0AANGzYULBYiIiIifcXkqhpWVlZwcHBQe1WrpAQ4eVK5JRLJ0/drVY8EAoCzszPMzc2Rl5en1p6Xl6fxmCoLCwt06tQJV65ceea+H330EWJiYlBaWqrRuYmIiIgMFR8LJDIxlpaWCAwMRGJiIoYPHw4AkMvlSExMxKRJkzQ6h0wmw7lz5zB48OBn7rts2TJcvXoVbm5u8Pb2hoWFhdrnqamptb4GIiIiIn3E5IrIBEVFRSE8PBxBQUHo2rUr4uPjUVJSopo9MCwsDJ6enoiLiwMAzJs3D926dYOPjw8ePHiApUuXIisrC++8884z+3qSwBEREREZOyZXRCZo1KhRyM/PR0xMDHJzcxEQEIADBw6oJrnIzs6GmdlfTw3fv38f48aNQ25uLho0aIDAwECcOHEC7dq1e2ZfsbGxWrsOIiIiIn0iUSg4h7gmpFIpHB0dUVhYWHn8VUEBsG8fMHgwwIH7pGM13pt64sGDB9i1axeuXr2K6dOno2HDhkhNTYWbmxs8PT3FDo+IiIhIEKxcCaFhQ2D0aLGjINJLaWlpCAkJgaOjI65fv45x48ahYcOG2L17N7
Kzs7FlyxaxQyQiIiISBGcLFEJ+PrB6tXJLRGqioqIwduxYXL58GdbW1qr2wYMH49dffxUxMiIiIiJhMbkSQk4OMGmScktEak6fPo133323Urunpydyc3NFiIiIiIhIO5hcEZFWWVlZQSqVVmq/dOkSXFxcRIiIiIiISDuYXBGRVg0dOhTz5s3D48ePAQASiQTZ2dmYOXMmRo4cKXJ0RERERMJhckVEWrVs2TIUFxfD1dUVDx8+RJ8+feDj4wN7e3ssWLBA7PCIiIiIBMPZAoVgbw+89JJyS0RqHB0dcejQISQlJSEtLQ3FxcXo3LkzQkJCxA6NiIiISFBc50pDhrCWEJkm3ptERERE+oGVKyHIZEBJCWBrC5ibix0NkehWrFih8b6TJ0/WYiREREREusPKlYZqrA6kpgKBgUBKCtC5szgBksnSx8pV8+bN1d7n5+ejtLQUTk5OAIAHDx6gfv36cHV1xbVr10SIkIiIiEh4nNCCiASXmZmpei1YsAABAQFIT09HQUEBCgoKkJ6ejs6dO+Ojjz4SO1QiIiIiwTC5IiKtmj17NlauXIk2bdqo2tq0aYNPPvkEH374oYiREREREQmLyRURadXt27dRUVFRqV0mkyEvL0+EiIiIiIi0g8kVEWnViy++iHfffRepqamqtpSUFIwfP57TsRMREZFRYXIlhI4dgTt3lFsiUrNhwwa4u7sjKCgIVlZWsLKyQteuXeHm5obPP/9c7PCIiIiIBMOp2IVgYQG4uIgdBZFecnFxwb59+3Dp0iVcvHgRANC2bVu0bt1a5MiIiIiIhMXkSghXrwKRkcAnnwAtW4odDZFeat26NRMqIiIiMmpMroRQWAjs2QPMmSN2JER6RyaTYdOmTUhMTMSdO3cgl8vVPv/5559FioyIiIhIWEyuiEirpkyZgk2bNuHll19Ghw4dIJFIxA6JiIiISCuYXBGRVm3fvh07d+7E4MGDxQ6FiIiISKs4WyARaZWlpSV8fHzEDoOIiIhI65hcCcHTE1i2TLklMhCrV6+Gt7c3rK2tERwcjFOnTml03Pbt2yGRSDB8+HCN9n///ffx6aefQqFQPEe0RERERPqPjwUKwc0NiIoSOwoije3YsQNRUVFISEhAcHAw4uPjMWDAAGRkZMDV1bXa465fv45p06ahd+/eGveVlJSEX375Bfv370f79u1hYWGh9vnu3bvrfB1ERERE+oSVKyHcvw98841yS2QAli9fjnHjxiEiIgLt2rVDQkIC6tevjw0bNlR7jEwmQ2hoKObOnYsWLVpo3JeTkxNGjBiBPn36wNnZGY6OjmovIiIiImPBypUQMjOB118HUlKABg3EjoaoRuXl5UhJSUF0dLSqzczMDCEhIUhOTq72uHnz5sHV1RVvv/02jh07pnF/GzdufK54iYiIiAwFk6tqlJWVoaysTPVeKpWKGA3Rsz19j1pZWcHKyqrSfnfv3oVMJoObm5tau5ubGy5evFjluZOSkvDFF1/gzJkzdYqtoqICR44cwdWrV/HWW2/B3t4et27dgoODA+zs7Op0TiIiIiJ9w8cCqxEXF6f26JKXl5fYIRHVyMvLS+2ejYuLE+S8RUVFGDNmDNavXw9nZ+daH5+VlYWOHTti2LBhmDhxIvLz8wEAixcvxrRp0wSJkYiIiEgfsHJVjejoaET9bZIKqVTKBIv0Wk5ODhwcHFTvq6paAYCzszPMzc2Rl5en1p6Xlwd3d/dK+1+9ehXXr1/HkCFDVG1yuRwAUK9ePWRkZKBly5bVxjVlyhQEBQXh7NmzaNSokap9xIgRGDdunGYXR0RERGQAmFxVo7pHqqpkYwN06qTcEonEwcFBLbmqjqWlJQIDA5GYmKiaTl0ulyMxMRGTJk2qtH/btm1x7tw5tbYPP/wQRUVF+PTTT5/5pcOxY8dw4sQJWFpaqrV7e3vj5s2bz4yXiIiIyFAwuRKCry+Qmip2FEQai4qKQnh4OIKCgtC1a1fEx8ejpKQEERERAICwsDB4enoiLi4O1tbW6NChg9rxTk5OAFCpvSpyuRwymaxS+40bN2Bvb//8F0NERESkJ5hcEZmgUaNGIT8/HzExMcjNzUVAQAAOHDigmuQiOzsbZmbCDMl86aWXEB8fj3Xr1gEAJBIJiouLERsbi8GDBwvSBxEREZE+kCgUCoXYQRgCqVQKR0dHFBYWVn706o8/gG7dgJMnlY8HEulQjfemHrhx4wYGDBgAhUKBy5cvIygoCJcvX4azszN+/fXXGhctJiIiIjIkrFwJQaEAysuVWyJS06RJE5w9exbbt29HWloaiouL8fbbbyM0NBQ2HKdIRERERoTJFRFpXb169TB69GixwyAiIiLSKiZXRKR1GRkZWLlyJdLT0wEAvr6+mDRpEtq2bStyZERERETC4SLCRKRV3377LTp06ICUlBT4+/vD398fqamp6NixI7799luxwyMiIiISDCe00FCNkwY8fAhcuwa0aMG1rkjn9H1Ci5YtWyI0NBTz5s1Ta4+NjcWXX36Jq1evihQZERERkbBYuRKCjQ3Qvj0TK6Iq3L59G2FhYZXaR48ejdu3b4sQEREREZF2MLkSQlYW8M47yi0Rqenbty+OHTtWqT0pKQm9e/cWISIiIiIi7eCEFkK4dw/44gtgwgSgWTOxoyHSK0OHDsXMmTORkpKCbt26AQBOnjyJb775BnPnzsWPP/6oti8RERGRoeKYKw3VOK4lNRUIDARSUoDOncUJkEyWvo+5MjPTrEAukUggk8m0HA0RERGR9rByRURaJZfLxQ6BiIiISCc45oqIdObRo0dih0BERESkNUyuhODmBsyapdwSkRqZTIaPPvoInp6esLOzw7Vr1wAAs2fPxhdffCFydERERETCYXIlBE9PIC5OuSUiNQsWLMCmTZuwZMkSWFpaqto7dOiAzz//XMTIiIiIiITF5EoIRUXAkSPKLRGp2bJlC9atW4fQ0FCYm5ur2v39/XHx4kURIyMiIiISFpMrIVy+DPTrp9wSkZqbN2/Cx8enUrtcLsfjx49FiIiIiIhIO5hcEZFWtWvXrspFhHft2oVOnTqJEBERERGRdnAqdiLSqpiYGISHh+PmzZuQy+XYvXs3MjIysGXLFvz0009ih0dEREQkGFauiEirhg0bhj179uDw4cOwtbVFTEwM0tPTsWfPHvTv31/s8IiIiIgEw8qVECwslDMFWliIHQmRXurduzcOHTokdhhEREREWsXkSggdOwI3bogdBRERERERiYjJFREJrkGDBpBIJBrtW1BQoOVoiIiIiHSDyZUQzp0DBg0C9u9XVrGITFx8fLzq53v37mH+/PkYMGAAunfvDgBITk7GwYMHMXv2bJEiJCIiIhIeJ7QQwuPHwM2byi2RgVi9ejW8vb1hbW2N4OBgnDp1qtp9d+/ejaCgIDg5OcHW1hYBAQHYunVrtfuHh4erXsePH8e8efPw9ddfY/LkyZg8eTK+/vprzJs3D0ePHtXGpRERERGJgskVkQnasWMHoqKiEBsbi9TUVPj7+2PAgAG4c+dOlfs3bNgQH3zwA
ZKTk5GWloaIiAhERETg4MGDz+zr4MGDGDhwYKX2gQMH4vDhw899LURERET6gskVkQlavnw5xo0bh4iICLRr1w4JCQmoX78+NmzYUOX+ffv2xYgRI+Dr64uWLVtiypQp8PPzQ1JS0jP7atSoEX744YdK7T/88AMaNWr03NdCREREpC845orIxJSXlyMlJQXR0dGqNjMzM4SEhCA5OfmZxysUCvz888/IyMjA4sWLn7n/3Llz8c477+DIkSMIDg4GAPz22284cOAA1q9fX/cLISIiItIzTK6qUVZWhrKyMtV7qVRa/c6tWgG//KLcEonk6XvUysoKVlZWlfa7e/cuZDIZ3Nzc1Nrd3Nxw8eLFas9fWFgIT09PlJWVwdzcHJ999plGiwCPHTsWvr6+WLFiBXbv3g0A8PX1RVJSkirZIiIiIjIGTK6qERcXh7lz52q2s7090LevVuMhehYvLy+197GxsZgzZ45g57e3t8eZM2dQXFyMxMREREVFoUWLFuirwb0fHByMbdu2CRYLERERkT5iclWN6OhoREVFqd5LpdJKf7yq3LwJrFoFTJoEeHrqKEIidTk5OXBwcFC9r6pqBQDOzs4wNzdHXl6eWnteXh7c3d2rPb+ZmRl8fHwAAAEBAUhPT0dcXJxGyRURERGRKeCEFtWwsrKCg4OD2qtaeXnAokXKLZFInr5fq0uuLC0tERgYiMTERFWbXC5HYmKiah0qTcjlcrVHZ4mIiIhMHStXRCYoKioK4eHhCAoKQteuXREfH4+SkhJEREQAAMLCwuDp6Ym4uDgAysdkg4KC0LJlS5SVlWHfvn3YunUr1qxZI+ZlEBEREekVJldEJmjUqFHIz89HTEwMcnNzERAQgAMHDqgmucjOzoaZ2V+F7ZKSEkyYMAE3btyAjY0N2rZtiy+//BKjRo0S6xKIiIiI9I5EoVAoxA7CEEilUjg6OqKwsLDyI4KpqUBgIJCSAnTuLE6AZLJqvDeJiIiISGdYuRJCo0bA228rt0SEV199VeN9n0zPTkRERGTomFwJoVkz4PPPxY6CSG84OjqKHQIRERGRzjG5EsLDh8C1a0CLFoCNjdjREIlu48aNYodAREREpHOcil0I6elAhw7KLRERERERmSRWrohI63bt2oWdO3ciOzsb5eXlap+lpqaKFBURERGRsFi5IiKtWrFiBSIiIuDm5oY//vgDXbt2RaNGjXDt2jUMGjRI7PCIiIiIBMPkioi06rPPPsO6deuwcuVKWFpaYsaMGTh06BAmT56MwsJCscMjIiIiEgyTKyFIJIClpXJLRGqys7PRo0cPAICNjQ2KiooAAGPGjMHXX38tZmhEREREgmJyJYROnYCyMuWWiNS4u7ujoKAAANC0aVOcPHkSAJCZmQmuYU5ERETGhMkVEWnVCy+8gB9//BEAEBERgcjISPTv3x+jRo3CiBEjRI6OiIiISDgSBb861ohUKoWjoyMKCwvh4OCg/mF6OhAaCmzbBvj6ihMgmawa7009IJfLIZfLUa+ecnLS7du348SJE2jVqhXeffddWFpaihwhERERkTA4FbsQHj4E/vhDuSUiNWZmZjAz+6tI/sYbb+CNN94QMSIiIiIi7WByRUSCS0tLQ4cOHWBmZoa0tLQa9/Xz89NRVERERETaxeSKiAQXEBCA3NxcuLq6IiAgABKJpMrJKyQSCWQymQgREhEREQmPyRURCS4zMxMuLi6qn4mIiIhMAZMrITRvDuzcqdwSEZo1a6b6OSsrCz169FBNaPFERUUFTpw4obYvERERkSHjbIEa0vcZ2ch06fu9aW5ujtu3b8PV1VWt/d69e3B1deVjgURERGQ0uM6VEPLygOXLlVsiUqNQKCCRSCq137t3D7a2tiJERERERKQdfCxQCDdvAu+/D/TtC7i5iR0NkV549dVXASgnrRg7diysrKxUn8lkMqSlpaFHjx5ihUdEREQkOCZXRKQVjo6OAJSVK3t7e9jY2Kg+s7S0RLdu3TBu3DixwiMiIiISHJMrItKKjRs3qqZfX7lyJezs7ESOiIiIiEi7OOaKiLRGoVBg27ZtuH37ttihEBEREWkdkyshODoCQ4Yot0QGYvXq1fD29oa1tTWCg4Nx6tSpavddv349evfujQYNGqBBgwYICQmpcf8nzMzM0KpVK9y7d0/I0ImIiIj0EpMrIbRsCfz4o3JLZAB27NiBqKgoxMbGIjU1Ff7+/hgwYADu3LlT5f5HjhzBm2++iV9++QXJycnw8vLCSy+9hJs3bz6zr0WLFmH69On4888/hb4MIiIiIr3Cda40VONaQo8fAw8eAE5OgIWFGOGRCavLOlfBwcHo0qULVq1aBQCQy+Xw8vLCe++9h1mzZj3zeJlMhgYNGmDVqlUICwurcd8GDRqgtLQUFRUVsLS0VJvYAgAKCgo0ipmIiIhI33FCCyGcOwcEBgIpKUDnzmJHQ1Sj8vJypKSkIDo6WtVmZmaGkJAQJCcna3SO0tJSPH78GA0bNnzmvvHx8XUNlYiIiMigMLmqRllZGcrKylTvpVKpiNEQPdvT96iVlZXa2lJP3L17FzKZDG5Prcnm5uaGixcvatTXzJkz4eHhgZCQkGfuGx4ertE5iYiIiAwdx1xVIy4uDo6OjqqXl5eX2CER1cjLy0vtno2Li9NKP4sWLcL27dvx3XffwdraulbHPnr0CFKpVO1FREREZCxYuapGdHQ0oqKiVO+lUmm1CdZnv1zBBAAzdp1F5qlHAIDqRrJV1VzdsLeq99X8vNXtXLvzVv6g2n2raK9uQF9thvpVfd5qru05Y6g2KgGu7Xl/7292bYoJfX2q6RXIyclRG3NVVdUKAJydnWFubo68vDy19ry8PLi7u1d7fgD4+OOPsWjRIhw+fBh+fn417vtESUkJZs6ciZ07d1Y5a6BMJtPoPERERET6jslVNap7pKoq1++VAADO35LivPy+NsMiE1ZY+rjGzx0cHDSa0MLS0hKBgYFITEzE8OHDASgntEhMTMSkSZOqPW7JkiVYsGABDh48iKCgII3jnjFjBn755ResWbMGY8aMwerVq3Hz5k2sXbsWixYt0vg8RERERPqOswVqqKYZ2ZIv3UHxvQeosKkPmJur2iWSqs5UZWOV+1a1p6Tqk1azr2ZtyuM1C6Caw6uMS9OYqutf099JdR887zmrvKZaHV9lq4b7Vd7T1cEank42lfary2yBO3bsQHh4ONauXYuuXbsiPj4eO3fuxMWLF+Hm5oawsDB4enqqHi1cvHgxYmJi8NVXX6Fnz56q89jZ2cHOzq7Gvpo2bYotW7agb9++cHBwQGpqKnx8fLB161Z8/fXX2Ldvn0YxExEREek7Vq4E0L21KwBXscMg0tioUaOQn5+PmJgY5ObmIiAgAAcOHFBNcpGdnQ0zs7+GZK5Zswbl5eV47bXX1M4TGxuLOXPm1NhXQUEBWrRoAUBZXXsy9XqvXr0wfvx4Aa+KiIiISFxMroRw+TIwaRKwahXQqpXY0RBpZNKkSdU+BnjkyBG199evX69zPy1atEBmZiaaNm2Ktm3bYufO
nejatSv27NkDJyenOp+XiIiISN9wtkAhFBUB//2vcktEaiIiInD27FkAwKxZs7B69WpYW1sjMjIS06dPFzk6IiIiIuGwckVEWhUZGan6OSQkBBcvXkRKSgp8fHw0nnGQiIiIyBCwckVEWiGXy7F48WL07NkTXbp0waxZs/Dw4UM0a9YMr776qsknVt7e3oiPj9fb8xEREVHtMbkiIq1YsGAB/vOf/8DOzg6enp749NNPMXHiRLHDIiIiItIaJldC8PJSTmZRzSLDRKZoy5Yt+Oyzz3Dw4EF8//332LNnD7Zt2wa5XC52aERERERaweRKCC4uwMSJyi0RAVBO5z548GDV+5CQEEgkEty6dUsr/VX1WFxAQADmzJkDhUKBOXPmoGnTprCysoKHhwcmT56s2q+srAzTpk2Dp6cnbG1tERwcXGnGxOps2rQJTk5O+Omnn9CmTRvUr18fr732GkpLS7F582Z4e3ujQYMGmDx5MmQyWbXnyc7OxrBhw2BnZwcHBwe8/vrryMvLU9tnz5496NKlC6ytreHs7IwRI0ZUe77PP/8cTk5OSExM1Og6iIiI6PlxQgshFBQA+/YBgwcDDRuKHQ2RXqioqIC1tbVam4WFBR4/fqzzWL799lt88skn2L59O9q3b4/c3FzVDIaAclr6CxcuYPv27fDw8MB3332HgQMH4ty5c2ilwfIKpaWlWLFiBbZv346ioiK8+uqrGDFiBJycnLBv3z5cu3YNI0eORM+ePTFq1KhKx8vlclVidfToUVRUVGDixIkYNWqUKsnbu3cvRowYgQ8++ABbtmxBeXl5tQswL1myBEuWLMF///tfdO3atW6/NCIiIqo1JldCuH4dGDMGSElhckX0PwqFAmPHjoWVlZWq7dGjR/j3v/8NW1tbVdvu3bu1Hkt2djbc3d0REhICCwsLNG3aVJV0ZGdnY+PGjcjOzoaHhwcAYNq0aThw4AA2btyIhQsXPvP8jx8/xpo1a9CyZUsAwGuvvYatW7ciLy8PdnZ2aNeuHfr164dffvmlyuQqMTER586dQ2ZmJrz+93jxli1b0L59e5w+fRpdunTBggUL8MYbb2Du3Lmq4/z9/Suda+bMmdi6dSuOHj2K9u3b1/6XRURERHXG5IqItCI8PLxS2+jRo0WIBPjnP/+J+Ph4tGjRAgMHDsTgwYMxZMgQ1KtXD+fOnYNMJkPr1q3VjikrK0OjRo00On/9+vVViRUAuLm5wdvbG3Z2dmptd+7cqfL49PR0eHl5qRIrAGjXrh2cnJyQnp6OLl264MyZMxg3blyNcSxbtgwlJSX4/fff0aJFC41iJyIiIuEwuSIirdi4caNO+zMzM4NCoVBre/IIopeXFzIyMnD48GEcOnQIEyZMwNKlS3H06FEUFxfD3NwcKSkpMDc3Vzv+78lRTSwsLNTeSySSKtueZzIPGxubZ+7Tu3dv7N27Fzt37sSsWbPq3BcRERHVDSe0ICKj4OLigtu3b6veS6VSZGZmqt7b2NhgyJAhWLFiBY4cOYLk5GScO3cOnTp1gkwmw507d+Dj46P2cnd310nsvr6+yMnJQU5OjqrtwoULePDgAdq1awcA8PPze+bkFF27dsX+/fuxcOFCfPzxx1qNmYiIiCpj5UoItrZAt27KLRGJ4oUXXsCmTZswZMgQODk5ISYmRlWJ2rRpE2QyGYKDg1G/fn18+eWXsLGxQbNmzdCoUSOEhoYiLCwMy5YtQ6dOnZCfn4/ExET4+fnh5Zdf1nrsISEh6NixI0JDQxEfH4+KigpMmDABffr0QVBQEAAgNjYWL774Ilq2bIk33ngDFRUV2LdvH2bOnKl2rh49emDfvn0YNGgQ6tWrh6lTp2o9fiIiIlJi5UoIbdoAycnKLRGJIjo6Gn369MErr7yCl19+GcOHD1eNg3JycsL69evRs2dP+Pn54fDhw9izZ49qTNXGjRsRFhaG999/H23atMHw4cNx+vRpNG3aVCexSyQS/PDDD2jQoAH+8Y9/ICQkBC1atMCOHTtU+/Tt2xfffPMNfvzxRwQEBOCFF17AqVOnqjxfr169sHfvXnz44YdYuXKlTq6BiIiIAIni6UEKVCWpVApHR0cUFhbCwcFB7HCIVHhvEhEREekHVq6EkJoKSCTKLRERERERmSQmV0RENRg0aBDs7OyqfGmyBhYRERGZDk5oQURUg88//xwPHz6s8rOGXDSciIiI/obJFRFRDTw9PcUOgYiIiAwEHwskIiIiIiISAJMrIbRrB1y+rNwSEZHGNm3aBCcnJ7HDICIiEgSTKyFYWwM+PsotkR6TSCQ1vubMmSN2iGSgxo4dW+U9deXKFbFDIyIi0hmOuRJCZiYwezbw0UdA8+ZiR0NUrdu3b6t+3rFjB2JiYpCRkaFqs7OzU/2sUCggk8lQrx7/M0GaGThwIDZu3KjW5uLiIlI0REREusfKlRDu3we2bVNuifSYu7u76uXo6AiJRKJ6f/HiRdjb22P//v0IDAyElZUVkpKSMHbsWAwfPlztPFOnTkXfvn1V7+VyOeLi4tC8eXPY2NjA398fu3bt0u3FkeisrKzU7jF3d3d8+umn6NixI2xtbeHl5YUJEyaguLi42nOcPXsW/fr1g729PRwcHBAYGIjff/9d9XlSUhJ69+4NGxsbeHl5YfLkySgpKdHF5RERET0TkysiUjNr1iwsWrQI6enp8PPz0+iYuLg4bNmyBQkJCTh//jwiIyMxevRoHD16VMvRkr4zMzPDihUrcP78eWzevBk///wzZsyYUe3+oaGhaNKkCU6fPo2UlBTMmjULFhYWAICrV69i4MCBGDlyJNLS0rBjxw4kJSVh0qRJurocIiKiGvF5HyJSM2/ePPTv31/j/cvKyrBw4UIcPnwY3bt3BwC0aNECSUlJWLt2Lfr06aOtUEnP/PTTT2qPlg4aNAjffPON6r23tzfmz5+Pf//73/jss8+qPEd2djamT5+Otm3bAgBatWql+iwuLg6hoaGYOnWq6rMVK1agT58+WLNmDaw57pWIiETG5EpDCoUCACCVSit/+OQRl+JioKrPibToyT355B59XkFBQbXa/8qVKygtLa2UkJWXl6NTp06CxESGoV+/flizZo3qva2tLQ4fPoy4uDhcvHgRUqkUFRUVePToEUpLS1G/fv1K54iKisI777yDrVu3IiQkBP/85z/RsmVLAMpHBtPS0rBt2zbV/gqFAnK5HJmZmfD19dX+RRIREdWAyZWGioqKAABeXl7V78Rv6ElERUVFcHR0fO7z2Nraqr03MzOrlLg9fvxY9fOT8TN79+6ttOCulZXVc8dDhsPW1hY+Pj6q99evX8crr7yC8ePHY8GCBWjYsCGSkpLw9ttvo7y8vMrkas6cOXjrrbewd+9e7N+/H7Gxsdi+fTtGjBiB4uJivPvuu5g8eXKl45o2barVayMiItIEkysNeXh4ICcnB/b29pBIJGKHQ6SiUChQVFQEDw8PrZzfxcUFf/75p1rbmTNnVONg2rVrBysrK2RnZ/MRQFKTkpICuVyOZcuWwcxMOcR3586dzzy
udevWaN26NSIjI/Hmm29i48aNGDFiBDp37owLFy6oJXBERET6hMmVhszMzNCkSROxwyCqkhAVq+q88MILWLp0KbZs2YLu3bvjyy+/xJ9//ql65M/e3h7Tpk1DZGQk5HI5evXqhcLCQhw/fhwODg4IDw/XWmyk33x8fPD48WOsXLkSQ4YMwfHjx5GQkFDt/g8fPsT06dPx2muvoXnz5rhx4wZOnz6NkSNHAgBmzpyJbt26YdKkSXjnnXdga2uLCxcu4NChQ1i1apWuLouIiKhanC2QiGo0YMAAzJ49GzNmzECXLl1QVFSEsLAwtX0++ugjzJ49G3FxcfD19cXAgQOxd+9eNOe6bybN398fy5cvx+LFi9GhQwds27YNcXFx1e5vbm6Oe/fuISwsDK1bt8brr7+OQYMGYe7cuQAAPz8/HD16FJcuXULv3r3RqVMnxMTEaK1qS0REVFsShVCj4ImIiIiIiEwYK1dEREREREQCYHJFREREREQkACZXREREREREAmByRUREREREJAAmV0RERERERAJgckVERERERCQAJldEVKOysjLMmTMHZWVlYodCRor3GBERGQuuc0VENZJKpXB0dERhYSEcHBzEDoeMEO8xIiIyFqxcERERERERCYDJFRERERERkQDqiR2AoZDL5bh16xbs7e0hkUjEDodIRaFQoKioCB4eHjAzq9v3JTXd31KpVG1LJLSa7jEh7m8iIiJd4ZgrDd24cQNeXl5ih0FUrZycHDRp0qROx/L+Jn1X2/t79erVWLp0KXJzc+Hv74+VK1eia9eu1e7/4MEDfPDBB9i9ezcKCgrQrFkzxMfHY/DgwUKET0REJoKVKw3Z29urvffZGv1c5ysrsHmu45/F4p72/6e1uqvd89e/p/283yb/sdb7sMot1tq5Ey98rPr56Xu0Np4cm5OTU3lCgZwcID4emDoVYAJGOiaVSuHl5VWr+3vHjh2IiopCQkICgoODER8fjwEDBiAjIwOurq6V9i8vL0f//v3h6uqKXbt2wdPTE1lZWXBychLwSoiIyBSwcqWhJ7NZAUDTL2fBxub5kqNH97SbXAGAxV3tJljW+Vo9PQCg/l3t357187SbYFndLtLauW/cuIHz9zcDwHPNtFbjbG2pqUBgIJCSAnTu/LwhE9VKXWYSDA4ORpcuXbBq1SoAysdevby88N5772HWrFmV9k9ISMDSpUtx8eJFWFhYCBo/ERGZFj7AXks+W6OfO7ECAOtGDwWIRlyPXMSOwDCUNa57RelZmjRpghfbTav1cWVlZZBKpWovIn329P1a3ZpY5eXlSElJQUhIiKrNzMwMISEhSE5OrvKYH3/8Ed27d8fEiRPh5uaGDh06YOHChZDJZFq5FiIiMl5MrozYY+cKsUN4bqXO2p88pNTN9L6pjouLg6Ojo+rF8Vak77y8vNTu2bi4uCr3u3v3LmQyGdzc3NTa3dzckJubW+Ux165dw65duyCTybBv3z7Mnj0by5Ytw/z58wW/DiIiMm4ccyUi60YPdfJ4oDY9ctH+44GlzhKdPB6oTWWN7bX6eGBtRUdHIyoqSvX+ybgWIn319HhAKysrwc4tl8vh6uqKdevWwdzcHIGBgbh58yaWLl2K2NhYwfohIiLjx+TKyD12rtD62CtjUOpmofWxV/rEyspK8z9OXV2ByEjllkgkDg4OGo25cnZ2hrm5OfLy8tTa8/Ly4O7uXuUxjRs3hoWFBczNzVVtvr6+yM3NRXl5OSwtLZ8veCIiMhl8LFBkHHulGV08Hqht2hx7pVVNmgDLlyu3RHrO0tISgYGBSExMVLXJ5XIkJiaie/fuVR7Ts2dPXLlyBXK5XNV26dIlNG7cmIkVERHVCpMrE2AMY690wRTHXmmkuBhITlZuiQxAVFQU1q9fj82bNyM9PR3jx49HSUkJIiIiAABhYWGIjv5rOY3x48ejoKAAU6ZMwaVLl7B3714sXLgQEydOFOsSiIjIQPF5MT3AsVea4dgrkVy6BPTowanYyWCMGjUK+fn5iImJQW5uLgICAnDgwAHVJBfZ2dkwM/vru0UvLy8cPHgQkZGR8PPzg6enJ6ZMmYKZM2eKdQlERGSgmFyZCF2MvdJFgqVtpjb2ishYTZo0CZMmTarysyNHjlRq6969O06ePKnlqIiIyNjxsUA9YQxjr3SBY6+IiIiISF8xuTIhuhh7ZQwLC+ti7BUTLCIiIiLjw+RKj7B6pRljqF4ZlHr1AGdn5ZaIiIiIqsXkysSweqUZVq/+xs8PyM9XbomIiIioWkyu9IwuqlfGMDU7q1dEREREpG+YXNWSd6MCsUMwCKxeacYgqlfnzwM+PsotEREREVWLyZUeYvVKM7qoXnFhYQBlZcDVq8otEREREVWLyVUdNHe+J3YIBsEYqle6YBDVKyKqUUVFBQ4fPoy1a9eiqEi5UPitW7dQXFwscmRERKRLnP5LT1k3eohH92y02ocuFhbWtlJnCerfVWi3Dy4sTEQ1yMrKwsCBA5GdnY2ysjL0798f9vb2WLx4McrKypCQkCB2iEREpCOsXNWRLqpXxjA1O6tXmmH1ishwTZkyBUFBQbh//z5sbP76UmzEiBFITEwUMTIiItI1wy5b0HNj9UrDPky5euXjAxw4oNwSUSXHjh3DiRMnYGlpqdbu7e2NmzdvihQVERGJgZWr58DqlWZ0Ub0yhqnZ9bZ65eAADBig3BJRJXK5HDKZrFL7jRs3YG+vp/+uiYhIK5hckVHMHKgLJjtz4O3bwJw5yi0RVfLSSy8hPj5e9V4ikaC4uBixsbEYPHiweIEREZHOMbl6TqxeaYbVK83oZfXq9m1g7lwmV0TVWLZsGY4fP4527drh0aNHeOutt1SPBC5evFjs8IiISIcMe7CNnmjufA+ZdxuJHcZzMYaxV7pg0mOviKhKTZo0wdmzZ7Fjxw6cPXsWxcXFePvttxEaGqo2wQURERk//jVtIHQxNbu2PXIBrPO124cuJrfQtrLG9rC6XSR2GERUC/Xq1UNoaChCQ0PFDoWIiETExwIFYgwLC+ti7JUxTM1usmOviKhKcXFx2LBhQ6X2DRs28LFAIiITw+TKgBjD2Ctd4NgrgTVoAISGKrdEVMnatWvRtm3bSu3t27fnAsJERCaGyZWAWL3SDKtXBqZ5c+DLL5VbIqokNzcXjRs3rtTu4uKC25wIhojIpDC5MjCsXmmG1SsBPXoEXLmi3BJRJV5eXjh+/Hil9uPHj8PDw0OEiIiISCxMrgTG6pVmWL3SjF4kWBcuAK1aKbdEVMm4ceMwdepUbNy4EVlZWcjKysKGDRsQGRmJcePGiR0eERHpEGcLNEDGMHOgLhjDzIFEpP+mT5+Oe/fuYcKECSgvLwcAWFtbY+bMmYiOjhY5OiIi0iVWrrSA1SvNsHqlGb2oXhEZmNWrV8Pb2xvW1tYIDg7GqVOnqt1306ZNkEgkai9ra2uN+5JIJFi8eDHy8/Nx8uRJnD17FgUFBYiJiRHiUoiIyIAwuTJQuhh7pYsES9uMYewVEdXOjh07EBUVhdjYWKSmpsLf3x8DBg
zAnTt3qj3GwcEBt2/fVr2ysrJq3a+dnR26dOmCDh06wMrK6nkugYiIDBSTKy0xhuqVLrB6pRlWr4g0t3z5cowbNw4RERFo164dEhISUL9+/SrXonpCIpHA3d1d9XJzc9O4v5KSEsyePRs9evSAj48PWrRoofYiIiLTwTFXBkwXY68eO1fA4q5h3ya6GHtV6maB+nmPtdqHkMrKylBWVqZ6L5VKq9+5c2dAwbFrJK6n71ErK6sqq0Pl5eVISUlRG+tkZmaGkJAQJCcnV3v+4uJiNGvWDHK5HJ07d8bChQvRvn17jWJ75513cPToUYwZMwaNGzeGRMKKORGRqTLsv5r1XHPne8i820jsMPTeIxfAOl/sKPRfWWN7WN0uEuRccXFxmDt3riDnItIFLy8vtfexsbGYM2dOpf3u3r0LmUxWqfLk5uaGixcvVnnuNm3aYMOGDfDz80NhYSE+/vhj9OjRA+fPn0eTJk2eGdv+/fuxd+9e9OzZU/MLIiIio8THAg0cx15pRhdjrwxpYeHo6GgUFhaqXjk5OdXvnJEBdO+u3BKJJCcnR+2eFXIWvu7duyMsLAwBAQHo06cPdu/eDRcXF6xdu1aj4xs0aICGDRsKFg8RERkuJldapouxV8awsLAxjL3SBaHGXllZWcHBwUHtVa2SEuDkSeWWSCRP36/VTRjh7OwMc3Nz5OXlqbXn5eXB3d1do74sLCzQqVMnXLlyRaP9P/roI8TExKC0tFSj/YmIyHgxuSKNsHqlYR8GVL0iMkaWlpYIDAxEYmKiqk0ulyMxMRHdu3fX6BwymQznzp1D48aNNdp/2bJlOHjwINzc3NCxY0d07txZ7UVERKaDY650QBdjr4xhYWFdjL0yhoWFhRx7RWSMoqKiEB4ejqCgIHTt2hXx8fEoKSlBREQEACAsLAyenp6Ii4sDAMybNw/dunWDj48PHjx4gKVLlyIrKwvvvPOORv0NHz5cW5dCREQGhskVacwYZg7UBUObOZDI2IwaNQr5+fmIiYlBbm4uAgICcODAAdUkF9nZ2TAz++vBjfv372PcuHHIzc1FgwYNEBgYiBMnTqBdu3Ya9RcbG6uV6yAiIsMjUSg4x7ImpFIpHB0dEbLvXdSzrdvikLqYOVDb1StdJFe6mDlQ29UrXSRXT6pXFbIyJF74GIWFhTWPnarBk/u7ynMUFAD79gGDBwMctE86VuO9qUcePHiAXbt24erVq5g+fToaNmyI1NRUuLm5wdPTU+zwiIhIR1iG0CFjmJqd1SvNGFX1qmFDYPRosaMg0ltpaWkICQmBo6Mjrl+/jnHjxqFhw4bYvXs3srOzsWXLFrFDJCIiHeGEFkaGMwdqRheTW2ibUDMHPlN+PrB6tXJLRJVERUVh7NixuHz5MqytrVXtgwcPxq+//ipiZEREpGtMrnRMF1Oza5suZg40hqnZjWbmwJwcYNIk5ZaIKjl9+jTefffdSu2enp7Izc0VISIiIhILkysjZAzVK11g9YqIhGBlZQWpVFqp/dKlS3BxMYJvioiISGNMrkTA6pVmWL0iIkMwdOhQzJs3D48fK8dZSiQSZGdnY+bMmRg5cqTI0RERkS4xuTJSrF5pxiiqV+52YodAZNKWLVuG4uJiuLq64uHDh+jTpw98fHxgb2+PBQsWiB0eERHpEKd9EwlnDtSMLhYW1jaDnznQ3h546SXllogqcXR0xKFDh5CUlIS0tDQUFxejc+fOCAkJETs0IiLSMZNJrn755Rf069evys9Wr16NiRMn6jgi7bNu9FDr614Zw9Tspc4Sra97ZdBatQIOHhQ7CiK916tXL/Tq1UvsMIiISESG/VdxLbz66qs4fPgwAgMD1do//fRTzJ49W5TkyhiqV7rA6pXIZDKgpASwtQXMzcWOhkgvrFixQuN9J0+erMVIiIhIn5hMcrV06VIMGjQIv/76K9q2bQtA+Zz8vHnzsHfvXpGj0x5WrzTD6lUNzp4FAgOBlBSgc2exoyHSC5988ona+/z8fJSWlsLJyQkA8ODBA9SvXx+urq5MroiITIhh/0VcC++88w4KCgoQEhKCpKQk7NixAwsXLsS+ffvQs2dP0eJi9UozrF4RkT7JzMxU/fzVV1/hs88+wxdffIE2bdoAADIyMjBu3Lgq178iIiLjZTLJFQDMmDED9+7dQ1BQEGQyGQ4ePIhu3bqJHZbWsXqlGV1Ur5hgERmf2bNnY9euXarECgDatGmDTz75BK+99hpCQ0NFjI6IiHTJsP8afoaqnon39PRE/fr18Y9//AOnTp3CqVOnAIj7TDyrV5oxhuoVERmf27dvo6Ki8tp/MpkMeXl5IkRERERiMerk6uln4p8wNzfH8ePHcfz4cQDKBR+N/Zl4Vq80w+oVEdXWiy++iHfffReff/45Ov9vXGJKSgrGjx/P6diJiEyMYf8l/Ax/fyZe3+mieqWLBEvbWL0SQceOwJ07wP8G6hORug0bNiA8PBxBQUGwsLAAAFRUVGDAgAH4/PPPRY6OiIh0yaiTK9I9Vq807MOQqlcWFoCLi9hREOktFxcX7Nu3D5cuXcLFixcBAG3btkXr1q1FjoyIiHTNTOwAdGXkyJFYvHhxpfYlS5bgn//8pwgRVdbc+Z7W+7Bu9FDrfWjbIx38nV/qLNF+J4bi6lVg6FDlloiq1bp1awwdOhRDhw5lYkVEZKIMu8RQC7/++ivmzJlTqX3QoEFYtmyZ7gMyYsZQvdIFg6leFRYCe/YAVfz7ISLlxBWbNm1CYmIi7ty5A7lcrvb5zz//LFJkRESkaybzF3BxcTEsLS0rtVtYWEAqlYoQUdU49kozuhh7xYWFiUgTU6ZMwaZNm/Dyyy+jQ4cOkEhY+SYiMlUmk1x17NgRO3bsQExMjFr79u3b0a5dO43P084hF5dkzYQOT40xTM3O6pVmDKZ6RUTV2r59O3bu3InBgweLHQoREYnMZP76nT17Nl599VVcvXoVL7zwAgAgMTERX3/9Nb755huRo9M9Vq80w+oVET2LpaUlfHx8xA6DiIj0gMlMaDFkyBB8//33uHLlCiZMmID3338fN27cwOHDhzF8+PBanauj4y3tBPk3upjcQtseO1deVFNoupjcQttK3SzEDqFmnp7AsmXKLRFV8v777+PTTz+FQsEvYoiITJ3JVK4A4OWXX8bLL78sdhh6wxiqV7pg8tUrNzcgKkrsKIhqZfXq1Vi6dClyc3Ph7++PlStXomvXrs88bvv27XjzzTcxbNgwfP/99xr1lZSUhF9++QX79+9H+/btVWtdPbF79+66XAIRERkgk0quACAlJQXp6ekAgPbt26NTp051Ok9Hx1s4V+ghZGiVcOyVZoxhYWG9Hnt1/z5w+DAQEgI0aCB2NETPtGPHDkRFRSEhIQHBwcGIj4/HgAEDkJGRAVdX12qPu379OqZNm4bevXvXqj8nJyeMGDHiecMmIiIjYDLJ1Z07d/DGG2/gyJEjcHJyAgA8ePAA/fr1w/bt2+FSh0VSdZFgaRurV5ox6epVZibw+utASgqTKzIIy5cvx7hx4xAREQEASEhIw
N69e7FhwwbMmjWrymNkMhlCQ0Mxd+5cHDt2DA8ePNC4v40bNwoRNhERGQGTGXP13nvvoaioCOfPn0dBQQEKCgrw559/QiqVYvLkyWKHVy2OvdIMx17VTllZGaRSqdqLSJ89fb+WlZVVuV95eTlSUlIQEhKiajMzM0NISAiSk5OrPf+8efPg6uqKt99+u07xVVRU4PDhw1i7di2KiooAALdu3UJxcXGdzkdERIbJZJKrAwcO4LPPPoOvr6+qrV27dli9ejX2799f5/PqYnILbbNu9FDrfegiwdK2UmfjWbsmLi4Ojo6OqpeXl5fYIRHVyMvLS+2ejYuLq3K/u3fvQiaTwc3NTa3dzc0Nubm5VR6TlJSEL774AuvXr69TbFlZWejYsSOGDRuGiRMnIj9f+Zzy4sWLMW3atDqdk4iIDJPJJFdyubzSIGNAuYiwXC4XISLNGUP1ShdYvdJcdHQ0CgsLVa+cnByd9EtUVzk5OWr3bHR0tCDnLSoqwpgxY7B+/Xo4OzvX6RxTpkxBUFAQ7t+/Dxubvx6zHjFiBBITEwWJk4iIDIPJjLl64YUXMGXKFHz99dfw8FCOk7p58yYiIyPx4osvPte5OfZKM8awsLCxjL2ysrKClZWVZjvb2ACdOim3RCJxcHCAg4PDM/dzdnaGubk58vLy1Nrz8vLg7u5eaf+rV6/i+vXrGDJkiKrtyRdu9erVQ0ZGBlq2bFljn8eOHcOJEydgaWmp1u7t7Y2bN28+M2YiIjIeJlO5WrVqFaRSKby9vdGyZUu0bNkSzZs3h1QqxcqVK8UO75lYvdIMq1da4OsLpKYqt0R6ztLSEoGBgWoVI7lcjsTERHTv3r3S/m3btsW5c+dw5swZ1Wvo0KHo168fzpw5o9Ejs3K5HDKZrFL7jRs3YG9v/3wXREREBsWwywi14OXlhdTUVBw+fBgXL14EAPj6+qoNen4erF5phtUrDfvQ56nZifRcVFQUwsPDERQUhK5duyI+Ph4lJSWq2QPDwsLg6emJuLg4WFtbo0OHDmrHP5lR9un26rz00kuIj4/HunXrAAASiQTFxcWIjY3F4MGDhbswIiLSe4b9V24tSSQS9O/fH/379xc7lDrRxbpXxjA1uzGse6VX/vgD6NYNOHlS+XggkZ4bNWoU8vPzERMTg9zcXAQEBODAgQOqSS6ys7NhZibcgxvLli3DgAED0K5dOzx69AhvvfUWLl++DGdnZ3z99deC9UNERPpPolAoDH8ASTVWrFih8b7Pmo5dKpXC0dERk5OGwcqu+se2tF290sWiwrpIrrRdvdJFcqWLsVeaVK8qKh4h6ec5KCws1GhMSlWe3N9VniM1FQgMVK5z1blznc5PVFc13pt6pKKiAtu3b0daWhqKi4vRuXNnhIaGqk1wQURExs+oK1effPKJRvtJJBK9Xuvq71i90gyrV0SkS/Xq1cPo0aPFDoOIiERm1MlVZmamzvs0hrFXusCxVxr2wbFXRAYhIyMDK1euRHp6OgDlmN5Jkyahbdu2IkdGRES6ZDKzBRoTXcwcqIuFhbVNFzMHGtPCwkRUN99++y06dOiAlJQU+Pv7w9/fH6mpqejYsSO+/fZbscMjIiIdMuzSQS0oFArs2rULv/zyC+7cuVNp4eDdu3cL1herV5oxhuqVLohevfL1Bf78E2jRQrwYiPTYjBkzEB0djXnz5qm1x8bGYsaMGRg5cqRIkRERka6ZTOVq6tSpGDNmDDIzM2FnZwdHR0e1l6Fh9UozrF4JwMYGaN+eiwgTVeP27dsICwur1D569Gjcvn1bhIiIiEgsJlM22Lp1K3bv3q2zNUd0Ub3SxeQW2sbqlWZErV5lZQEffQTMng00ayZODER6rG/fvjh27Bh8fHzU2pOSktC7d2+RoiIiIjGYzF+1jo6OaMHHmmqNMwdqRheTW4jm3j3giy+ACROYXBFVYejQoZg5cyZSUlLQrVs3AMDJkyfxzTffYO7cufjxxx/V9iUiIuNl1Otc/d3mzZtx4MABbNiwoU7rjmi6ztXTdDH2StvVK2NY9wrQfoIl1rpXXOeKjJkhrHOl6YLEEokEMplMy9EQEZGYTKZy9frrr+Prr7+Gq6srvL29YWGhniClpqaKFJn+M4bqlS4YdfWKiKr19ARJRERkukwmuQoPD0dKSgpGjx4NNzc3SCS6mYSAY680o4uxV8awsLDoMwcSUY0ePXoEa2trscMgIiKRmExytXfvXhw8eBC9evXSed/GMDU7q1eaMcrqlZsbMGuWcktElchkMixcuBAJCQnIy8vDpUuX0KJFC8yePRve3t54++23xQ6RiIh0xGSmYvfy8tLb5/WFoIup2bXtsXOF1vvQxdTs2lbqpvmYP0F4egJxccotEVWyYMECbNq0CUuWLIGlpaWqvUOHDvj8889FjIyIiHTNZJKrZcuWYcaMGbh+/boo/Xd0vCVKv0LSxbpXukiwtM3o1r0qKgKOHFFuiaiSLVu2YN26dQgNDYW5ubmq3d/fHxcvXhQxMiIi0jWTeSxw9OjRKC0tRcuWLVG/fv1KE1oUFBSIFJlwjGHslS5w7FUtXb4M9OvH2QKJqnHz5s1Ka1wByokuHj/mGEkiIlNiMslVfHy82CFw7JWGjGFhYaMce0VEVWrXrh2OHTuGZk+tA7dr1y506tRJpKiIiEgMhv0XbC2Eh4drtN+iRYvw73//G05OTtoNSEtYvdIMq1dEJJSYmBiEh4fj5s2bkMvl2L17NzIyMrBlyxb89NNPYodHREQ6ZDJjrjS1cOFCrT4iyLFXmuHYKw370PXkFkRUybBhw7Bnzx4cPnwYtra2iImJQXp6Ovbs2YP+/fuLHR4REemQyVSuNKVQGP6jXLqoXhnD1OzGUL3SCQsL5UyBFkzkiKrTu3dvHDp0SOwwiIhIZKxcicAYqle6wOqVZh66aDnp6dgRuHFDuSUiIiKiarFyZaRYvdIMq1dEVBcNGjSARKLZlyfGMBstERFphsmVSIxh5kBd4MyBeuDcOWDQIGD/flaviP7n7zPQ3rt3D/Pnz8eAAQPQvXt3AEBycjIOHjyI2bNnixQhERGJgY8FGrHmzve03ocuJrfQtkcu2u/DoBcWfvwYuHlTuSUyEKtXr4a3tzesra0RHByMU6dOVbvv7t27ERQUBCcnJ9ja2iIgIABbt26t8fzh4eGq1/HjxzFv3jx8/fXXmDx5MiZPnoyvv/4a8+bNw9GjR4W+NCIi0mNMrp7Su3dv2Njo5lE3XYy90kWCpW3GMPaKiHRnx44diIqKQmxsLFJTU+Hv748BAwbgzp07Ve7fsGFDfPDBB0hOTkZaWhoiIiIQERGBgwcPatTfwYMHMXDgwErtAwcOxOHDh5/rWoiIyLCYVHJ19epVfPjhh3jzzTdV/ye7f/9+nD9/XrXPvn370LhxY7FCNEisXmnGoKtXRAZk+fLlGDduHCIiItCuXTskJCSgfv362LBhQ5X79+3bFyNGjICvry9atmyJKVOmwM/PD0lJSRr116hRI/zwww+V2n/44Qc0asR1B4mITInJJFdHjx5Fx44d
8dtvv2H37t0oLi4GAJw9exaxsbGixcXqlWZYvSIiTZSXlyMlJQUhISGqNjMzM4SEhCA5OfmZxysUCiQmJiIjIwP/+Mc/NOpz7ty5mDlzJoYMGYL58+dj/vz5GDJkCGbNmoW5c+fW+VqIiMjwmExyNWvWLMyfPx+HDh2CpaWlqv2FF17AyZMnRYzMOLB6pRl9qV6VlZVBKpWqvarVqhXwyy/KLZFInr5fy8rKqtzv7t27kMlkcHNzU2t3c3NDbm5utecvLCyEnZ0dLC0t8fLLL2PlypUaLwA8duxYHD9+HA4ODti9ezd2794NBwcHJCUlYezYsRpfIxERGT7DnoatFs6dO4evvvqqUrurqyvu3r0rQkR/0cXMgbqYml3bdDFzoKlMzR4XF6f5N+r29kDfvlqNh+hZvLy81N7HxsZizpw5gp3f3t4eZ86cQXFxMRITExEVFYUWLVqgr4b3fnBwMLZt2yZYPEREZJhMJrlycnLC7du30bx5c7X2P/74A56eniJFZVyMYd0rXdCHqdmjo6MRFRWlei+VSiv98apy8yawahUwaRLAfyskkpycHDg4OKjeW1lZVbmfs7MzzM3NkZeXp9ael5cHd3f3as9vZmYGHx8fAEBAQADS09MRFxencXJFREQEmNBjgW+88QZmzpyJ3NxcSCQSyOVyHD9+HNOmTUNYWJjY4XHslYZ0MfZKF48His3KygoODg5qr2rl5QGLFim3RCJ5+n6tLrmytLREYGAgEhMTVW1yuRyJiYmqNag0IZfLq330kIiIqDomk1wtXLgQbdu2hZeXF4qLi9GuXTv84x//QI8ePfDhhx+KHR4A3SRY2mYMY690QV/GXhEZo6ioKKxfvx6bN29Geno6xo8fj5KSEkRERAAAwsLCEB0drdo/Li4Ohw4dwrVr15Ceno5ly5Zh69atGD16tFiXQEREBspkHgu0tLTE+vXrERMTg3PnzqG4uBidOnVCKxMbpM+xV5oxlbFXRMZo1KhRyM/PR0xMDHJzcxEQEIADBw6oJrnIzs6Gmdlf3y2WlJRgwoQJuHHjBmxsbNC2bVt8+eWXGDVqlFiXQEREBkqiUCjEHfwhEplMhnPnzqFZs2Zo0KDBM/eXSqVwdHTE5KRhsLKz0Gps2p7cQhfJlS7GXmk7wdJFciXE2CtZ+SOk7PgAhYWFNT/eV4Mn93eV50hNBQIDgZQUoHPn546XqDZqvDeJiIj0jMlUrqZOnYqOHTvi7bffhkwmQ58+fXDixAnUr18fP/30k0kNWjaG6pUusHr1P40aAW+/rdwSEQDg1Vdf1Xjf3bt3azESIiLSJyYz5mrXrl3w9/cHAOzZswfXrl3DxYsXERkZiQ8++EDj83SxvaatEFU49kozxrCwsEGMvWrWDPj8c+WWiAAAjo6OGr+IiMh0mEzl6u7du6ppePft24fXX38drVu3xv/93//h008/rdW5utldwcliH22EqTOsXmmG1SsADx8C164BLVoANpxqnwgANm7cKHYIRESkh0ymcuXm5oYLFy5AJpPhwIED6N+/PwCgtLQU5ubmIkdXGatXmmH1SgfS04EOHZRbIiIiIqqWyVSuIiIi8Prrr6Nx48aQSCQICQkBAPz2229o27Ztrc/H6pVmjGFhYVaviOhZdu3ahZ07dyI7Oxvl5eVqn6WmpooUFRER6ZrJVK7mzJmDL774Av/6179w/Phx1QKU5ubmauud6BNjqF7pAqtXRCSmFStWICIiAm5ubvjjjz/QtWtXNGrUCNeuXcOgQYPEDo+IiHTIZCpX8+bNU/28YcMGtc+ysrIwdOjQWp+T1SvNsHpFRMbss88+w7p16/Dmm29i06ZNmDFjBlq0aIGYmBgUFBSIHR4REemQySRX3333ndr7x48fIzMzE/Xq1UPLli0RExMjUmQ16+h4S+vrXhkDXSwsrG2lzhJB1r0SnEQCWFoqt0RUSXZ2Nnr06AEAsLGxQVFREQBgzJgx6NatG1atWiVmeEREpEOG/ddoLfzxxx+V2qRSKcaOHYsRI0bU+bysXmmG1SvN6GWC1akTUFYmdhREesvd3R0FBQVo1qwZmjZtipMnT8Lf3x+ZmZlQKPTs3zMREWmVyYy5qoqDgwPmzp2L2bNnix1KjXQx9qq58z2t96FtxjD2iogMzwsvvIAff/wRgHLypMjISPTv3x+jRo16ri/viIjI8JhM5ao6hYWFKCwsfK5zGEP1ShdYvdKM3lWv0tOB0FBg2zbA11fsaIj0zrp16yCXywEAEydORKNGjXDixAkMHToU7777rsjRERGRLplMcrVixQq19wqFArdv38bWrVsNYjYnXYy9MoaFhY1h7JXeefgQ+OMP5ZaIKjEzM4OZ2V8Pgrzxxht44403RIyIiIjEYjJ/hX7yySdq783MzODi4oLw8HBBpmJn9UozrF5pRu+qV0SkJi0tDR06dICZmRnS0tJq3NfPz09HURERkdhMJrnKzMwUO4TnxuqVZnRRveLU7ESmLSAgALm5uXB1dUVAQAAkEkmVk1dIJBLIZDIRIiQiIjGYTHKlC6xeacYYqle6wOoVkf7KzMyEi4uL6mciIiLAxGcLNEScOVAzupg58JGL1rvQD82bAzt3KrdEBABo1qwZJP9b+y0rKwuenp5o1qyZ2svT0xNZWVkiR0pERLrE5Epg3eyuaL0PXSRY2mbdiJMjaKLUWQ8W7m3QAPjnP5VbIqqkX79+KCgoqNReWFiIfv36iRARERGJhckVVYnVK82YRPUqLw9Yvly5JaJKFAqFqor1d/fu3YOtra0IERERkVg45koLdDH2SheTW2ibLsZeGcPU7KKPvbp5E3j/faBvX8DNTbw4iPTMq6++CkA5acXYsWNhZWWl+kwmkyEtLQ09evQQKzwiIhKBYf/VSVplDDMH6gJnDiQyTY6OjgCUlSt7e3vY2Pz1ZZGlpSW6deuGcePGiRUeERGJgMmVlrB6pRlWrzQjevWKiCrZuHGjavr1lStXws7OTuSIiIhIbBxzpUW6mNxC24xh7JUumMTYKyKqRKFQYNu2bbh9+7bYoRARkR5gcmXgOHOgZnQxuYW2iTZzoKMjMGSIcktkIFavXg1vb29YW1sjODgYp06dqnbf9evXo3fv3mjQoAEaNGiAkJCQGvf/OzMzM7Rq1Qr37vGLKCIiYnKldaxeacYYpmY32upVy5bAjz8qt0QGYMeOHYiKikJsbCxSU1Ph7++PAQMG4M6dO1Xuf+TIEbz55pv45ZdfkJycDC8vL7z00ku4efOmRv0tWrQI06dPx59//inkZRARkQGSKJ48ME41kkqlcHR0xNY/OqK+vXmtjtX22CsAWh97pYuJLbQ99gqA1sde6WJii6fHXsnKHyFlxwcoLCyEg4NDnc755P6u8hyPHwMPHgBOToCFRd2CJqqjGu/NagQHB6NLly5YtWoVAEAul8PLywvvvfceZs2a9czjZTIZGjRogFWrViEsLOyZ+zdo0AClpaWoqKiApaWl2sQWAKpcA4uIiIyTYY/yNxC6mNxC23Qxc6AuJrfQNl3MHKjzyS3OnQM
CA4GUFKBzZ931S1QH5eXlSElJQXR0tKrNzMwMISEhSE5O1ugcpaWlePz4MRo2bKjR/vHx8XUJlYiIjBCTKyNhDDMH6oIxzBwohLKyMpSVlaneS6VSEaMheran71ErKyu1daWeuHv3LmQyGdyeWpPNzc0NFy9e1KivmTNnwsPDAyEhIRrtHx4ertF+RERk/DjmSkc49kozHHulmeed3CIuLg6Ojo6ql5eXl0CREWmHl5eX2j0bFxenlX4WLVqE7du347vvvoO1tXWtj3/06BGkUqnai4iITAe/wjciuqheGcPCwqxeAdHR0YiKilK9l0ql1SZYRzLuoC+AlT9fxq3rNY250uxRxdqM8tR0X4U2+tZ0P4FjrE3nmseo4e9Hw/Mpz6nhfhqer627PSb2q/7x6ZycHLUxV1VVrQDA2dkZ5ubmyMvLU2vPy8uDu7t7jTF8/PHHWLRoEQ4fPgw/Pz8NIwdKSkowc+ZM7Ny5s8pZA2UymcbnIiIiw2baf2HqmDGMvdIFjr3SzPOMvarukaqqpN+Woi+AA3/m4vxdLpJK2vGg1LnG5MrBwUGjCS0sLS0RGBiIxMREDB8+HIByQovExERMmjSp2uOWLFmCBQsW4ODBgwgKCqpV7DNmzMAvv/yCNWvWYMyYMVi9ejVu3ryJtWvXYtGiRbU6FxERGTYmV0aG1SvNsHqlubYDeyPBJxUDrW0wwLzmmTI1eVhRouETjRJNdxSyTw2uQPNzabifBjtqEpem59KUpr9/oa7Tw0m4L1SioqIQHh6OoKAgdO3aFfHx8SgpKUFERAQAICwsDJ6enqpHCxcvXoyYmBh89dVX8Pb2Rm5uLgDAzs4OdnbP/kJhz5492LJlC/r27YuIiAj07t0bPj4+aNasGbZt24bQ0FDBro2IiPQb/7rUMVavNMPqlWZKnSWw0vI60v3aNUa/do212wmRgEaNGoX8/HzExMQgNzcXAQEBOHDggGqSi+zsbJiZ/TXkeM2aNSgvL8drr72mdp7Y2FjMmTPnmf0VFBSgRYsWAJQVtidTr/fq1Qvjx48X6KqIiMgQMLkyQqxeaUYX1StdJFhad/kyMGkSsGoV0KqV2NEQaWTSpEnVPgZ45MgRtffXr19/rr5atGiBzMxMNG3aFG3btsXOnTvRtWtX7NmzB05OTs91biIiMiycLVAEupg5sKOjlssZOmAMMwfqQmkjAZ8Fq0pREfDf/yq3RFRJREQEzp49CwCYNWsWVq9eDWtra0RGRmL69OkiR0dERLrEyhXVGatXmjGK6hURVSsyMlL1c0hICC5evIiUlBT4+PjUatZBIiIyfKxciYTVK82wekVE+koul2Px4sXo2bMnunTpglmzZuHhw4do1qwZXn31VSZWREQmiMkVPRddLCysbY+dK7Tehy4WFiYi3VqwYAH+85//wM7ODp6envj0008xceJEscOqtTlz5iAgIEDsMFSuX78OiUSCM2fOiB0KEVGtMbkSEatXmtFF9UoXCZbB8vJSTmZRzSLDRKZqy5Yt+Oyzz3Dw4EF8//332LNnD7Zt2wa5XC52aNWSSCT4/vvvxQ6DiMhoMbmi52YM1StdMNjqlYsLMHGicktEKtnZ2Rg8eLDqfUhICCQSCW7dMvwvtZ6lvLxc7BCIiPQSkyuRsXqlGVavRFRQAHz5pXJLRCoVFRWwtrZWa7OwsMDjx4+12m/fvn0xefJkzJgxAw0bNoS7u7tG63F5e3sDAEaMGAGJRKJ6/8TWrVvh7e0NR0dHvPHGGyj62wyhffv2xaRJkzB16lQ4OztjwIABAIA///wTgwYNgp2dHdzc3DBmzBjcvXtXddyBAwfQq1cvODk5oVGjRnjllVdw9epVtX5PnTqFTp06wdraGkFBQfjjjz/UPr9//z5CQ0Ph4uICGxsbtGrVChs3bqzFb4yISHeYXOkBXSRY2sbqlWYMsnp1/TowZoxyS0QqCoUCY8eOxauvvqp6PXr0CP/+97/V2rRh8+bNsLW1xW+//YYlS5Zg3rx5OHToUI3HnD59GgCwceNG3L59W/UeAK5evYrvv/8eP/30E3766SccPXoUixYtqtSnpaUljh8/joSEBDx48AAvvPACOnXqhN9//x0HDhxAXl4eXn/9ddUxJSUliIqKwu+//47ExESYmZlhxIgRqkcni4uL8corr6Bdu3ZISUnBnDlzMG3aNLV+Z8+ejQsXLmD//v1IT0/HmjVr4Ozs/Fy/PyIibeFU7CZCFwsLa5t1o4d4dM9Gq33oYmp2IjIO4eHhldpGjx6tk779/PwQGxsLAGjVqhVWrVqFxMRE9O/fv9pjXP73aK+TkxPc3d3VPpPL5di0aRPs7e0BAGPGjEFiYiIWLFig2qdVq1ZYsmSJ6v38+fPRqVMnLFy4UNW2YcMGeHl54dKlS2jdujVGjhyp1s+GDRvg4uKCCxcuoEOHDvjqq68gl8vxxRdfwNraGu3bt8eNGzcwfvx41THZ2dno1KkTgoKCAKBSxY2ISJ/wr0g90c3uCk4W+4gdxnPRxbpXukiwtI3rXhEZBzEfTXt6mvfGjRvjzp07dT6ft7e3KrGq7nyBgYFq78+ePYtffvkFdnZ2lc539epVtG7dGpcvX0ZMTAx+++033L17V1Wxys7ORocOHZCeng4/Pz+1xyu7d++udq7x48dj5MiRSE1NxUsvvYThw4ejR48edb5WIiJtYnJlQoyheqULrF4Rkb6zsLBQey+RSJ5rlkJNzmdra6v2vri4GEOGDMHixYsrna9x48YAgCFDhqBZs2ZYv349PDw8IJfL0aFDh1pNiDFo0CBkZWVh3759OHToEF588UVMnDgRH3/8scbnICLSFY650iMce6UZY1hY2KDGXtnaAt26KbdEZNAsLCwgk8kEOVfnzp1x/vx5eHt7w8fHR+1la2uLe/fuISMjAx9++CFefPFF+Pr64v79+2rn8PX1RVpaGh49eqRqO3nyZKW+XFxcEB4eji+//BLx8fFYt26dINdARCQ0JlcmxhhmDtQFzhz4N23aAMnJyi0RGTRvb28kJiYiNze3UqJTWxMnTkRBQQHefPNNnD59GlevXsXBgwcREREBmUyGBg0aoFGjRli3bh2uXLmCn3/+GVFRUWrneOuttyCRSDBu3DhcuHAB+/btq1SRiomJwQ8//IArV67g/Pnz+Omnn+Dr6/tcsRMRaQuTKz3D6pVmWL0iIqq9ZcuW4dChQ/Dy8kKnTp2e61weHh44fvw4ZDIZXnrpJXTs2BFTp06Fk5MTzMzMYGZmhu3btyMlJQUdOnRAZGQkli5dqnYOOzs77NmzB+fOnUOnTp3wwQcfVHrM0NLSEtHR0fDz88M//vEPmJubY/v27c8VOxGRtkgUCoVC7CAMgVQqhaOjIy6mu+G8RWOt9qWLiS10MfZK25Nb6GJiC22PvRJiYgtZ2SOkf/YfFBYWwsHBoU7neHJ/V3mO1FQgMBBISQE6d37+gIlqocZ7k4iISM+wcqWHjKF6pQusXhERERGRPmFyVQfdrPPEDuG56WLslTEsLKyLsVdMsIhICNu2bYOdnV
IiIiIiKSAcMVERERERGRDBiuiIiIiIiIZPA/gcFmmEnCPq4AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "from skopt.plots import plot_objective\n", + "\n", + "plot_objective(res)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + " fun: 0.10675194263458251\n", + " x: [True, True, 6, 2048]\n", + " func_vals: [ 1.373e-01 1.390e-01 ... 1.127e-01 1.138e-01]\n", + " x_iters: [[True, True, 5, 1300], [False, True, 5, 990], [True, True, 7, 1800], [False, False, 10, 1692], [False, True, 6, 1075], [True, False, 3, 291], [False, True, 3, 514], [False, False, 11, 1569], [False, False, 7, 1915], [False, True, 10, 1514], [False, False, 11, 1527], [False, False, 12, 2033], [False, True, 9, 3], [False, True, 1, 2004], [True, True, 12, 1], [False, False, 6, 2048], [False, False, 4, 2048], [False, False, 10, 1], [False, True, 11, 2048], [False, True, 9, 2048], [False, False, 8, 2017], [False, False, 6, 1], [False, True, 4, 1], [False, False, 6, 1587], [False, False, 9, 1056], [True, True, 12, 1450], [False, True, 6, 2048], [False, False, 6, 2048], [False, False, 6, 2048], [False, True, 6, 2048], [False, True, 6, 2048], [False, True, 5, 2048], [False, True, 6, 1464], [False, True, 8, 1], [True, True, 12, 1798], [True, False, 3, 2048], [True, True, 11, 683], [False, True, 11, 1], [True, True, 2, 1], [False, True, 11, 1238], [True, True, 11, 1260], [True, False, 6, 1295], [True, True, 6, 1292], [False, False, 12, 1250], [False, False, 12, 1200], [True, False, 4, 1250], [False, False, 12, 1191], [False, False, 12, 1180], [True, False, 10, 906], [False, False, 12, 1192], [True, True, 10, 2044], [False, False, 6, 1310], [False, False, 8, 1122], [True, False, 5, 4], [False, False, 7, 322], [False, False, 12, 1246], [False, False, 12, 1247], [False, False, 12, 1252], [True, True, 12, 811], [True, False, 6, 2048], [True, True, 12, 998], [False, True, 12, 1021], [False, True, 12, 1021], [False, True, 12, 1019], [True, False, 6, 759], [True, False, 6, 1064], [False, True, 12, 991], [True, True, 9, 533], [False, False, 11, 956], [False, False, 1, 3], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [False, False, 7, 986], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048], [True, True, 6, 2048]]\n", + " models: [GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), 
... (remaining GaussianProcessRegressor surrogate models omitted for brevity; each repeats the same kernel and settings as the entries shown) ..., GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1],
nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097), GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5) + WhiteKernel(noise_level=1),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097)]\n", + " space: Space([Categorical(categories=(True, False), prior=None),\n", + " Categorical(categories=(True, False), prior=None),\n", + " Integer(low=1, high=12, prior='uniform', transform='normalize'),\n", + " Integer(low=1, high=2048, prior='uniform', transform='normalize')])\n", + " random_state: RandomState(MT19937)\n", + " specs: args: func: \n", + " dimensions: Space([Categorical(categories=(True, False), prior=None),\n", + " Categorical(categories=(True, False), prior=None),\n", + " Integer(low=1, high=12, prior='uniform', transform='normalize'),\n", + " Integer(low=1, high=2048, prior='uniform', transform='normalize')])\n", + " base_estimator: GaussianProcessRegressor(kernel=1**2 * Matern(length_scale=[1, 1, 1, 1], nu=2.5),\n", + " n_restarts_optimizer=2, noise='gaussian',\n", + " normalize_y=True, random_state=1248744097)\n", + " n_calls: 100\n", + " n_random_starts: None\n", + " n_initial_points: 10\n", + " initial_point_generator: random\n", + " acq_func: gp_hedge\n", + " acq_optimizer: auto\n", + " x0: None\n", + " y0: None\n", + " random_state: RandomState(MT19937)\n", + " verbose: False\n", + " callback: None\n", + " n_points: 10000\n", + " n_restarts_optimizer: 5\n", + " xi: 0.01\n", + " kappa: 1.96\n", + " n_jobs: 1\n", + " model_queue_size: None\n", + " function: base_minimize" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +}