Merge pull request #136 from jmorganca/brucemacd/remove-models

Delete models.json
This commit is contained in:
Bruce MacDonald 2023-07-20 16:40:46 +02:00 committed by GitHub
commit a3d7bb0a30
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@ -1,38 +0,0 @@
[
{
"name": "orca",
"display_name": "Orca Mini",
"parameters": "3B",
"url": "https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_1.bin",
"short_description": "Follow instructions. Great small model that runs fast even without GPU support.",
"description": "An OpenLLaMa-3B model trained on explain tuned datasets, created using Instructions and Input from WizardLM, Alpaca & Dolly-V2 datasets and applying Orca Research Paper dataset construction approaches.",
"published_by": "TheBloke",
"original_author": "psmathur",
"original_url": "https://huggingface.co/psmathur/orca_mini_3b",
"license": "CC-BY-SA-4.0"
},
{
"name": "nous-hermes",
"display_name": "Nous Hermes",
"parameters": "13B",
"url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q2_K.bin",
    "short_description": "Currently one of the best 13B general models.",
"description": "It is suitable for a wide range of language tasks, from generating creative text to understanding and following complex instructions. This model was fine-tuned by Nous Research, with Teknium and Karan4D leading the fine tuning process and dataset curation, Redmond AI sponsoring the compute, and several other contributors. The result is an enhanced Llama 13b model that rivals GPT-3.5-turbo in performance across a variety of tasks. \n \n This model stands out for its long responses, low hallucination rate, and absence of OpenAI censorship mechanisms. The fine-tuning process was performed with a 2000 sequence length on an 8x a100 80GB DGX machine for over 50 hours.",
"published_by": "TheBloke",
"original_author": "NousResearch",
"original_url": "https://huggingface.co/NousResearch/Nous-Hermes-13b",
"license": "GPL"
},
{
"name": "vicuna",
"display_name": "Vicuna",
"parameters": "7B",
"url": "https://huggingface.co/TheBloke/vicuna-7B-v1.3-GGML/resolve/main/vicuna-7b-v1.3.ggmlv3.q4_0.bin",
"short_description": "Vicuna is a chat assistant trained by fine-tuning LLaMA on user-shared conversations collected from ShareGPT.",
"description": "The primary use of Vicuna is research on large language models and chatbots. The primary intended users of the model are researchers and hobbyists in natural language processing, machine learning, and artificial intelligence.",
"published_by": "TheBloke",
"original_author": "LMSYS",
"original_url": "https://huggingface.co/lmsys/vicuna-7b-v1.3",
    "license": "Non-commercial"
}
]