models.json

{
  "available": [
    {
      "name": "Llava",
      "version": "1.5",
      "url": "https://huggingface.co/jartine/llava-v1.5-7B-GGUF/resolve/main/llava-v1.5-7b-q4-main.llamafile",
      "server-url": "https://huggingface.co/jartine/llava-v1.5-7B-GGUF/resolve/main/llava-v1.5-7b-q4-server.llamafile",
      "parameters": [
        {
          "switch": "p",
          "explanation": "Prompt to be passed into the model"
        },
        {
          "switch": "image",
          "explanation": "Image to be provided to the model"
        },
        {
          "switch": "temp",
          "explanation": "Model temperature setting (default: 7)"
        }
      ]
    },
    {
      "name": "Mistral-7B",
      "version": "0.1",
      "url": "https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile",
      "server-url": "https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-server.llamafile",
      "parameters": [
        {
          "switch": "p",
          "explanation": "Prompt to be passed into the model"
        },
        {
          "switch": "summarize",
          "explanation": "URL to be summarized"
        },
        {
          "switch": "temp",
          "explanation": "Model temperature setting (default: 7)"
        }
      ]
    },
    {
      "name": "WizardCoder-Python-13B",
      "version": "1",
      "url": "https://huggingface.co/jartine/wizardcoder-13b-python/resolve/main/wizardcoder-python-13b-main.llamafile",
      "server-url": "https://huggingface.co/jartine/wizardcoder-13b-python/resolve/main/wizardcoder-python-13b-server.llamafile",
      "parameters": [
        {
          "switch": "p",
          "explanation": "Prompt to be passed into the model"
        },
        {
          "switch": "temp",
          "explanation": "Model temperature setting (default: 7)"
        }
      ]
    }
  ]
}
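
For reference, a minimal sketch of how an application might consume this manifest, assuming it is read as plain JSON from disk. The consuming code is not shown in this file, so the loader function name and the printed output format below are hypothetical, not part of the project:

import json

def load_models(path="models.json"):
    # Hypothetical helper: read the manifest and return the "available" list.
    with open(path, "r", encoding="utf-8") as f:
        manifest = json.load(f)
    return manifest["available"]

if __name__ == "__main__":
    # List each model with its CLI and server llamafile URLs and its switches.
    for model in load_models():
        print(f"{model['name']} v{model['version']}")
        print(f"  CLI llamafile:    {model['url']}")
        print(f"  server llamafile: {model['server-url']}")
        for param in model["parameters"]:
            print(f"  -{param['switch']}: {param['explanation']}")

The "url" entry points at the command-line llamafile for each model and "server-url" at its server variant; the "parameters" list documents the command-line switches (for example -p for the prompt) that the entry expects.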