{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"system-1","owner":"HyperMink","isFork":false,"description":"System-1 Releases","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T23:04:22.015Z"}},{"type":"Public","name":"inferenceable","owner":"HyperMink","isFork":false,"description":"Scalable AI Inference Server for CPU and GPU with Node.js | Utilizes llama.cpp and parts of llamafile C/C++ core under the hood.","allTopics":["nodejs","ai","inference","vision","llama","llms","llava","llama3","phi3"],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":14,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T06:04:33.263Z"}},{"type":"Public","name":"llama.cpp","owner":"HyperMink","isFork":true,"description":"LLM inference in C/C++","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":8692,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T00:59:56.608Z"}}],"repositoryCount":3,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}