{"payload":{"pageCount":35,"repositories":[{"type":"Public","name":"faiss","owner":"facebookresearch","isFork":false,"description":"A library for efficient similarity search and clustering of dense vectors.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":23,"issueCount":346,"starsCount":29228,"forksCount":3468,"license":"MIT License","participation":[4,0,1,0,9,1,7,1,7,3,2,0,9,5,1,2,0,5,3,3,0,9,6,7,4,3,2,8,16,28,9,2,14,3,2,1,1,13,8,4,7,3,9,11,3,15,21,6,2,7,5,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T06:46:02.025Z"}},{"type":"Public","name":"DCPerf","owner":"facebookresearch","isFork":false,"description":"Repository of DCPerf benchmarks for external collaborations. ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":1,"forksCount":0,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T03:38:53.893Z"}},{"type":"Public","name":"fbpcs","owner":"facebookresearch","isFork":false,"description":" FBPCS (Facebook Private Computation Solutions) leverages secure multi-party computation (MPC) to output aggregated data without making unencrypted, readable data available to the other party or any third parties. Facebook provides impression & opportunity data, and the advertiser provides conversion / outcome data.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":95,"issueCount":0,"starsCount":140,"forksCount":157,"license":"MIT License","participation":[1,2,0,0,0,3,1,7,2,2,3,5,3,3,1,1,2,1,6,0,0,0,1,1,0,0,0,2,4,11,5,5,0,0,0,1,1,1,1,0,2,1,0,0,6,4,1,0,1,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T23:04:02.040Z"}},{"type":"Public","name":"co-tracker","owner":"facebookresearch","isFork":false,"description":"CoTracker is a model for tracking any point (pixel) on a video.","allTopics":["optical-flow","point-tracking","track-anything"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":4,"issueCount":35,"starsCount":2510,"forksCount":172,"license":"Other","participation":[0,0,10,7,1,0,0,0,1,1,1,0,0,0,0,0,0,3,0,0,0,0,0,0,1,6,3,2,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T20:15:15.252Z"}},{"type":"Public","name":"fairseq2","owner":"facebookresearch","isFork":false,"description":"FAIR Sequence Modeling Toolkit 2","allTopics":["python","machine-learning","deep-learning","pytorch","artificial-intelligence"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":45,"starsCount":613,"forksCount":59,"license":"MIT License","participation":[7,8,9,16,8,2,3,8,5,10,15,12,10,9,10,9,2,11,12,10,8,25,2,17,19,5,7,14,0,9,13,1,15,8,11,6,2,30,6,28,7,10,17,15,5,14,22,6,31,12,5,11],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T19:09:52.008Z"}},{"type":"Public","name":"fastMRI","owner":"facebookresearch","isFork":false,"description":"A large-scale dataset of both raw MRI measurements and clinical MRI images.","allTopics":["mri","convolutional-neural-networks","mri-reconstruction","fastmri","fastmri-challenge","fastmri-dataset","deep-learning","pytorch","medical-imaging"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":15,"starsCount":1270,"forksCount":370,"license":"MIT 
License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T18:31:13.158Z"}},{"type":"Public","name":"hot3d","owner":"facebookresearch","isFork":false,"description":"HOT3D: A dataset for egocentric 3D hand and object tracking","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":37,"forksCount":6,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,6,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T17:27:17.097Z"}},{"type":"Public","name":"generative-recommenders","owner":"facebookresearch","isFork":false,"description":"Repository hosting code used to reproduce results in \"Actions Speak Louder than Words: Trillion-Parameter Sequential Transducers for Generative Recommendations\" (https://arxiv.org/abs/2402.17152, ICML'24).","allTopics":["recommendations","recsys","recommender-systems","actions-speak-louder-than-words","generative-recommenders","hstu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":7,"starsCount":468,"forksCount":78,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,4,2,0,2,1,1,2,0,3,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T17:08:33.169Z"}},{"type":"Public","name":"habitat-sim","owner":"facebookresearch","isFork":false,"description":"A flexible, high-performance 3D simulator for Embodied AI research.","allTopics":["simulator","ai","computer-vision","cplusplus","robotics","sim2real"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":43,"issueCount":145,"starsCount":2439,"forksCount":402,"license":"MIT License","participation":[4,5,2,4,1,5,6,2,1,1,2,3,6,5,6,1,3,0,1,3,1,2,2,6,2,0,1,3,0,2,3,3,2,0,3,2,2,2,0,3,1,1,5,5,5,3,3,2,1,2,1,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T17:01:48.251Z"}},{"type":"Public","name":"spot-sim2real","owner":"facebookresearch","isFork":false,"description":"Spot Sim2Real Infrastructure ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":21,"issueCount":30,"starsCount":50,"forksCount":3,"license":"MIT License","participation":[0,0,0,1,1,1,1,0,2,1,0,4,0,0,3,1,0,7,1,0,0,0,2,0,0,0,2,2,1,1,2,0,5,0,0,3,1,3,4,4,1,0,0,1,0,1,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T12:03:08.376Z"}},{"type":"Public","name":"BenchMARL","owner":"facebookresearch","isFork":false,"description":"A collection of MARL benchmarks based on TorchRL","allTopics":["benchmark","machine-learning","reinforcement-learning","robotics","torch","pytorch","multi-agent","rl","multi-agent-reinforcement-learning","marl"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":186,"forksCount":20,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,4,5,3,2,2,0,9,7,0,2,1,0,2,9,1,5,0,0,1,6,2,0,0,0,1,2,3,7,2,0,0,0,0,0,11,3,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T09:34:35.227Z"}},{"type":"Public","name":"aepsych","owner":"facebookresearch","isFork":false,"description":"AEPsych is a tool for adaptive experimentation in psychophysics and perception research, built on top of gpytorch and botorch. 
","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":3,"issueCount":11,"starsCount":48,"forksCount":35,"license":"Other","participation":[2,2,0,2,3,1,2,1,1,2,0,0,0,1,0,1,0,0,1,0,0,0,1,1,1,0,0,0,1,1,5,3,0,1,0,0,1,2,1,0,0,0,0,2,2,1,1,0,5,0,0,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T09:23:57.163Z"}},{"type":"Public","name":"InterWild","owner":"facebookresearch","isFork":false,"description":"Official PyTorch implementation of \"Bringing Inputs to Shared Domains for 3D Interacting Hands Recovery in the Wild\", CVPR 2023","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":17,"starsCount":158,"forksCount":15,"license":"Other","participation":[0,1,0,1,0,0,0,0,1,0,0,1,0,0,0,7,5,11,0,1,0,1,0,0,0,2,0,0,0,0,0,1,0,0,0,0,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T00:05:19.506Z"}},{"type":"Public","name":"projectaria_tools","owner":"facebookresearch","isFork":false,"description":"projectaria_tools is an C++/Python open-source toolkit to interact with Project Aria data","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":8,"starsCount":381,"forksCount":47,"license":"Apache License 2.0","participation":[0,5,11,3,6,14,11,8,7,10,12,13,33,1,1,12,23,20,10,13,0,5,7,12,7,3,5,4,5,10,18,10,8,2,14,8,26,7,1,4,5,5,7,8,8,13,4,9,8,20,4,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T22:28:43.404Z"}},{"type":"Public","name":"Mephisto","owner":"facebookresearch","isFork":false,"description":"A suite of tools for managing crowdsourcing tasks from the inception through to data packaging for research use. ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":44,"starsCount":298,"forksCount":75,"license":"MIT License","participation":[4,10,9,10,13,5,12,17,2,2,9,7,10,12,2,7,7,1,6,5,0,6,2,3,2,2,0,3,3,6,4,2,3,5,7,12,26,9,2,3,1,1,1,2,31,25,4,0,4,2,3,26],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T19:52:03.474Z"}},{"type":"Public","name":"jepa","owner":"facebookresearch","isFork":false,"description":"PyTorch code and models for V-JEPA self-supervised learning from video.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":10,"issueCount":34,"starsCount":2525,"forksCount":242,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,27,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T19:42:14.579Z"}},{"type":"Public","name":"fbcddisgraph","owner":"facebookresearch","isFork":false,"description":"The codes reproduce the figures and statistics in the paper, \"A graphical method of cumulative differences between two subpopulations,\" by Mark Tygert. 
The repo also provides the LaTeX and BibTex sources required for replicating the paper.","allTopics":[],"primaryLanguage":{"name":"TeX","color":"#3D6117"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":1,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T19:25:36.631Z"}},{"type":"Public","name":"nevergrad","owner":"facebookresearch","isFork":false,"description":"A Python toolbox for performing gradient-free optimization","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":45,"issueCount":88,"starsCount":3882,"forksCount":349,"license":"MIT License","participation":[4,1,2,2,0,0,0,0,1,1,0,7,1,4,2,0,0,0,1,0,3,1,1,0,0,0,0,0,7,0,0,0,0,4,0,0,0,0,2,0,1,0,0,0,0,0,0,0,1,0,2,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T17:27:11.824Z"}},{"type":"Public","name":"HolisticTraceAnalysis","owner":"facebookresearch","isFork":false,"description":"A library to analyze PyTorch traces.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":14,"starsCount":249,"forksCount":32,"license":"MIT License","participation":[0,0,1,1,1,0,0,0,1,1,0,0,0,2,3,1,1,3,2,3,0,0,0,1,1,0,0,1,3,0,1,1,1,0,1,4,1,2,2,1,1,0,3,0,5,8,3,0,0,0,3,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T17:23:51.397Z"}},{"type":"Public","name":"ucc","owner":"facebookresearch","isFork":true,"description":"Unified Communication Collectives Library","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":4,"forksCount":85,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T16:27:00.450Z"}},{"type":"Public","name":"habitat-lab","owner":"facebookresearch","isFork":false,"description":"A modular high-level library to train embodied AI agents across a variety of tasks and environments.","allTopics":["python","simulator","research","reinforcement-learning","ai","computer-vision","deep-learning","robotics","deep-reinforcement-learning","sim2real"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":109,"issueCount":190,"starsCount":1807,"forksCount":459,"license":"MIT License","participation":[1,1,6,4,2,0,1,7,2,2,2,0,3,2,7,13,2,3,1,1,2,1,2,0,0,0,1,3,5,5,8,6,4,8,2,5,15,10,6,7,14,0,4,12,5,17,10,1,1,1,4,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T16:10:22.025Z"}},{"type":"Public","name":"ClassyVision","owner":"facebookresearch","isFork":false,"description":"An end-to-end PyTorch framework for image and video classification","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":40,"issueCount":13,"starsCount":1587,"forksCount":280,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T16:05:09.265Z"}},{"type":"Public","name":"goliath","owner":"facebookresearch","isFork":false,"description":"Goliath Dataset 
Release","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":1,"starsCount":53,"forksCount":3,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,4,0,10,0,1,1,1,0,0,0,0,8,7,0,12],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T15:34:12.212Z"}},{"type":"Public","name":"pytorch3d","owner":"facebookresearch","isFork":false,"description":"PyTorch3D is FAIR's library of reusable components for deep learning with 3D data","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":20,"issueCount":254,"starsCount":8467,"forksCount":1274,"license":"Other","participation":[6,5,1,0,0,0,1,0,1,1,0,2,0,0,0,2,0,5,0,5,0,1,6,1,3,0,2,0,2,2,2,2,2,2,0,3,1,0,2,3,0,1,0,1,1,1,0,1,0,0,7,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T14:39:57.491Z"}},{"type":"Public","name":"xformers","owner":"facebookresearch","isFork":false,"description":"Hackable and optimized Transformers building blocks, supporting a composable construction.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":252,"starsCount":7958,"forksCount":563,"license":"Other","participation":[4,4,8,2,3,4,9,5,3,12,1,17,6,4,20,3,12,8,4,1,3,13,13,3,0,0,1,0,2,9,10,3,1,6,5,5,7,2,7,10,2,1,1,5,10,2,7,1,7,11,3,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T12:57:30.284Z"}},{"type":"Public","name":"param","owner":"facebookresearch","isFork":false,"description":"PArametrized Recommendation and Ai Model benchmark is a repository for development of numerous uBenchmarks as well as end to end nets for evaluation of training and inference platforms.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":7,"starsCount":110,"forksCount":56,"license":"MIT License","participation":[5,3,5,8,2,1,6,2,5,1,3,10,1,0,7,0,2,1,3,1,0,6,3,1,0,1,0,1,2,2,0,0,0,0,0,3,3,0,1,0,0,2,3,3,1,2,4,1,2,0,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T11:55:06.744Z"}},{"type":"Public","name":"dinov2","owner":"facebookresearch","isFork":false,"description":"PyTorch code and models for the DINOv2 self-supervised learning method.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":23,"issueCount":186,"starsCount":8286,"forksCount":696,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,3,10,0,0,0,1,4,0,0,5,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T08:35:42.442Z"}},{"type":"Public","name":"EgoVLPv2","owner":"facebookresearch","isFork":false,"description":"Code release for \"EgoVLPv2: Egocentric Video-Language Pre-training with Fusion in the Backbone\" [ICCV, 2023]","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":79,"forksCount":11,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,1,0,0,5,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T03:36:42.626Z"}},{"type":"Public","name":"ava-256","owner":"facebookresearch","isFork":false,"description":"Train universal codec avatars","allTopics":[],"primaryLanguage":{"name":"Jupyter 
Notebook","color":"#DA5B0B"},"pullRequestCount":2,"issueCount":0,"starsCount":18,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T02:26:31.083Z"}},{"type":"Public","name":"vggsfm","owner":"facebookresearch","isFork":false,"description":"[CVPR 2024 Highlight] VGGSfM Visual Geometry Grounded Deep Structure From Motion","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":441,"forksCount":28,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,22,7,1,0,1,0,0,1,0,25],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T23:16:11.704Z"}}],"repositoryCount":1021,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}