Scores on benchmarks
Model rank shown below is with respect to all public models.

- average_vision: .142 (rank 346, 81 benchmarks)
  - neural_vision: .223 (rank 332, 38 benchmarks)
    - V1: .105 (rank 345, 24 benchmarks)
      - FreemanZiemba2013.V1-pls v2: .316 (rank 45; recordings from 102 sites in V1)
    - V2: .176 (rank 117, 2 benchmarks)
      - FreemanZiemba2013.V2-pls v2: .353 (rank 22; recordings from 103 sites in V2)
    - V4: .355 (rank 272, 5 benchmarks)
      - SanghaviJozwik2020.V4-pls v1: .438 (rank 269; recordings from 50 sites in V4)
      - Sanghavi2020.V4-pls v1: .617 (rank 198; recordings from 47 sites in V4)
      - SanghaviMurty2020.V4-pls v1: .170 (rank 290; recordings from 46 sites in V4)
      - MajajHong2015.V4-pls v3: .550 (rank 287; recordings from 88 sites in V4)
    - IT: .255 (rank 285, 7 benchmarks)
      - SanghaviMurty2020.IT-pls v1: .313 (rank 277; recordings from 29 sites in IT)
      - Sanghavi2020.IT-pls v1: .496 (rank 258; recordings from 88 sites in IT)
      - SanghaviJozwik2020.IT-pls v1: .471 (rank 221; recordings from 26 sites in IT)
      - MajajHong2015.IT-pls v3: .508 (rank 204; recordings from 168 sites in IT)
      - Kar2019-ost v2: X (rank X; recordings from 424 sites in IT)
  - behavior_vision: .060 (rank 314, 43 benchmarks)
    - Rajalingham2018-i2n v2: .370 (rank 280; match-to-sample task)
    - Geirhos2021-error_consistency: .114 (rank 199, 17 benchmarks)
      - Geirhos2021colour-error_consistency v1: .134 (rank 205)
      - Geirhos2021contrast-error_consistency v1: .123 (rank 163)
      - Geirhos2021cueconflict-error_consistency v1: .128 (rank 216)
      - Geirhos2021edge-error_consistency v1: .069 (rank 183)
      - Geirhos2021eidolonI-error_consistency v1: .281 (rank 161)
      - Geirhos2021eidolonII-error_consistency v1: .252 (rank 176)
      - Geirhos2021eidolonIII-error_consistency v1: .167 (rank 205)
      - Geirhos2021falsecolour-error_consistency v1: .055 (rank 227)
      - Geirhos2021highpass-error_consistency v1: .045 (rank 173)
      - Geirhos2021lowpass-error_consistency v1: .056 (rank 238)
      - Geirhos2021phasescrambling-error_consistency v1: .060 (rank 191)
      - Geirhos2021powerequalisation-error_consistency v1: .097 (rank 139)
      - Geirhos2021rotation-error_consistency v1: .049 (rank 242)
      - Geirhos2021silhouette-error_consistency v1: .203 (rank 211)
      - Geirhos2021sketch-error_consistency v1: .041 (rank 227)
      - Geirhos2021stylized-error_consistency v1: .139 (rank 191)
      - Geirhos2021uniformnoise-error_consistency v1: .038 (rank 225)
- engineering_vision: .295 (rank 190, 25 benchmarks)
  - ImageNet-top1 v1: .577 (rank 189)
  - ImageNet-C-top1: .202 (rank 174, 4 benchmarks)
    - ImageNet-C-noise-top1 v2: .099 (rank 202)
    - ImageNet-C-blur-top1 v2: .190 (rank 172)
    - ImageNet-C-weather-top1 v2: .229 (rank 175)
    - ImageNet-C-digital-top1 v2: .291 (rank 172)
  - ObjectNet-top1 v1: .069 (rank 111)
  - Geirhos2021-top1: .434 (rank 216, 17 benchmarks)
    - Geirhos2021colour-top1 v1: .864 (rank 211)
    - Geirhos2021contrast-top1 v1: .435 (rank 208)
    - Geirhos2021cueconflict-top1 v1: .189 (rank 175)
    - Geirhos2021edge-top1 v1: .294 (rank 104)
    - Geirhos2021eidolonI-top1 v1: .474 (rank 174)
    - Geirhos2021eidolonII-top1 v1: .448 (rank 211)
    - Geirhos2021eidolonIII-top1 v1: .438 (rank 211)
    - Geirhos2021falsecolour-top1 v1: .777 (rank 222)
    - Geirhos2021highpass-top1 v1: .264 (rank 198)
    - Geirhos2021lowpass-top1 v1: .345 (rank 192)
    - Geirhos2021phasescrambling-top1 v1: .520 (rank 194)
    - Geirhos2021powerequalisation-top1 v1: .505 (rank 205)
    - Geirhos2021rotation-top1 v1: .482 (rank 220)
    - Geirhos2021silhouette-top1 v1: .431 (rank 177)
    - Geirhos2021sketch-top1 v1: .450 (rank 223)
    - Geirhos2021stylized-top1 v1: .279 (rank 227)
    - Geirhos2021uniformnoise-top1 v1: .177 (rank 234)
  - Hermann2020: .195 (rank 190, 2 benchmarks)
    - Hermann2020cueconflict-shape_match v1: .151 (rank 173)
    - Hermann2020cueconflict-shape_bias v1: .239 (rank 186)
How to use
from brainscore_vision import load_model

model = load_model("alexnet")
model.start_task(...)
model.start_recording(...)
model.look_at(...)
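The calls above are the BrainModel interface that the benchmarks drive (set a task, set a recording target, present stimuli). To reproduce a score from the list above, the usual pattern is to load a public benchmark and apply it to the model. The sketch below is an assumption-laden illustration, not the page's verbatim example: it assumes the installed brainscore_vision package exposes load_benchmark() and score(), and it uses the public MajajHong2015public.IT-pls identifier as an example; identifiers for other benchmarks follow the names listed above but may differ slightly in your installation.

```python
# Minimal sketch (assumed helpers, not the page's verbatim example):
# score "alexnet" on one public benchmark from the list above.
from brainscore_vision import load_benchmark, load_model, score

model = load_model("alexnet")
benchmark = load_benchmark("MajajHong2015public.IT-pls")  # assumed identifier
result = benchmark(model)  # the benchmark calls start_task/start_recording/look_at
print(result)              # Score object with the aggregate value

# Equivalent one-call convenience wrapper, by identifier (assumed to exist):
print(score(model_identifier="alexnet",
            benchmark_identifier="MajajHong2015public.IT-pls"))
```

If only the interface calls from the snippet above are needed (for example, to plug the model into a custom experiment), start_task, start_recording, and look_at can be called directly with concrete task, recording-target, and stimulus arguments.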
Benchmarks BibTeX
@article{Freeman2013,
  author   = {Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, Eero P. and Movshon, J. Anthony},
  title    = {A functional and perceptual signature of the second visual area in primates},
  journal  = {Nature Neuroscience},
  year     = {2013},
  month    = {Jul},
  day      = {01},
  volume   = {16},
  number   = {7},
  pages    = {974-981},
  abstract = {The authors examined neuronal responses in V1 and V2 to synthetic texture stimuli that replicate higher-order statistical dependencies found in natural images. V2, but not V1, responded differentially to these textures, in both macaque (single neurons) and human (fMRI). Human detection of naturalistic structure in the same images was predicted by V2 responses, suggesting a role for V2 in representing natural image structure.},
  issn     = {1546-1726},
  doi      = {10.1038/nn.3402},
  url      = {https://doi.org/10.1038/nn.3402}
}

@misc{Sanghavi_Jozwik_DiCarlo_2021,
  title     = {SanghaviJozwik2020},
  url       = {osf.io/fhy36},
  doi       = {10.17605/OSF.IO/FHY36},
  publisher = {OSF},
  author    = {Sanghavi, Sachi and Jozwik, Kamila M and DiCarlo, James J},
  year      = {2021},
  month     = {Nov}
}

@misc{Sanghavi_DiCarlo_2021,
  title     = {Sanghavi2020},
  url       = {osf.io/chwdk},
  doi       = {10.17605/OSF.IO/CHWDK},
  publisher = {OSF},
  author    = {Sanghavi, Sachi and DiCarlo, James J},
  year      = {2021},
  month     = {Nov}
}

@misc{Sanghavi_Murty_DiCarlo_2021,
  title     = {SanghaviMurty2020},
  url       = {osf.io/fchme},
  doi       = {10.17605/OSF.IO/FCHME},
  publisher = {OSF},
  author    = {Sanghavi, Sachi and Murty, N A R and DiCarlo, James J},
  year      = {2021},
  month     = {Nov}
}

@article{Majaj13402,
  author    = {Majaj, Najib J. and Hong, Ha and Solomon, Ethan A. and DiCarlo, James J.},
  title     = {Simple Learned Weighted Sums of Inferior Temporal Neuronal Firing Rates Accurately Predict Human Core Object Recognition Performance},
  journal   = {Journal of Neuroscience},
  volume    = {35},
  number    = {39},
  pages     = {13402--13418},
  year      = {2015},
  doi       = {10.1523/JNEUROSCI.5181-14.2015},
  publisher = {Society for Neuroscience},
  issn      = {0270-6474},
  url       = {https://www.jneurosci.org/content/35/39/13402},
  eprint    = {https://www.jneurosci.org/content/35/39/13402.full.pdf},
  abstract  = {To go beyond qualitative models of the biological substrate of object recognition, we ask: can a single ventral stream neuronal linking hypothesis quantitatively account for core object recognition performance over a broad range of tasks? We measured human performance in 64 object recognition tests using thousands of challenging images that explore shape similarity and identity preserving object variation. We then used multielectrode arrays to measure neuronal population responses to those same images in visual areas V4 and inferior temporal (IT) cortex of monkeys and simulated V1 population responses. We tested leading candidate linking hypotheses and control hypotheses, each postulating how ventral stream neuronal responses underlie object recognition behavior. Specifically, for each hypothesis, we computed the predicted performance on the 64 tests and compared it with the measured pattern of human performance. All tested hypotheses based on low- and mid-level visually evoked activity (pixels, V1, and V4) were very poor predictors of the human behavioral pattern. However, simple learned weighted sums of distributed average IT firing rates exactly predicted the behavioral pattern. More elaborate linking hypotheses relying on IT trial-by-trial correlational structure, finer IT temporal codes, or ones that strictly respect the known spatial substructures of IT ({\textquotedblleft}face patches{\textquotedblright}) did not improve predictive power. Although these results do not reject those more elaborate hypotheses, they suggest a simple, sufficient quantitative model: each object recognition task is learned from the spatially distributed mean firing rates (100 ms) of \~{}60,000 IT neurons and is executed as a simple weighted sum of those firing rates. SIGNIFICANCE STATEMENT We sought to go beyond qualitative models of visual object recognition and determine whether a single neuronal linking hypothesis can quantitatively account for core object recognition behavior. To achieve this, we designed a database of images for evaluating object recognition performance. We used multielectrode arrays to characterize hundreds of neurons in the visual ventral stream of nonhuman primates and measured the object recognition performance of \>100 human observers. Remarkably, we found that simple learned weighted sums of firing rates of neurons in monkey inferior temporal (IT) cortex accurately predicted human performance. Although previous work led us to expect that IT would outperform V4, we were surprised by the quantitative precision with which simple IT-based linking hypotheses accounted for human behavior.}
}

@article{Kar2019,
  author   = {Kar, Kohitij and Kubilius, Jonas and Schmidt, Kailyn and Issa, Elias B. and DiCarlo, James J.},
  title    = {Evidence that recurrent circuits are critical to the ventral stream's execution of core object recognition behavior},
  journal  = {Nature Neuroscience},
  year     = {2019},
  month    = {Jun},
  day      = {01},
  volume   = {22},
  number   = {6},
  pages    = {974-983},
  abstract = {Non-recurrent deep convolutional neural networks (CNNs) are currently the best at modeling core object recognition, a behavior that is supported by the densely recurrent primate ventral stream, culminating in the inferior temporal (IT) cortex. If recurrence is critical to this behavior, then primates should outperform feedforward-only deep CNNs for images that require additional recurrent processing beyond the feedforward IT response. Here we first used behavioral methods to discover hundreds of these `challenge' images. Second, using large-scale electrophysiology, we observed that behaviorally sufficient object identity solutions emerged {\textasciitilde}30{\thinspace}ms later in the IT cortex for challenge images compared with primate performance-matched `control' images. Third, these behaviorally critical late-phase IT response patterns were poorly predicted by feedforward deep CNN activations. Notably, very-deep CNNs and shallower recurrent CNNs better predicted these late IT responses, suggesting that there is a functional equivalence between additional nonlinear transformations and recurrence. Beyond arguing that recurrent circuits are critical for rapid object identification, our results provide strong constraints for future recurrent model development.},
  issn     = {1546-1726},
  doi      = {10.1038/s41593-019-0392-5},
  url      = {https://doi.org/10.1038/s41593-019-0392-5}
}

@article{Rajalingham240614,
  author       = {Rajalingham, Rishi and Issa, Elias B. and Bashivan, Pouya and Kar, Kohitij and Schmidt, Kailyn and DiCarlo, James J.},
  title        = {Large-scale, high-resolution comparison of the core visual object recognition behavior of humans, monkeys, and state-of-the-art deep artificial neural networks},
  elocation-id = {240614},
  year         = {2018},
  doi          = {10.1101/240614},
  publisher    = {Cold Spring Harbor Laboratory},
  journal      = {bioRxiv},
  url          = {https://www.biorxiv.org/content/early/2018/02/12/240614},
  eprint       = {https://www.biorxiv.org/content/early/2018/02/12/240614.full.pdf},
  abstract     = {Primates{\textemdash}including humans{\textemdash}can typically recognize objects in visual images at a glance even in the face of naturally occurring identity-preserving image transformations (e.g. changes in viewpoint). A primary neuroscience goal is to uncover neuron-level mechanistic models that quantitatively explain this behavior by predicting primate performance for each and every image. Here, we applied this stringent behavioral prediction test to the leading mechanistic models of primate vision (specifically, deep, convolutional, artificial neural networks; ANNs) by directly comparing their behavioral signatures against those of humans and rhesus macaque monkeys. Using high-throughput data collection systems for human and monkey psychophysics, we collected over one million behavioral trials for 2400 images over 276 binary object discrimination tasks. Consistent with previous work, we observed that state-of-the-art deep, feed-forward convolutional ANNs trained for visual categorization (termed DCNNIC models) accurately predicted primate patterns of object-level confusion. However, when we examined behavioral performance for individual images within each object discrimination task, we found that all tested DCNNIC models were significantly non-predictive of primate performance, and that this prediction failure was not accounted for by simple image attributes, nor rescued by simple model modifications. These results show that current DCNNIC models cannot account for the image-level behavioral patterns of primates, and that new ANN models are needed to more precisely capture the neural mechanisms underlying primate object vision. To this end, large-scale, high-resolution primate behavioral benchmarks{\textemdash}such as those obtained here{\textemdash}could serve as direct guides for discovering such models. SIGNIFICANCE STATEMENT Recently, specific feed-forward deep convolutional artificial neural networks (ANNs) models have dramatically advanced our quantitative understanding of the neural mechanisms underlying primate core object recognition. In this work, we tested the limits of those ANNs by systematically comparing the behavioral responses of these models with the behavioral responses of humans and monkeys, at the resolution of individual images. Using these high-resolution metrics, we found that all tested ANN models significantly diverged from primate behavior. Going forward, these high-resolution, large-scale primate behavioral benchmarks could serve as direct guides for discovering better ANN models of the primate visual system.}
}

@article{geirhos2021partial,
  title   = {Partial success in closing the gap between human and machine vision},
  author  = {Geirhos, Robert and Narayanappa, Kantharaju and Mitzkus, Benjamin and Thieringer, Tizian and Bethge, Matthias and Wichmann, Felix A and Brendel, Wieland},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {34},
  year    = {2021},
  url     = {https://openreview.net/forum?id=QkljT4mrfs}
}

@inproceedings{5206848,
  author    = {J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}},
  booktitle = {2009 IEEE Conference on Computer Vision and Pattern Recognition},
  title     = {ImageNet: A large-scale hierarchical image database},
  year      = {2009},
  pages     = {248-255}
}

@article{Hendrycks2019-di,
  title         = {Benchmarking Neural Network Robustness to Common Corruptions and Perturbations},
  author        = {Hendrycks, Dan and Dietterich, Thomas},
  abstract      = {In this paper we establish rigorous benchmarks for image classifier robustness. Our first benchmark, ImageNet-C, standardizes and expands the corruption robustness topic, while showing which classifiers are preferable in safety-critical applications. Then we propose a new dataset called ImageNet-P which enables researchers to benchmark a classifier's robustness to common perturbations. Unlike recent robustness research, this benchmark evaluates performance on common corruptions and perturbations not worst-case adversarial perturbations. We find that there are negligible changes in relative corruption robustness from AlexNet classifiers to ResNet classifiers. Afterward we discover ways to enhance corruption and perturbation robustness. We even find that a bypassed adversarial defense provides substantial common perturbation robustness. Together our benchmarks may aid future work toward networks that robustly generalize.},
  month         = mar,
  year          = {2019},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  eprint        = {1903.12261},
  url           = {https://arxiv.org/abs/1903.12261}
}

@inproceedings{DBLP:conf/nips/BarbuMALWGTK19,
  author    = {Andrei Barbu and David Mayo and Julian Alverio and William Luo and Christopher Wang and Dan Gutfreund and Josh Tenenbaum and Boris Katz},
  title     = {ObjectNet: {A} large-scale bias-controlled dataset for pushing the limits of object recognition models},
  booktitle = {NeurIPS 2019},
  pages     = {9448--9458},
  year      = {2019},
  url       = {https://proceedings.neurips.cc/paper/2019/hash/97af07a14cacba681feacf3012730892-Abstract.html}
}

@article{hermann2020origins,
  title   = {The origins and prevalence of texture bias in convolutional neural networks},
  author  = {Hermann, Katherine and Chen, Ting and Kornblith, Simon},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {33},
  pages   = {19000--19015},
  year    = {2020},
  url     = {https://proceedings.neurips.cc/paper/2020/hash/db5f9f42a7157abe65bb145000b5871a-Abstract.html}
}