Scores on benchmarks
Model rank shown below is with respect to all public models.

| Score | Benchmark | Rank | # Benchmarks |
|---|---|---|---|
| .236 | average_vision | 169 | 81 |
| .471 | behavior_vision | 23 | 43 |
| .573 | Rajalingham2018-i2n v2 [reference] (match-to-sample task) | 23 | |
| .520 | Geirhos2021-error_consistency [reference] | 32 | 17 |
| .691 | Geirhos2021colour-error_consistency v1 [reference] | 34 | |
| .376 | Geirhos2021contrast-error_consistency v1 [reference] | 60 | |
| .402 | Geirhos2021cueconflict-error_consistency v1 [reference] | 30 | |
| .151 | Geirhos2021edge-error_consistency v1 [reference] | 53 | |
| .638 | Geirhos2021eidolonI-error_consistency v1 [reference] | 18 | |
| .646 | Geirhos2021eidolonII-error_consistency v1 [reference] | 16 | |
| .489 | Geirhos2021eidolonIII-error_consistency v1 [reference] | 41 | |
| .537 | Geirhos2021falsecolour-error_consistency v1 [reference] | 52 | |
| .303 | Geirhos2021highpass-error_consistency v1 [reference] | 30 | |
| .498 | Geirhos2021lowpass-error_consistency v1 [reference] | 29 | |
| .572 | Geirhos2021phasescrambling-error_consistency v1 [reference] | 14 | |
| .599 | Geirhos2021powerequalisation-error_consistency v1 [reference] | 17 | |
| .561 | Geirhos2021rotation-error_consistency v1 [reference] | 9 | |
| .856 | Geirhos2021silhouette-error_consistency v1 [reference] | 28 | |
| .253 | Geirhos2021sketch-error_consistency v1 [reference] | 43 | |
| .603 | Geirhos2021stylized-error_consistency v1 [reference] | 36 | |
| .670 | Geirhos2021uniformnoise-error_consistency v1 [reference] | 14 | |
| .700 | Baker2022 | 21 | 3 |
| .858 | Baker2022fragmented-accuracy_delta v1 [reference] | 31 | |
| .873 | Baker2022frankenstein-accuracy_delta v1 [reference] | 13 | |
| .367 | Baker2022inverted-accuracy_delta v1 [reference] | 38 | |
| .570 | Maniquet2024 | 79 | 2 |
| .410 | Maniquet2024-confusion_similarity v1 [reference] | 116 | |
| .729 | Maniquet2024-tasks_consistency v1 [reference] | 14 | |
| .493 | Ferguson2024 [reference] | 76 | 14 |
| 1.0 | Ferguson2024half-value_delta v1 [reference] | 1 | |
| .706 | Ferguson2024gray_hard-value_delta v1 [reference] | 57 | |
| .372 | Ferguson2024lle-value_delta v1 [reference] | 114 | |
| .031 | Ferguson2024juncture-value_delta v1 [reference] | 162 | |
| .952 | Ferguson2024color-value_delta v1 [reference] | 63 | |
| 1.0 | Ferguson2024round_v-value_delta v1 [reference] | 1 | |
| .067 | Ferguson2024eighth-value_delta v1 [reference] | 139 | |
| .155 | Ferguson2024quarter-value_delta v1 [reference] | 162 | |
| .762 | Ferguson2024convergence-value_delta v1 [reference] | 30 | |
| .783 | Ferguson2024round_f-value_delta v1 [reference] | 30 | |
| .406 | Ferguson2024llh-value_delta v1 [reference] | 123 | |
| .014 | Ferguson2024circle_line-value_delta v1 [reference] | 202 | |
| .160 | Ferguson2024gray_easy-value_delta v1 [reference] | 133 | |
| .489 | Ferguson2024tilted_line-value_delta v1 [reference] | 130 | |
| .326 | Hebart2023-match v1 | 75 | |
| .112 | BMD2024 | 147 | 4 |
| .104 | BMD2024.dotted_1Behavioral-accuracy_distance v1 | 126 | |
| .114 | BMD2024.texture_1Behavioral-accuracy_distance v1 | 139 | |
| .105 | BMD2024.texture_2Behavioral-accuracy_distance v1 | 144 | |
| .126 | BMD2024.dotted_2Behavioral-accuracy_distance v1 | 107 | |
| .478 | Coggan2024_behavior-ConditionWiseAccuracySimilarity v1 | 49 | |
| .379 | engineering_vision | 109 | 25 |
| .772 | ImageNet-top1 v1 [reference] | 41 | |
| .362 | ImageNet-C-top1 [reference] | 95 | 4 |
| .361 | ImageNet-C-blur-top1 v2 [reference] | 72 | |
| .553 | ImageNet-C-weather-top1 v2 [reference] | 34 | |
| .533 | ImageNet-C-digital-top1 v2 [reference] | 51 | |
| .569 | Geirhos2021-top1 [reference] | 100 | 17 |
| .975 | Geirhos2021colour-top1 v1 [reference] | 86 | |
| .955 | Geirhos2021contrast-top1 v1 [reference] | 44 | |
| .200 | Geirhos2021cueconflict-top1 v1 [reference] | 151 | |
| .150 | Geirhos2021edge-top1 v1 [reference] | 230 | |
| .424 | Geirhos2021eidolonI-top1 v1 [reference] | 236 | |
| .506 | Geirhos2021eidolonII-top1 v1 [reference] | 144 | |
| .515 | Geirhos2021eidolonIII-top1 v1 [reference] | 140 | |
| .964 | Geirhos2021falsecolour-top1 v1 [reference] | 65 | |
| .358 | Geirhos2021highpass-top1 v1 [reference] | 147 | |
| .404 | Geirhos2021lowpass-top1 v1 [reference] | 140 | |
| .633 | Geirhos2021phasescrambling-top1 v1 [reference] | 94 | |
| .804 | Geirhos2021powerequalisation-top1 v1 [reference] | 66 | |
| .864 | Geirhos2021rotation-top1 v1 [reference] | 28 | |
| .487 | Geirhos2021silhouette-top1 v1 [reference] | 138 | |
| .610 | Geirhos2021sketch-top1 v1 [reference] | 118 | |
| .328 | Geirhos2021stylized-top1 v1 [reference] | 204 | |
| .497 | Geirhos2021uniformnoise-top1 v1 [reference] | 96 | |
| .192 | Hermann2020 [reference] | 203 | 2 |
| .158 | Hermann2020cueconflict-shape_match v1 [reference] | 155 | |
| .226 | Hermann2020cueconflict-shape_bias v1 [reference] | 207 | |
How to use
```python
from brainscore_vision import load_model

model = load_model("resnet_152_v2")
model.start_task(...)
model.start_recording(...)
model.look_at(...)
```
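For context, here is a minimal sketch of scoring this model on one of the behavioral benchmarks listed above. It assumes the `brainscore_vision` package exposes a `load_benchmark` function and that `"Rajalingham2018-i2n"` is a valid benchmark identifier; check the installed library version before relying on these names.

```python
# Hedged sketch: load_benchmark and the benchmark identifier are assumptions
# about the brainscore_vision API, not details confirmed by this scorecard.
from brainscore_vision import load_benchmark, load_model

model = load_model("resnet_152_v2")
benchmark = load_benchmark("Rajalingham2018-i2n")

# A benchmark is typically callable on a BrainModel; it drives start_task /
# look_at internally and returns a Score object comparable to the table above.
score = benchmark(model)
print(score)
```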
Benchmarks bibtex
```bibtex
@article{Rajalingham240614,
  author       = {Rajalingham, Rishi and Issa, Elias B. and Bashivan, Pouya and Kar, Kohitij and Schmidt, Kailyn and DiCarlo, James J.},
  title        = {Large-scale, high-resolution comparison of the core visual object recognition behavior of humans, monkeys, and state-of-the-art deep artificial neural networks},
  elocation-id = {240614},
  year         = {2018},
  doi          = {10.1101/240614},
  publisher    = {Cold Spring Harbor Laboratory},
  journal      = {bioRxiv},
  url          = {https://www.biorxiv.org/content/early/2018/02/12/240614},
  eprint       = {https://www.biorxiv.org/content/early/2018/02/12/240614.full.pdf},
  abstract     = {Primates{\textemdash}including humans{\textemdash}can typically recognize objects in visual images at a glance even in the face of naturally occurring identity-preserving image transformations (e.g. changes in viewpoint). A primary neuroscience goal is to uncover neuron-level mechanistic models that quantitatively explain this behavior by predicting primate performance for each and every image. Here, we applied this stringent behavioral prediction test to the leading mechanistic models of primate vision (specifically, deep, convolutional, artificial neural networks; ANNs) by directly comparing their behavioral signatures against those of humans and rhesus macaque monkeys. Using high-throughput data collection systems for human and monkey psychophysics, we collected over one million behavioral trials for 2400 images over 276 binary object discrimination tasks. Consistent with previous work, we observed that state-of-the-art deep, feed-forward convolutional ANNs trained for visual categorization (termed DCNNIC models) accurately predicted primate patterns of object-level confusion. However, when we examined behavioral performance for individual images within each object discrimination task, we found that all tested DCNNIC models were significantly non-predictive of primate performance, and that this prediction failure was not accounted for by simple image attributes, nor rescued by simple model modifications. These results show that current DCNNIC models cannot account for the image-level behavioral patterns of primates, and that new ANN models are needed to more precisely capture the neural mechanisms underlying primate object vision. To this end, large-scale, high-resolution primate behavioral benchmarks{\textemdash}such as those obtained here{\textemdash}could serve as direct guides for discovering such models. SIGNIFICANCE STATEMENT: Recently, specific feed-forward deep convolutional artificial neural network (ANN) models have dramatically advanced our quantitative understanding of the neural mechanisms underlying primate core object recognition. In this work, we tested the limits of those ANNs by systematically comparing the behavioral responses of these models with the behavioral responses of humans and monkeys, at the resolution of individual images. Using these high-resolution metrics, we found that all tested ANN models significantly diverged from primate behavior. Going forward, these high-resolution, large-scale primate behavioral benchmarks could serve as direct guides for discovering better ANN models of the primate visual system.}
}

@article{geirhos2021partial,
  title   = {Partial success in closing the gap between human and machine vision},
  author  = {Geirhos, Robert and Narayanappa, Kantharaju and Mitzkus, Benjamin and Thieringer, Tizian and Bethge, Matthias and Wichmann, Felix A and Brendel, Wieland},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {34},
  year    = {2021},
  url     = {https://openreview.net/forum?id=QkljT4mrfs}
}

@article{BAKER2022104913,
  title    = {Deep learning models fail to capture the configural nature of human shape perception},
  author   = {Nicholas Baker and James H. Elder},
  journal  = {iScience},
  volume   = {25},
  number   = {9},
  pages    = {104913},
  year     = {2022},
  issn     = {2589-0042},
  doi      = {https://doi.org/10.1016/j.isci.2022.104913},
  url      = {https://www.sciencedirect.com/science/article/pii/S2589004222011853},
  keywords = {Biological sciences, Neuroscience, Sensory neuroscience},
  abstract = {A hallmark of human object perception is sensitivity to the holistic configuration of the local shape features of an object. Deep convolutional neural networks (DCNNs) are currently the dominant models for object recognition processing in the visual cortex, but do they capture this configural sensitivity? To answer this question, we employed a dataset of animal silhouettes and created a variant of this dataset that disrupts the configuration of each object while preserving local features. While human performance was impacted by this manipulation, DCNN performance was not, indicating insensitivity to object configuration. Modifications to training and architecture to make networks more brain-like did not lead to configural processing, and none of the networks were able to accurately predict trial-by-trial human object judgements. We speculate that to match human configural sensitivity, networks must be trained to solve a broader range of object tasks beyond category recognition.}
}

@article{Maniquet2024.04.02.587669,
  author       = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan},
  title        = {Recurrent issues with deep neural network models of visual recognition},
  elocation-id = {2024.04.02.587669},
  year         = {2024},
  doi          = {10.1101/2024.04.02.587669},
  publisher    = {Cold Spring Harbor Laboratory},
  journal      = {bioRxiv},
  url          = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669},
  eprint       = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}
}

@misc{ferguson_ngo_lee_dicarlo_schrimpf_2024,
  title     = {How Well is Visual Search Asymmetry predicted by a Binary-Choice, Rapid, Accuracy-based Visual-search, Oddball-detection (BRAVO) task?},
  author    = {Ferguson, Michael E., Jr. and Ngo, Jerry and Lee, Michael and DiCarlo, James and Schrimpf, Martin},
  url       = {osf.io/5ba3n},
  doi       = {10.17605/OSF.IO/5BA3N},
  publisher = {OSF},
  year      = {2024},
  month     = {Jun}
}

@inproceedings{5206848,
  author    = {J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}},
  booktitle = {2009 IEEE Conference on Computer Vision and Pattern Recognition},
  title     = {ImageNet: A large-scale hierarchical image database},
  year      = {2009},
  pages     = {248-255}
}

@article{Hendrycks2019-di,
  title         = {Benchmarking Neural Network Robustness to Common Corruptions and Perturbations},
  author        = {Hendrycks, Dan and Dietterich, Thomas},
  year          = {2019},
  month         = mar,
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  eprint        = {1903.12261},
  url           = {https://arxiv.org/abs/1903.12261},
  abstract      = {In this paper we establish rigorous benchmarks for image classifier robustness. Our first benchmark, ImageNet-C, standardizes and expands the corruption robustness topic, while showing which classifiers are preferable in safety-critical applications. Then we propose a new dataset called ImageNet-P which enables researchers to benchmark a classifier's robustness to common perturbations. Unlike recent robustness research, this benchmark evaluates performance on common corruptions and perturbations not worst-case adversarial perturbations. We find that there are negligible changes in relative corruption robustness from AlexNet classifiers to ResNet classifiers. Afterward we discover ways to enhance corruption and perturbation robustness. We even find that a bypassed adversarial defense provides substantial common perturbation robustness. Together our benchmarks may aid future work toward networks that robustly generalize.}
}

@article{hermann2020origins,
  title   = {The origins and prevalence of texture bias in convolutional neural networks},
  author  = {Hermann, Katherine and Chen, Ting and Kornblith, Simon},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {33},
  pages   = {19000--19015},
  year    = {2020},
  url     = {https://proceedings.neurips.cc/paper/2020/hash/db5f9f42a7157abe65bb145000b5871a-Abstract.html}
}
```