Scores on benchmarks
Model rank shown below is with respect to all public models.

- **average_vision**: .130 (rank 363, 81 benchmarks)
  - **behavior_vision**: .259 (rank 150, 43 benchmarks)
    - **Geirhos2021-error_consistency**: .214 (rank 125, 17 benchmarks)
      - Geirhos2021contrast-error_consistency v1: .554 (rank 29)
      - Geirhos2021cueconflict-error_consistency v1: .316 (rank 52)
      - Geirhos2021edge-error_consistency v1: .132 (rank 65)
      - Geirhos2021eidolonII-error_consistency v1: .445 (rank 89)
      - Geirhos2021eidolonIII-error_consistency v1: .333 (rank 112)
      - Geirhos2021falsecolour-error_consistency v1: .392 (rank 88)
      - Geirhos2021highpass-error_consistency v1: .150 (rank 49)
      - Geirhos2021lowpass-error_consistency v1: .175 (rank 112)
      - Geirhos2021phasescrambling-error_consistency v1: .162 (rank 92)
      - Geirhos2021powerequalisation-error_consistency v1: .133 (rank 107)
      - Geirhos2021rotation-error_consistency v1: .175 (rank 93)
      - Geirhos2021silhouette-error_consistency v1: .674 (rank 58)
    - **Baker2022**: .397 (rank 77, 3 benchmarks)
      - Baker2022fragmented-accuracy_delta v1: .735 (rank 52)
      - Baker2022frankenstein-accuracy_delta v1: .457 (rank 86)
      - Baker2022inverted-accuracy_delta v1: .000 (rank 54)
    - **Maniquet2024**: .425 (rank 140, 2 benchmarks)
      - Maniquet2024-confusion_similarity v1: .381 (rank 123)
      - Maniquet2024-tasks_consistency v1: .470 (rank 162)
    - **Ferguson2024**: .459 (rank 106, 14 benchmarks)
      - Ferguson2024half-value_delta v1: .571 (rank 91)
      - Ferguson2024gray_hard-value_delta v1: 1.0 (rank 1)
      - Ferguson2024lle-value_delta v1: .092 (rank 187)
      - Ferguson2024juncture-value_delta v1: .056 (rank 146)
      - Ferguson2024color-value_delta v1: .533 (rank 128)
      - Ferguson2024round_v-value_delta v1: .124 (rank 192)
      - Ferguson2024eighth-value_delta v1: .148 (rank 89)
      - Ferguson2024quarter-value_delta v1: .127 (rank 181)
      - Ferguson2024convergence-value_delta v1: 1.0 (rank 1)
      - Ferguson2024round_f-value_delta v1: .383 (rank 95)
      - Ferguson2024llh-value_delta v1: .951 (rank 33)
      - Ferguson2024circle_line-value_delta v1: .845 (rank 23)
      - Ferguson2024gray_easy-value_delta v1: .426 (rank 88)
      - Ferguson2024tilted_line-value_delta v1: .171 (rank 196)
    - Hebart2023-match v1: .351 (rank 49)
    - **BMD2024**: .128 (rank 135, 4 benchmarks)
      - BMD2024.dotted_1Behavioral-accuracy_distance v1: .146 (rank 96)
      - BMD2024.texture_1Behavioral-accuracy_distance v1: .114 (rank 139)
      - BMD2024.texture_2Behavioral-accuracy_distance v1: .105 (rank 144)
      - BMD2024.dotted_2Behavioral-accuracy_distance v1: .149 (rank 86)
    - Coggan2024_behavior-ConditionWiseAccuracySimilarity v1: .099 (rank 158)
- **engineering_vision**: .320 (rank 170, 25 benchmarks)
  - ImageNet-top1 v1: .640 (rank 168)
  - **ImageNet-C-top1**: .295 (rank 131, 4 benchmarks)
    - ImageNet-C-blur-top1 v2: .298 (rank 108)
    - ImageNet-C-weather-top1 v2: .379 (rank 110)
    - ImageNet-C-digital-top1 v2: .502 (rank 73)
  - **Geirhos2021-top1**: .392 (rank 238, 17 benchmarks)
    - Geirhos2021cueconflict-top1 v1: .219 (rank 111)
    - Geirhos2021edge-top1 v1: .250 (rank 154)
    - Geirhos2021eidolonI-top1 v1: .412 (rank 239)
    - Geirhos2021eidolonII-top1 v1: .425 (rank 231)
    - Geirhos2021eidolonIII-top1 v1: .417 (rank 229)
    - Geirhos2021falsecolour-top1 v1: .905 (rank 155)
    - Geirhos2021highpass-top1 v1: .530 (rank 55)
    - Geirhos2021phasescrambling-top1 v1: .481 (rank 223)
    - Geirhos2021powerequalisation-top1 v1: .657 (rank 143)
    - Geirhos2021rotation-top1 v1: .561 (rank 195)
    - Geirhos2021silhouette-top1 v1: .350 (rank 220)
    - Geirhos2021sketch-top1 v1: .502 (rank 207)
    - Geirhos2021stylized-top1 v1: .338 (rank 198)
    - Geirhos2021uniformnoise-top1 v1: .621 (rank 46)
  - **Hermann2020**: .274 (rank 98, 2 benchmarks)
    - Hermann2020cueconflict-shape_match v1: .193 (rank 93)
    - Hermann2020cueconflict-shape_bias v1: .355 (rank 90)
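The aggregate rows appear to be plain means over their declared child benchmarks, with children that have no listed score contributing 0 — e.g. Baker2022: (.735 + .457 + .000) / 3 = .397, and Geirhos2021-error_consistency: the twelve listed scores sum to 3.641, and 3.641 / 17 ≈ .214. A minimal sketch of that roll-up; the aggregation rule is inferred from the numbers above, not taken from Brain-Score documentation:

```python
# Hedged sketch: recompute two aggregate rows from their children, assuming
# an aggregate is the mean over all declared child benchmarks and that
# children without a listed score contribute 0.
baker2022 = [0.735, 0.457, 0.000]  # 3 declared benchmarks, all listed
print(sum(baker2022) / 3)          # ~= 0.397, matching the aggregate row

geirhos_ec = [0.554, 0.316, 0.132, 0.445, 0.333, 0.392,
              0.150, 0.175, 0.162, 0.133, 0.175, 0.674]  # 12 of 17 listed
print(sum(geirhos_ec) / 17)        # ~= 0.214, matching the aggregate row
```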
How to use
```python
from brainscore_vision import load_model

model = load_model("pnasnet_large_pytorch")
model.start_task(...)       # configure a behavioral task (behavioral benchmarks)
model.start_recording(...)  # or select a cortical region to record (neural benchmarks)
model.look_at(...)          # present stimuli and collect the model's responses
```
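To reproduce one of the scores in the table, a benchmark can also be run against the model directly. A minimal sketch, assuming a standard brainscore_vision installation with network access for weights and stimuli; the benchmark identifier is taken from the scores table above and assumed to match the registry name:

```python
from brainscore_vision import load_benchmark, load_model

model = load_model("pnasnet_large_pytorch")

# Identifier as it appears in the scores table (assumed registry name).
benchmark = load_benchmark("Geirhos2021cueconflict-error_consistency")

# A benchmark is a callable: it drives the BrainModel interface shown above
# (start_task/look_at for behavioral benchmarks, start_recording for neural
# ones) and returns a ceiling-normalized Score.
score = benchmark(model)
print(score)
```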
Benchmarks bibtex
```bibtex
@article{geirhos2021partial,
  title   = {Partial success in closing the gap between human and machine vision},
  author  = {Geirhos, Robert and Narayanappa, Kantharaju and Mitzkus, Benjamin and Thieringer, Tizian and Bethge, Matthias and Wichmann, Felix A and Brendel, Wieland},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {34},
  year    = {2021},
  url     = {https://openreview.net/forum?id=QkljT4mrfs}
}

@article{BAKER2022104913,
  title   = {Deep learning models fail to capture the configural nature of human shape perception},
  author  = {Nicholas Baker and James H. Elder},
  journal = {iScience},
  volume  = {25},
  number  = {9},
  pages   = {104913},
  year    = {2022},
  issn    = {2589-0042},
  doi     = {10.1016/j.isci.2022.104913},
  url     = {https://www.sciencedirect.com/science/article/pii/S2589004222011853}
}

@article{Maniquet2024.04.02.587669,
  title        = {Recurrent issues with deep neural network models of visual recognition},
  author       = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan},
  elocation-id = {2024.04.02.587669},
  year         = {2024},
  doi          = {10.1101/2024.04.02.587669},
  publisher    = {Cold Spring Harbor Laboratory},
  journal      = {bioRxiv},
  url          = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}
}

@misc{ferguson_ngo_lee_dicarlo_schrimpf_2024,
  title     = {How Well is Visual Search Asymmetry predicted by a Binary-Choice, Rapid, Accuracy-based Visual-search, Oddball-detection (BRAVO) task?},
  author    = {Ferguson, Michael E, Jr and Ngo, Jerry and Lee, Michael and DiCarlo, James and Schrimpf, Martin},
  publisher = {OSF},
  year      = {2024},
  month     = {Jun},
  doi       = {10.17605/OSF.IO/5BA3N},
  url       = {osf.io/5ba3n}
}

@inproceedings{5206848,
  title     = {ImageNet: A large-scale hierarchical image database},
  author    = {J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}},
  booktitle = {2009 IEEE Conference on Computer Vision and Pattern Recognition},
  year      = {2009},
  pages     = {248-255}
}

@article{Hendrycks2019-di,
  title         = {Benchmarking Neural Network Robustness to Common Corruptions and Perturbations},
  author        = {Hendrycks, Dan and Dietterich, Thomas},
  year          = {2019},
  month         = {mar},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  eprint        = {1903.12261},
  url           = {https://arxiv.org/abs/1903.12261}
}

@article{hermann2020origins,
  title   = {The origins and prevalence of texture bias in convolutional neural networks},
  author  = {Hermann, Katherine and Chen, Ting and Kornblith, Simon},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {33},
  pages   = {19000--19015},
  year    = {2020},
  url     = {https://proceedings.neurips.cc/paper/2020/hash/db5f9f42a7157abe65bb145000b5871a-Abstract.html}
}
```