Scores on benchmarks

Model rank shown below is with respect to all public models.

| Score | Benchmark | Rank | Details |
|---|---|---|---|
| .405 | **average_vision** | 18 | 81 benchmarks |
| .254 | **neural_vision** | 304 | 38 benchmarks |
| .093 | V1 | 365 | 24 benchmarks |
| .033 | · Coggan2024_fMRI.V1-rdm v1 | 108 | |
| .246 | · FreemanZiemba2013.V1-pls v2 [reference] | 280 | recordings from 102 sites in V1 |
| .165 | V2 | 176 | 2 benchmarks |
| .041 | · Coggan2024_fMRI.V2-rdm v1 | 110 | |
| .288 | · FreemanZiemba2013.V2-pls v2 [reference] | 266 | recordings from 103 sites in V2 |
| .401 | V4 | 39 | 5 benchmarks |
| .079 | · Coggan2024_fMRI.V4-rdm v1 | 39 | |
| .488 | · SanghaviJozwik2020.V4-pls v1 [reference] | 93 | recordings from 50 sites in V4 |
| .637 | · Sanghavi2020.V4-pls v1 [reference] | 92 | recordings from 47 sites in V4 |
| .219 | · SanghaviMurty2020.V4-pls v1 [reference] | 116 | recordings from 46 sites in V4 |
| .582 | · MajajHong2015.V4-pls v3 [reference] | 119 | recordings from 88 sites in V4 |
| .356 | IT | 75 | 7 benchmarks |
| .223 | · Bracci2019.anteriorVTC-rdm v1 | 142 | |
| .483 | · Coggan2024_fMRI.IT-rdm v1 | 56 | |
| .326 | · SanghaviMurty2020.IT-pls v1 [reference] | 258 | recordings from 29 sites in IT |
| .502 | · Sanghavi2020.IT-pls v1 [reference] | 241 | recordings from 88 sites in IT |
| .446 | · SanghaviJozwik2020.IT-pls v1 [reference] | 267 | recordings from 26 sites in IT |
| .513 | · MajajHong2015.IT-pls v3 [reference] | 189 | recordings from 168 sites in IT |
| X | · Kar2019-ost v2 [reference] | X | recordings from 424 sites in IT |
| .557 | **behavior_vision** | 3 | 43 benchmarks |
| .561 | · Rajalingham2018-i2n v2 [reference] | 34 | match-to-sample task |
| .626 | Geirhos2021-error_consistency [reference] | 11 | 17 benchmarks |
| .787 | · Geirhos2021colour-error_consistency v1 [reference] | 14 | |
| .581 | · Geirhos2021contrast-error_consistency v1 [reference] | 21 | |
| .560 | · Geirhos2021cueconflict-error_consistency v1 [reference] | 20 | |
| .619 | · Geirhos2021edge-error_consistency v1 [reference] | 18 | |
| .667 | · Geirhos2021eidolonI-error_consistency v1 [reference] | 11 | |
| .664 | · Geirhos2021eidolonII-error_consistency v1 [reference] | 9 | |
| .641 | · Geirhos2021eidolonIII-error_consistency v1 [reference] | 3 | |
| .755 | · Geirhos2021falsecolour-error_consistency v1 [reference] | 6 | |
| .367 | · Geirhos2021highpass-error_consistency v1 [reference] | 25 | |
| .579 | · Geirhos2021lowpass-error_consistency v1 [reference] | 15 | |
| .462 | · Geirhos2021phasescrambling-error_consistency v1 [reference] | 26 | |
| .548 | · Geirhos2021powerequalisation-error_consistency v1 [reference] | 23 | |
| .405 | · Geirhos2021rotation-error_consistency v1 [reference] | 30 | |
| .908 | · Geirhos2021silhouette-error_consistency v1 [reference] | 19 | |
| .512 | · Geirhos2021sketch-error_consistency v1 [reference] | 23 | |
| .851 | · Geirhos2021stylized-error_consistency v1 [reference] | 3 | |
| .743 | · Geirhos2021uniformnoise-error_consistency v1 [reference] | 3 | |
| .871 | Baker2022 | 4 | 3 benchmarks |
| .984 | · Baker2022fragmented-accuracy_delta v1 [reference] | 2 | |
| .929 | · Baker2022frankenstein-accuracy_delta v1 [reference] | 5 | |
| .698 | · Baker2022inverted-accuracy_delta v1 [reference] | 25 | |
| .375 | Maniquet2024 | 163 | 2 benchmarks |
| .106 | · Maniquet2024-confusion_similarity v1 [reference] | 176 | |
| .644 | · Maniquet2024-tasks_consistency v1 [reference] | 109 | |
| .556 | Ferguson2024 [reference] | 35 | 14 benchmarks |
| .771 | · Ferguson2024half-value_delta v1 [reference] | 49 | |
| .298 | · Ferguson2024gray_hard-value_delta v1 [reference] | 135 | |
| 1.0 | · Ferguson2024lle-value_delta v1 [reference] | 1 | |
| .389 | · Ferguson2024juncture-value_delta v1 [reference] | 50 | |
| .719 | · Ferguson2024color-value_delta v1 [reference] | 101 | |
| .621 | · Ferguson2024round_v-value_delta v1 [reference] | 75 | |
| .361 | · Ferguson2024eighth-value_delta v1 [reference] | 47 | |
| .923 | · Ferguson2024quarter-value_delta v1 [reference] | 23 | |
| .132 | · Ferguson2024convergence-value_delta v1 [reference] | 181 | |
| .326 | · Ferguson2024round_f-value_delta v1 [reference] | 104 | |
| .709 | · Ferguson2024llh-value_delta v1 [reference] | 70 | |
| .573 | · Ferguson2024circle_line-value_delta v1 [reference] | 46 | |
| .403 | · Ferguson2024gray_easy-value_delta v1 [reference] | 95 | |
| .560 | · Ferguson2024tilted_line-value_delta v1 [reference] | 119 | |
| .278 | · Hebart2023-match v1 | 101 | |
| .655 | BMD2024 | 8 | 4 benchmarks |
| .770 | · BMD2024.dotted_1Behavioral-accuracy_distance v1 | 4 | |
| .817 | · BMD2024.texture_1Behavioral-accuracy_distance v1 | 8 | |
| .577 | · BMD2024.texture_2Behavioral-accuracy_distance v1 | 11 | |
| .454 | · BMD2024.dotted_2Behavioral-accuracy_distance v1 | 8 | |
| .536 | · Coggan2024_behavior-ConditionWiseAccuracySimilarity v1 | 35 | |
| .486 | **engineering_vision** | 28 | 25 benchmarks |
| .822 | · ImageNet-top1 v1 [reference] | 18 | |
| .499 | · ObjectNet-top1 v1 [reference] | 2 | |
| .740 | Geirhos2021-top1 [reference] | 16 | 17 benchmarks |
| .994 | · Geirhos2021colour-top1 v1 [reference] | 16 | |
| .985 | · Geirhos2021contrast-top1 v1 [reference] | 20 | |
| .334 | · Geirhos2021cueconflict-top1 v1 [reference] | 32 | |
| .775 | · Geirhos2021edge-top1 v1 [reference] | 17 | |
| .560 | · Geirhos2021eidolonI-top1 v1 [reference] | 34 | |
| .602 | · Geirhos2021eidolonII-top1 v1 [reference] | 23 | |
| .633 | · Geirhos2021eidolonIII-top1 v1 [reference] | 31 | |
| .988 | · Geirhos2021falsecolour-top1 v1 [reference] | 20 | |
| .838 | · Geirhos2021highpass-top1 v1 [reference] | 17 | |
| .556 | · Geirhos2021lowpass-top1 v1 [reference] | 26 | |
| .752 | · Geirhos2021phasescrambling-top1 v1 [reference] | 38 | |
| .900 | · Geirhos2021powerequalisation-top1 v1 [reference] | 31 | |
| .895 | · Geirhos2021rotation-top1 v1 [reference] | 21 | |
| .631 | · Geirhos2021silhouette-top1 v1 [reference] | 23 | |
| .905 | · Geirhos2021sketch-top1 v1 [reference] | 13 | |
| .540 | · Geirhos2021stylized-top1 v1 [reference] | 42 | |
| .696 | · Geirhos2021uniformnoise-top1 v1 [reference] | 23 | |
| .370 | Hermann2020 [reference] | 36 | 2 benchmarks |
| .299 | · Hermann2020cueconflict-shape_match v1 [reference] | 32 | |
| .442 | · Hermann2020cueconflict-shape_bias v1 [reference] | 56 | |

How to use
```python
from brainscore_vision import load_model

model = load_model("resnext101_32x48d_wsl")
model.start_task(...)
model.start_recording(...)
model.look_at(...)
```
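
The calls above are the bare BrainModel interface, with arguments elided as `...` on the original page. As a rough sketch of how per-benchmark scores like those in the table are produced, the snippet below loads one public benchmark and applies it to the model; the `load_benchmark` call and the identifier `MajajHong2015public.IT-pls` are assumptions here, so check the current Brain-Score registry for the exact names.

```python
# Hedged sketch: scoring resnext101_32x48d_wsl on a single public Brain-Score benchmark.
# The benchmark identifier is an assumption -- verify it against the brainscore_vision registry.
from brainscore_vision import load_benchmark, load_model

model = load_model("resnext101_32x48d_wsl")
benchmark = load_benchmark("MajajHong2015public.IT-pls")  # assumed public IT benchmark identifier

# A Brain-Score benchmark is callable on a BrainModel: it presents its stimuli to the model
# and compares the model's responses against the primate data, returning a ceiled score.
score = benchmark(model)
print(score)
```

If the identifier resolves, the ceiled score returned this way is of the same kind as the per-benchmark values reported in the table above.
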
Benchmarks BibTeX

@article{santurkar2019computer,
  title={Computer Vision with a Single (Robust) Classifier},
  author={Shibani Santurkar and Dimitris Tsipras and Brandon Tran and Andrew Ilyas and Logan Engstrom and Aleksander Madry},
  journal={arXiv preprint arXiv:1906.09453},
  year={2019}
}

@article{Freeman2013,
  author={Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, Eero P. and Movshon, J. Anthony},
  title={A functional and perceptual signature of the second visual area in primates},
  journal={Nature Neuroscience},
  year={2013},
  month={Jul},
  volume={16},
  number={7},
  pages={974-981},
  issn={1546-1726},
  doi={10.1038/nn.3402},
  url={https://doi.org/10.1038/nn.3402}
}

@misc{Sanghavi_Jozwik_DiCarlo_2021,
  title={SanghaviJozwik2020},
  author={Sanghavi, Sachi and Jozwik, Kamila M and DiCarlo, James J},
  publisher={OSF},
  url={osf.io/fhy36},
  doi={10.17605/OSF.IO/FHY36},
  year={2021},
  month={Nov}
}

@misc{Sanghavi_DiCarlo_2021,
  title={Sanghavi2020},
  author={Sanghavi, Sachi and DiCarlo, James J},
  publisher={OSF},
  url={osf.io/chwdk},
  doi={10.17605/OSF.IO/CHWDK},
  year={2021},
  month={Nov}
}

@misc{Sanghavi_Murty_DiCarlo_2021,
  title={SanghaviMurty2020},
  author={Sanghavi, Sachi and Murty, N A R and DiCarlo, James J},
  publisher={OSF},
  url={osf.io/fchme},
  doi={10.17605/OSF.IO/FCHME},
  year={2021},
  month={Nov}
}

@article{Majaj13402,
  author={Majaj, Najib J. and Hong, Ha and Solomon, Ethan A. and DiCarlo, James J.},
  title={Simple Learned Weighted Sums of Inferior Temporal Neuronal Firing Rates Accurately Predict Human Core Object Recognition Performance},
  journal={Journal of Neuroscience},
  volume={35},
  number={39},
  pages={13402--13418},
  year={2015},
  publisher={Society for Neuroscience},
  issn={0270-6474},
  doi={10.1523/JNEUROSCI.5181-14.2015},
  url={https://www.jneurosci.org/content/35/39/13402}
}

@article{Kar2019,
  author={Kar, Kohitij and Kubilius, Jonas and Schmidt, Kailyn and Issa, Elias B. and DiCarlo, James J.},
  title={Evidence that recurrent circuits are critical to the ventral stream's execution of core object recognition behavior},
  journal={Nature Neuroscience},
  year={2019},
  month={Jun},
  volume={22},
  number={6},
  pages={974-983},
  issn={1546-1726},
  doi={10.1038/s41593-019-0392-5},
  url={https://doi.org/10.1038/s41593-019-0392-5}
}

@article{Rajalingham240614,
  author={Rajalingham, Rishi and Issa, Elias B. and Bashivan, Pouya and Kar, Kohitij and Schmidt, Kailyn and DiCarlo, James J.},
  title={Large-scale, high-resolution comparison of the core visual object recognition behavior of humans, monkeys, and state-of-the-art deep artificial neural networks},
  journal={bioRxiv},
  elocation-id={240614},
  year={2018},
  publisher={Cold Spring Harbor Laboratory},
  doi={10.1101/240614},
  url={https://www.biorxiv.org/content/early/2018/02/12/240614}
}

@article{geirhos2021partial,
  title={Partial success in closing the gap between human and machine vision},
  author={Geirhos, Robert and Narayanappa, Kantharaju and Mitzkus, Benjamin and Thieringer, Tizian and Bethge, Matthias and Wichmann, Felix A and Brendel, Wieland},
  journal={Advances in Neural Information Processing Systems},
  volume={34},
  year={2021},
  url={https://openreview.net/forum?id=QkljT4mrfs}
}

@article{BAKER2022104913,
  title={Deep learning models fail to capture the configural nature of human shape perception},
  author={Nicholas Baker and James H. Elder},
  journal={iScience},
  volume={25},
  number={9},
  pages={104913},
  year={2022},
  issn={2589-0042},
  keywords={Biological sciences, Neuroscience, Sensory neuroscience},
  doi={10.1016/j.isci.2022.104913},
  url={https://www.sciencedirect.com/science/article/pii/S2589004222011853}
}

@article{Maniquet2024.04.02.587669,
  author={Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan},
  title={Recurrent issues with deep neural network models of visual recognition},
  journal={bioRxiv},
  elocation-id={2024.04.02.587669},
  year={2024},
  publisher={Cold Spring Harbor Laboratory},
  doi={10.1101/2024.04.02.587669},
  url={https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}
}

@misc{ferguson_ngo_lee_dicarlo_schrimpf_2024,
  title={How Well is Visual Search Asymmetry predicted by a Binary-Choice, Rapid, Accuracy-based Visual-search, Oddball-detection (BRAVO) task?},
  author={Ferguson, Michael E, Jr and Ngo, Jerry and Lee, Michael and DiCarlo, James and Schrimpf, Martin},
  publisher={OSF},
  url={osf.io/5ba3n},
  doi={10.17605/OSF.IO/5BA3N},
  year={2024},
  month={Jun}
}

@inproceedings{5206848,
  author={J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}},
  title={ImageNet: A large-scale hierarchical image database},
  booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},
  pages={248-255},
  year={2009}
}

@inproceedings{DBLP:conf/nips/BarbuMALWGTK19,
  author={Andrei Barbu and David Mayo and Julian Alverio and William Luo and Christopher Wang and Dan Gutfreund and Josh Tenenbaum and Boris Katz},
  title={ObjectNet: {A} large-scale bias-controlled dataset for pushing the limits of object recognition models},
  booktitle={NeurIPS 2019},
  pages={9448--9458},
  year={2019},
  url={https://proceedings.neurips.cc/paper/2019/hash/97af07a14cacba681feacf3012730892-Abstract.html}
}

@article{hermann2020origins,
  title={The origins and prevalence of texture bias in convolutional neural networks},
  author={Hermann, Katherine and Chen, Ting and Kornblith, Simon},
  journal={Advances in Neural Information Processing Systems},
  volume={33},
  pages={19000--19015},
  year={2020},
  url={https://proceedings.neurips.cc/paper/2020/hash/db5f9f42a7157abe65bb145000b5871a-Abstract.html}
}