resnet18-deepcluster
Scores on benchmarks
Model rank shown below is with respect to all public models.
| Benchmark | Score | Rank | Data |
| --- | --- | --- | --- |
| average_vision | .106 | 383 | 81 benchmarks |
| neural_vision | .211 | 352 | 38 benchmarks |
| V1 | .086 | 387 | 24 benchmarks |
| FreemanZiemba2013.V1-pls (v2) | .258 | 239 | recordings from 102 sites in V1 |
| V2 | .153 | 248 | 2 benchmarks |
| FreemanZiemba2013.V2-pls (v2) | .306 | 173 | recordings from 103 sites in V2 |
| V4 | .349 | 299 | 5 benchmarks |
| SanghaviJozwik2020.V4-pls (v1) | .405 | 322 | recordings from 50 sites in V4 |
| Sanghavi2020.V4-pls (v1) | .587 | 288 | recordings from 47 sites in V4 |
| SanghaviMurty2020.V4-pls (v1) | .208 | 162 | recordings from 46 sites in V4 |
| MajajHong2015.V4-pls (v3) | .545 | 315 | recordings from 88 sites in V4 |
| IT | .257 | 278 | 7 benchmarks |
| SanghaviMurty2020.IT-pls (v1) | .320 | 265 | recordings from 29 sites in IT |
| Sanghavi2020.IT-pls (v1) | .509 | 215 | recordings from 88 sites in IT |
| SanghaviJozwik2020.IT-pls (v1) | .468 | 236 | recordings from 26 sites in IT |
| MajajHong2015.IT-pls (v3) | .506 | 221 | recordings from 168 sites in IT |
| Kar2019-ost (v2) | X | X | recordings from 424 sites in IT |

References for the individual benchmarks are listed in the Benchmarks BibTeX section below.
How to use
```python
from brainscore_vision import load_model

model = load_model("resnet18-deepcluster")
model.start_task(...)
model.start_recording(...)
model.look_at(...)
```
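Beyond driving the model step by step as above, a benchmark score like the ones in the table can also be computed programmatically. The sketch below is a minimal example, assuming the `score` helper exported by `brainscore_vision` (as in the Brain-Score 2.0 interface) is available in your installation; the benchmark identifier used here (`MajajHong2015public.IT-pls`, the public-data variant of the MajajHong2015.IT-pls benchmark listed above) is illustrative, and exact identifiers and data-access requirements may differ in your setup.

```python
from brainscore_vision import score

# Run this model against a single benchmark and print the resulting score.
# The benchmark presents its stimuli to the model, maps model activations to
# the neural recordings (PLS regression for the *-pls benchmarks), and returns
# a ceiling-normalized score. The identifier below is an assumption: the
# public-data variant of MajajHong2015.IT-pls from the table above.
model_score = score(
    model_identifier="resnet18-deepcluster",
    benchmark_identifier="MajajHong2015public.IT-pls",
)
print(model_score)
```

Individual benchmarks can also be loaded with `load_benchmark` and called directly on a model candidate, which can be convenient when iterating against a single dataset.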
Benchmarks BibTeX
```bibtex
@Article{Freeman2013, author={Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, Eero P. and Movshon, J. Anthony}, title={A functional and perceptual signature of the second visual area in primates}, journal={Nature Neuroscience}, year={2013}, month={Jul}, day={01}, volume={16}, number={7}, pages={974-981}, abstract={The authors examined neuronal responses in V1 and V2 to synthetic texture stimuli that replicate higher-order statistical dependencies found in natural images. V2, but not V1, responded differentially to these textures, in both macaque (single neurons) and human (fMRI). Human detection of naturalistic structure in the same images was predicted by V2 responses, suggesting a role for V2 in representing natural image structure.}, issn={1546-1726}, doi={10.1038/nn.3402}, url={https://doi.org/10.1038/nn.3402} }

@misc{Sanghavi_Jozwik_DiCarlo_2021, title={SanghaviJozwik2020}, url={osf.io/fhy36}, DOI={10.17605/OSF.IO/FHY36}, publisher={OSF}, author={Sanghavi, Sachi and Jozwik, Kamila M and DiCarlo, James J}, year={2021}, month={Nov} }

@misc{Sanghavi_DiCarlo_2021, title={Sanghavi2020}, url={osf.io/chwdk}, DOI={10.17605/OSF.IO/CHWDK}, publisher={OSF}, author={Sanghavi, Sachi and DiCarlo, James J}, year={2021}, month={Nov} }

@misc{Sanghavi_Murty_DiCarlo_2021, title={SanghaviMurty2020}, url={osf.io/fchme}, DOI={10.17605/OSF.IO/FCHME}, publisher={OSF}, author={Sanghavi, Sachi and Murty, N A R and DiCarlo, James J}, year={2021}, month={Nov} }

@article{Majaj13402, author = {Majaj, Najib J. and Hong, Ha and Solomon, Ethan A. and DiCarlo, James J.}, title = {Simple Learned Weighted Sums of Inferior Temporal Neuronal Firing Rates Accurately Predict Human Core Object Recognition Performance}, volume = {35}, number = {39}, pages = {13402--13418}, year = {2015}, doi = {10.1523/JNEUROSCI.5181-14.2015}, publisher = {Society for Neuroscience}, abstract = {To go beyond qualitative models of the biological substrate of object recognition, we ask: can a single ventral stream neuronal linking hypothesis quantitatively account for core object recognition performance over a broad range of tasks? We measured human performance in 64 object recognition tests using thousands of challenging images that explore shape similarity and identity preserving object variation. We then used multielectrode arrays to measure neuronal population responses to those same images in visual areas V4 and inferior temporal (IT) cortex of monkeys and simulated V1 population responses. We tested leading candidate linking hypotheses and control hypotheses, each postulating how ventral stream neuronal responses underlie object recognition behavior. Specifically, for each hypothesis, we computed the predicted performance on the 64 tests and compared it with the measured pattern of human performance. All tested hypotheses based on low- and mid-level visually evoked activity (pixels, V1, and V4) were very poor predictors of the human behavioral pattern. However, simple learned weighted sums of distributed average IT firing rates exactly predicted the behavioral pattern. More elaborate linking hypotheses relying on IT trial-by-trial correlational structure, finer IT temporal codes, or ones that strictly respect the known spatial substructures of IT ({\textquotedblleft}face patches{\textquotedblright}) did not improve predictive power. Although these results do not reject those more elaborate hypotheses, they suggest a simple, sufficient quantitative model: each object recognition task is learned from the spatially distributed mean firing rates (100 ms) of \~{}60,000 IT neurons and is executed as a simple weighted sum of those firing rates. SIGNIFICANCE STATEMENT We sought to go beyond qualitative models of visual object recognition and determine whether a single neuronal linking hypothesis can quantitatively account for core object recognition behavior. To achieve this, we designed a database of images for evaluating object recognition performance. We used multielectrode arrays to characterize hundreds of neurons in the visual ventral stream of nonhuman primates and measured the object recognition performance of >100 human observers. Remarkably, we found that simple learned weighted sums of firing rates of neurons in monkey inferior temporal (IT) cortex accurately predicted human performance. Although previous work led us to expect that IT would outperform V4, we were surprised by the quantitative precision with which simple IT-based linking hypotheses accounted for human behavior.}, issn = {0270-6474}, URL = {https://www.jneurosci.org/content/35/39/13402}, eprint = {https://www.jneurosci.org/content/35/39/13402.full.pdf}, journal = {Journal of Neuroscience}}

@Article{Kar2019, author={Kar, Kohitij and Kubilius, Jonas and Schmidt, Kailyn and Issa, Elias B. and DiCarlo, James J.}, title={Evidence that recurrent circuits are critical to the ventral stream's execution of core object recognition behavior}, journal={Nature Neuroscience}, year={2019}, month={Jun}, day={01}, volume={22}, number={6}, pages={974-983}, abstract={Non-recurrent deep convolutional neural networks (CNNs) are currently the best at modeling core object recognition, a behavior that is supported by the densely recurrent primate ventral stream, culminating in the inferior temporal (IT) cortex. If recurrence is critical to this behavior, then primates should outperform feedforward-only deep CNNs for images that require additional recurrent processing beyond the feedforward IT response. Here we first used behavioral methods to discover hundreds of these `challenge' images. Second, using large-scale electrophysiology, we observed that behaviorally sufficient object identity solutions emerged {\textasciitilde}30{\thinspace}ms later in the IT cortex for challenge images compared with primate performance-matched `control' images. Third, these behaviorally critical late-phase IT response patterns were poorly predicted by feedforward deep CNN activations. Notably, very-deep CNNs and shallower recurrent CNNs better predicted these late IT responses, suggesting that there is a functional equivalence between additional nonlinear transformations and recurrence. Beyond arguing that recurrent circuits are critical for rapid object identification, our results provide strong constraints for future recurrent model development.}, issn={1546-1726}, doi={10.1038/s41593-019-0392-5}, url={https://doi.org/10.1038/s41593-019-0392-5} }
```