% polychronies.bib -- BibTeX bibliography database (GitHub page scaffolding and line-number gutter removed)
@article{kreuz_measuring_2007,
title = {Measuring spike train synchrony},
volume = {165},
issn = {0165-0270},
doi = {10.1016/j.jneumeth.2007.05.031},
abstract = {Estimating the degree of synchrony or reliability between two or more spike trains is a frequent task in both experimental and computational neuroscience. In recent years, many different methods have been proposed that typically compare the timing of spikes on a certain time scale to be optimized by the analyst. Here, we propose the ISI-distance, a simple complementary approach that extracts information from the interspike intervals by evaluating the ratio of the instantaneous firing rates. The method is parameter free, time scale independent and easy to visualize as illustrated by an application to real neuronal spike trains obtained in vitro from rat slices. In a comparison with existing approaches on spike trains extracted from a simulated Hindmarsh-Rose network, the ISI-distance performs as well as the best time-scale-optimized measure based on spike timing.},
language = {eng},
number = {1},
journal = {Journal of Neuroscience Methods},
author = {Kreuz, Thomas and Haas, Julie S. and Morelli, Alice and Abarbanel, Henry D. I. and Politi, Antonio},
month = sep,
year = {2007},
pmid = {17628690},
keywords = {Action Potentials, Animals, Electrophysiology, Neurons, Rats},
pages = {151--161},
}
@article{chicharro_what_2011,
title = {What can spike train distances tell us about the neural code?},
volume = {199},
doi = {10/dsjcrr},
number = {1},
journal = {Journal of Neuroscience Methods},
author = {Chicharro, Daniel and Kreuz, Thomas and Andrzejak, Ralph G.},
month = jul,
year = {2011},
pages = {146--165},
}
@article{kreuz_monitoring_2013,
title = {Monitoring spike train synchrony},
volume = {109},
doi = {10/f4r83w},
number = {5},
journal = {Journal of Neurophysiology},
author = {Kreuz, T. and Chicharro, D. and Houghton, C. and Andrzejak, R. G. and Mormann, F.},
month = mar,
year = {2013},
pages = {1457--1472},
}
@article{Kreuz:2016qgo,
title = {Leaders and followers: quantifying consistency in spatio-temporal propagation patterns},
volume = {19},
issn = {1367-2630},
shorttitle = {Leaders and followers},
url = {https://doi.org/10.1088/1367-2630/aa68c3},
doi = {10.1088/1367-2630/aa68c3},
abstract = {Repetitive spatio-temporal propagation patterns are encountered in fields as wide-ranging as climatology, social communication and network science. In neuroscience, perfectly consistent repetitions of the same global propagation pattern are called a synfire pattern. For any recording of sequences of discrete events (in neuroscience terminology: sets of spike trains) the questions arise how closely it resembles such a synfire pattern and which are the spike trains that lead/follow. Here we address these questions and introduce an algorithm built on two new indicators, termed SPIKE-Order and Spike Train Order, that define the Synfire Indicator value, which allows to sort multiple spike trains from leader to follower and to quantify the consistency of the temporal leader-follower relationships for both the original and the optimized sorting. We demonstrate our new approach using artificially generated datasets before we apply it to analyze the consistency of propagation patterns in two real datasets from neuroscience (Giant Depolarized Potentials in mice slices) and climatology (El Ni{\~n}o sea surface temperature recordings). The new algorithm is distinguished by conceptual and practical simplicity, low computational cost, as well as flexibility and universality.},
language = {en},
number = {4},
urldate = {2019-03-18},
journal = {New Journal of Physics},
author = {Kreuz, Thomas and Satuvuori, Eero and Pofahl, Martin and Mulansky, Mario},
month = apr,
year = {2017},
eprint = {1610.07986},
archiveprefix = {arXiv},
primaryclass = {physics.data-an},
note = {2 citations (INSPIRE 2023/7/20)
0 citations w/o self (INSPIRE 2023/7/20)},
pages = {043028},
}
@article{grimaldi_robust_2023,
title = {A robust event-driven approach to always-on object recognition},
url = {https://laurentperrinet.github.io/publication/grimaldi-23/},
abstract = {We propose a neuromimetic architecture able to perform always-on pattern recognition. To achieve this, we extended an existing event-based algorithm [1], which introduced novel spatio-temporal features as a Hierarchy Of Time-Surfaces (HOTS). Built from asynchronous events acquired by a neuromorphic camera, these time surfaces allow to code the local dynamics of a visual scene and to create an efficient event-based pattern recognition architecture. Inspired by neuroscience, we extended this method to increase its performance. Our first contribution was to add a homeostatic gain control on the activity of neurons to improve the learning of spatio-temporal patterns [2]. A second contribution is to draw an analogy between the HOTS algorithm and Spiking Neural Networks (SNN). Following that analogy, our last contribution is to modify the classification layer and remodel the offline pattern categorization method previously used into an online and event-driven one. This classifier uses the spiking output of the network to define novel time surfaces and we then perform online classification with a neuromimetic implementation of a multinomial logistic regression. Not only do these improvements increase consistently the performances of the network, they also make this event-driven pattern recognition algorithm online and bio-realistic. Results were validated on different datasets: DVS barrel [3], Poker-DVS [4] and N-MNIST [5]. We foresee to develop the SNN version of the method and to extend this fully event-driven approach to more naturalistic tasks, notably for always-on, ultra-fast object categorization.},
urldate = {2022-01-13},
journal = {Neural Networks},
author = {Grimaldi, Antoine and Boutin, Victor and Ieng, Sio-Hoi and Benosman, Ryad and Perrinet, Laurent U},
year = {2023},
keywords = {efficient coding, event-based vision, homeostasis, neuromorphic hardware, online classification},
}
@article{grimaldi_learning_2023,
title = {Learning heterogeneous delays in a layer of spiking neurons for fast motion detection},
copyright = {Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC-BY-NC-SA)},
url = {https://laurentperrinet.github.io/publication/grimaldi-23-bc/},
abstract = {The response of a biological neuron depends on the precise timing of afferent spikes. This temporal aspect of the neuronal code is essential in understanding information processing in neurobiology and applies particularly well to the output of neuromorphic hardware such as event-based cameras. Ho...},
language = {en},
urldate = {2022-11-06},
journal = {Biological Cybernetics},
author = {Grimaldi, Antoine and Perrinet, Laurent U},
month = jun,
year = {2023},
}
@inproceedings{grimaldi_event-based_2021,
title = {From event-based computations to a bio-plausible {Spiking} {Neural} {Network}},
copyright = {All rights reserved},
url = {https://symposium.fchampalimaud.science/Poster-sessions},
abstract = {We propose a neuromimetic online classifier for always-on digit recognition. To achieve this, we extend an existing event-based algorithm which introduced novel spatio-temporal features: time surfaces. Built from asynchronous events acquired by a neuromorphic camera, these time surfaces allow to code the local dynamics of a visual scene and create an efficient hierarchical event-based pattern recognition architecture. Its formalism was previously adapted in the computational neuroscience domain by showing it may be implemented using a Spiking Neural Network (SNN) of leaky integrate-and-fire models and Hebbian learning. Here, we add an online classification layer using a multinomial logistic regression which is compatible with a neural implementation. A decision can be taken at any arbitrary time by taking the of the probability values associated to each class. We extend the parallel with computational neuroscience by demonstrating that this classification layer is also equivalent to a layer of spiking neurons with a Hebbian-like learning mechanism. Our method obtains state-of-the-art performances on the N-MNIST dataset and we show that it is robust to both spatial and temporal jitter. As a summary, we were able to develop a neuromimetic SNN model for online digit classification. We aim at pursuing the study of this architecture for natural scenes and hope to offer insights on the efficiency of neural computations, and in particular how mechanisms of decision-making may be formed.},
booktitle = {Champalimaud research symposium ({CRS21})},
author = {Grimaldi, Antoine and Boutin, Victor and Ieng, Sio-Hoi and Benosman, Ryad and Perrinet, Laurent U},
month = oct,
year = {2021},
note = {tex.grants: aprovis3D,anr-anb},
keywords = {efficient coding, event-based vision, homeostasis, neuromorphic hardware, online classification},
}
@inproceedings{grimaldi_robust_2021,
title = {A robust bio-inspired approach to event-driven object recognition},
copyright = {All rights reserved},
url = {https://laurentperrinet.github.io/publication/grimaldi-21-cosyne/},
abstract = {We propose a neuromimetic architecture able to perform online pattern recognition. To achieve this, we extended the existing event-based algorithm from Lagorce et al (2017) which introduced novel spatio-temporal features: time-surfaces. Built from asynchronous events acquired by a neuromorphic camera, these time surfaces allow to code the local dynamics of a visual scene and to create an efficient hierarchical event-based pattern recognition architecture. Inspired by biological findings and the efficient coding hypothesis, our main contribution is to integrate homeostatic regulation to the Hebbian learning rule. Indeed, in order to be optimally informative, average neural activity within a layer should be equally balanced across neurons. We used that principle to regularize neurons within the same layer by setting a gain depending on their past activity and such that they emit spikes with balanced firing rates. The efficiency of this technique was first demonstrated through a robust improvement in spatio-temporal patterns which were learned during the training phase. We validated classification performance with the widely used N-MNIST dataset reaching 87.3\% accuracy with homeostasis compared to 72.5\% accuracy without homeostasis. Finally, by studying the impact of input jitter on classification highlights resilience of this method. We expect to extend this fully event-driven approach to more naturalistic tasks, notably for ultra-fast object categorization.},
booktitle = {Computational and systems neuroscience (cosyne) 2021},
author = {Grimaldi, Antoine and Boutin, Victor and Ieng, Sio-Hoi and Perrinet, Laurent U and Benosman, Ryad},
month = feb,
year = {2021},
keywords = {efficient coding, event-based vision, homeostasis, neuromorphic hardware, online classification},
}
@article{grimaldi_precise_2023,
title = {Precise {Spiking} {Motifs} in {Neurobiological} and {Neuromorphic} {Data}},
volume = {13},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {2076-3425},
url = {https://www.mdpi.com/2076-3425/13/1/68},
doi = {10.3390/brainsci13010068},
abstract = {Why do neurons communicate through spikes? By definition, spikes are all-or-none neural events which occur at continuous times. In other words, spikes are on one side binary, existing or not without further details, and on the other, can occur at any asynchronous time, without the need for a centralized clock. This stands in stark contrast to the analog representation of values and the discretized timing classically used in digital processing and at the base of modern-day neural networks. As neural systems almost systematically use this so-called event-based representation in the living world, a better understanding of this phenomenon remains a fundamental challenge in neurobiology in order to better interpret the profusion of recorded data. With the growing need for intelligent embedded systems, it also emerges as a new computing paradigm to enable the efficient operation of a new class of sensors and event-based computers, called neuromorphic, which could enable significant gains in computation time and energy consumption—a major societal issue in the era of the digital economy and global warming. In this review paper, we provide evidence from biology, theory and engineering that the precise timing of spikes plays a crucial role in our understanding of the efficiency of neural networks.},
language = {en},
number = {1},
urldate = {2023-01-31},
journal = {Brain Sciences},
author = {Grimaldi, Antoine and Gruel, Amélie and Besnainou, Camille and Jérémie, Jean-Nicolas and Martinet, Jean and Perrinet, Laurent U.},
month = jan,
year = {2023},
note = {Number: 1
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {asynchronous computing, computational neuroscience, heterogeneous delays, neurobiology, neuromorphic engineering, polychronization, spikes, spiking motifs},
pages = {68},
}
@article{vacher_bayesian_2018,
title = {Bayesian modeling of motion perception using dynamical stochastic textures},
copyright = {All rights reserved},
url = {https://www.mitpressjournals.org/doi/abs/10.1162/neco\_a\_01142},
doi = {10.1162/neco_a_01142},
abstract = {A common practice to account for psychophysical biases in vision is to frame them as consequences of a dynamic process relying on optimal inference with respect to a generative model. The present study details the complete formulation of such a generative model intended to probe visual motion perception. It is first derived in a set of axiomatic steps constrained by biological plausibility. We then extend previous contributions by detailing three equivalent formulations of the Gaussian dynamic texture model. First, the composite dynamic textures are constructed by the random aggregation of warped patterns, which can be viewed as 3D Gaussian fields. Second, these textures are cast as solutions to a stochastic partial differential equation (sPDE). This essential step enables real time, on-the-fly, texture synthesis using time-discretized auto-regressive processes. It also allows for the derivation of a local motion-energy model, which corresponds to the log-likelihood of the probability density. The log-likelihoods are finally essential for the construction of a Bayesian inference framework. We use the model to probe speed perception in humans psychophysically using zoom-like changes in stimulus spatial frequency content. The likelihood is contained within the generative model and we chose a slow speed prior consistent with previous literature. We then validated the fitting process of the model using synthesized data. The human data replicates previous findings that relative perceived speed is positively biased by spatial frequency increments. The effect cannot be fully accounted for by previous models, but the current prior acting on the spatio-temporal likelihoods has proved necessary in accounting for the perceptual bias.},
journal = {Neural Computation},
author = {Vacher, Jonathan and Meso, Andrew Isaac and Perrinet, Laurent U and Peyré, Gabriel},
month = nov,
year = {2018},
note = {tex.ids= Vacher18
tex.bdsk-url-2: https://doi.org/10.1162/neco\_a\_01142
tex.date-modified: 2019-11-12 13:43:38 +0100
tex.grants: anr-speed
tex.preprint: https://arxiv.org/abs/1611.01390
tex.url: https://www.mitpressjournals.org/doi/abs/10.1162/neco\_a\_01142
publisher: MIT Press},
keywords = {Bayesian model, Psychophysics, motion detection, motion-clouds},
}
@inproceedings{mansour_pour_speed_2018,
title = {Speed uncertainty and motion perception with naturalistic random textures},
copyright = {All rights reserved},
url = {https://laurentperrinet.github.io/publication/mansour-18-vss},
doi = {10.1167/18.10.345},
abstract = {It is still not fully understood how visual system integrates motion energy across different spatial and temporal frequencies to build a coherent percept of the global motion under the complex, noisy naturalistic conditions. We addressed this question by manipulating local speed variability distribution (i. e. speed bandwidth) using a well-controlled class of broadband random-texture stimuli called Motion Clouds (MCs) with continuous naturalistic spatiotemporal frequency spectra (Sanz-Leon et al., 2012, ; Simoncini et al., 2012). In a first 2AFC experiment on speed discrimination, participants had to compare the speed of a broad speed bandwidth MC (range: 0.05-8 $^{\circ}$/s) moving at 1 of 5 possible mean speeds (ranging from 5 to 13 $^{\circ}$/s) to that of another MC with a small speed bandwidth (SD: 0.05 $^{\circ}$/s), always moving at a mean speed of 10 $^{\circ}$/s. We found that MCs with larger speed bandwidth (between 0.05-0.5 $^{\circ}$/s) were perceived moving faster. Within this range, speed uncertainty results in over-estimating stimulus velocity. However, beyond a critical bandwidth (SD: 0.5 $^{\circ}$/s), perception of a coherent speed was lost. In a second 2AFC experiment on direction discrimination, participants had to estimate the motion direction of moving MCs with different speed bandwidths. We found that for large band MCs participant could no longer discriminate motion direction. These results suggest that when increasing speed bandwidth from small to large range, the observer experiences different perceptual regimes. We then decided to run a Maximum Likelihood Difference Scaling (Knoblauch \& Maloney, 2008) experiment with our speed bandwidth stimuli to investigate these different possible perceptual regimes. We identified three regimes within this space that correspond to motion coherency, motion transparency and motion incoherency.
These results allow to further characterize the shape of the interactions kernel observed between different speed tuned channels and different spatiotemporal scales (Gekas et al ., 2017) that underlies global velocity estimation.},
booktitle = {Journal of {Vision}, {Vol}.18, 345, proceedings of {VSS}},
author = {Mansour Pour, Kiana and Gekas, Nikos and Mamassian, Pascal and Perrinet, Laurent U and Montagnini, Anna and Masson, Guillaume S},
year = {2018},
note = {00000
tex.bdsk-url-2: https://doi.org/10.1167/18.10.345
tex.date-added: 2019-02-25 23:39:35 +0100
tex.date-modified: 2019-07-23 11:33:25 +0200
tex.number: 26.472},
keywords = {motion detection},
}
@article{masquelier_competitive_2009,
title = {Competitive {STDP}-{Based} {Spike} {Pattern} {Learning}},
volume = {21},
issn = {0899-7667, 1530-888X},
url = {http://www.mitpressjournals.org/doi/10.1162/neco.2008.06-08-804},
doi = {10.1162/neco.2008.06-08-804},
language = {en},
number = {5},
urldate = {2018-09-10},
journal = {Neural Computation},
author = {Masquelier, Timothée and Guyonneau, Rudy and Thorpe, Simon J.},
month = may,
year = {2009},
note = {00203},
pages = {1259--1276},
}
@article{masquelier_unsupervised_2007,
title = {Unsupervised {Learning} of {Visual} {Features} through {Spike} {Timing} {Dependent} {Plasticity}},
volume = {3},
issn = {1553-7358},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.0030031},
doi = {10.1371/journal.pcbi.0030031},
abstract = {Spike timing dependent plasticity (STDP) is a learning rule that modifies synaptic strength as a function of the relative timing of pre- and postsynaptic spikes. When a neuron is repeatedly presented with similar inputs, STDP is known to have the effect of concentrating high synaptic weights on afferents that systematically fire early, while postsynaptic spike latencies decrease. Here we use this learning rule in an asynchronous feedforward spiking neural network that mimics the ventral visual pathway and shows that when the network is presented with natural images, selectivity to intermediate-complexity visual features emerges. Those features, which correspond to prototypical patterns that are both salient and consistently present in the images, are highly informative and enable robust object recognition, as demonstrated on various classification tasks. Taken together, these results show that temporal codes may be a key to understanding the phenomenal processing speed achieved by the visual system and that STDP can lead to fast and selective responses.},
language = {en},
number = {2},
urldate = {2018-09-10},
journal = {PLOS Computational Biology},
author = {Masquelier, Timothée and Thorpe, Simon J.},
month = feb,
year = {2007},
note = {00314},
pages = {e31},
}
@article{olshausen_emergence_1996,
title = {Emergence of simple-cell receptive field properties by learning a sparse code for natural images.},
volume = {381},
issn = {0028-0836},
url = {https://www.nature.com/doifinder/10.1038/381607a0},
doi = {10.1038/381607a0},
abstract = {The receptive fields of simple cells in mammalian primary visual cortex can be characterized as being spatially localized, oriented and bandpass (selective to structure at different spatial scales), comparable to the basis functions of wavelet transforms. One approach to understanding such response properties of visual neurons has been to consider their relationship to the statistical structure of natural images in terms of efficient coding. Along these lines, a number of studies have attempted to train unsupervised learning algorithms on natural images in the hope of developing receptive fields with similar properties, but none has succeeded in producing a full set that spans the image space and contains all three of the above properties. Here we investigate the proposal that a coding strategy that maximizes sparseness is sufficient to account for these properties. We show that a learning algorithm that attempts to find sparse linear codes for natural scenes will develop a complete family of localized, oriented, bandpass receptive fields, similar to those found in the primary visual cortex. The resulting sparse image code provides a more efficient representation for later stages of processing because it possesses a higher degree of statistical independence among its outputs.},
number = {6583},
journal = {Nature},
author = {Olshausen, Bruno A. and Field, David J.},
year = {1996},
pmid = {8637596},
note = {00000},
keywords = {Algorithms, Learning, Models, Neurological, Neurons, Neurons: physiology, Ocular, Ocular: physiology, Vision, Visual Cortex, Visual Cortex: cytology, Visual Cortex: physiology, anr-trax, bicv-sparse, perrinetadamsfriston14, sparse\_coding, sparse\_hebbian\_learning, sparse\_spike\_coding},
pages = {607--609},
}
@misc{rasetto_challenges_2022,
title = {The {Challenges} {Ahead} for {Bio}-inspired {Neuromorphic} {Event} {Processors}: {How} {Memristors} {Dynamic} {Properties} {Could} {Revolutionize} {Machine} {Learning}},
shorttitle = {The {Challenges} {Ahead} for {Bio}-inspired {Neuromorphic} {Event} {Processors}},
url = {http://arxiv.org/abs/2201.12673},
abstract = {Neuromorphic engineering has led to the necessary process of rethinking of how we process and integrate information, analyze data, and use the resulting insights to improve computation and avoid the current high power and latency of Artificial Intelligence (AI) hardware. Current neuromorphic processors are, however, limited by digital technologies, which cannot reproduce the abilities of biological neural computation in terms of power, latency and area cost. In this paper, we show that the combined use of the dynamic properties of memristors to implement a model of synaptic integration and the determination of the correct level of abstraction of biological neural networks has the potential to open a new range of capabilities for neuromorphic processors. We test this approach using a novel three-terminal LixWO3 electrochemical memristor, by deriving its conductance model and using it to emulate synaptic temporal kernel computation in the context of a pattern recognition task. We show that these devices allow for robust results with no loss in precision while opening the path for an energy efficient approach to build novel bio-inspired processing units in silicon.},
language = {en},
urldate = {2022-02-02},
eprint = {2201.12673},
archiveprefix = {arXiv},
primaryclass = {cs.ET},
author = {Rasetto, Marco and Wan, Qingzhou and Akolkar, Himanshu and Shi, Bertram and Xiong, Feng and Benosman, Ryad},
month = jan,
year = {2022},
}
@article{dan_efficient_1996,
title = {Efficient coding of natural scenes in the lateral geniculate nucleus: experimental test of a computational theory},
volume = {16},
doi = {10.1523/jneurosci.16-10-03351.1996},
number = {10},
journal = {The Journal of neuroscience : the official journal of the Society for Neuroscience},
author = {Dan, Yang and Atick, Joseph J. and Reid, R. C.},
month = may,
year = {1996},
pages = {3351--3362},
}
@article{brunel_phase_2000,
title = {Phase diagrams of sparsely connected networks of excitatory and inhibitory spiking neurons},
volume = {32-33},
issn = {09252312},
url = {http://linkinghub.elsevier.com/retrieve/pii/S092523120000179X},
doi = {10.1016/s0925-2312(00)00179-x},
language = {en},
urldate = {2019-01-14},
journal = {Neurocomputing},
author = {Brunel, Nicolas},
year = {2000},
note = {00009},
keywords = {Integrate-and-fire neuron, Neural network, Oscillations, Synchrony},
pages = {307--312},
}
@article{priebe_tuning_2006,
title = {Tuning for {Spatiotemporal} {Frequency} and {Speed} in {Directionally} {Selective} {Neurons} of {Macaque} {Striate} {Cortex}},
volume = {26},
doi = {10.1523/jneurosci.3936-05.2006},
number = {11},
journal = {Journal of Neuroscience},
author = {Priebe, N. J.},
month = mar,
year = {2006},
note = {00213
tex.ids= Priebe2006a},
pages = {2941--2950},
}
@article{engbert_integrated_2011,
title = {An integrated model of fixational eye movements and microsaccades},
volume = {108},
copyright = {© . Freely available online through the PNAS open access option.},
issn = {0027-8424, 1091-6490},
url = {https://www.pnas.org/content/108/39/E765},
doi = {10.1073/pnas.1102730108},
abstract = {When we fixate a stationary target, our eyes generate miniature (or fixational) eye movements involuntarily. These fixational eye movements are classified as slow components (physiological drift, tremor) and microsaccades, which represent rapid, small-amplitude movements. Here we propose an integrated mathematical model for the generation of slow fixational eye movements and microsaccades. The model is based on the concept of self-avoiding random walks in a potential, a process driven by a self-generated activation field. The self-avoiding walk generates persistent movements on a short timescale, whereas, on a longer timescale, the potential produces antipersistent motions that keep the eye close to an intended fixation position. We introduce microsaccades as fast movements triggered by critical activation values. As a consequence, both slow movements and microsaccades follow the same law of motion; i.e., movements are driven by the self-generated activation field. Thus, the model contributes a unified explanation of why it has been a long-standing problem to separate slow movements and microsaccades with respect to their motion-generating principles. We conclude that the concept of a self-avoiding random walk captures fundamental properties of fixational eye movements and provides a coherent theoretical framework for two physiologically distinct movement types.},
language = {en},
number = {39},
urldate = {2021-02-18},
journal = {Proceedings of the National Academy of Sciences},
author = {Engbert, Ralf and Mergenthaler, Konstantin and Sinn, Petra and Pikovsky, Arkady},
month = sep,
year = {2011},
pmid = {21873243},
note = {Publisher: National Academy of Sciences
Section: PNAS Plus},
keywords = {⛔ No INSPIRE recid found},
pages = {E765--E770},
}
@article{simonyan_very_2015,
title = {Very {Deep} {Convolutional} {Networks} for {Large}-{Scale} {Image} {Recognition}},
url = {http://arxiv.org/abs/1409.1556},
abstract = {In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.},
urldate = {2021-05-24},
author = {Simonyan, Karen and Zisserman, Andrew},
month = apr,
year = {2015},
eprint = {1409.1556},
eprinttype = {arXiv},
eprintclass = {cs.CV},
note = {204 citations (INSPIRE 2023/7/17)
204 citations w/o self (INSPIRE 2023/7/17)
arXiv:1409.1556 [cs.CV]},
keywords = {\#nosource, Computer Science - Computer Vision and Pattern Recognition, ⛔ No DOI found},
}
@inproceedings{state_training_2019,
address = {Cham},
series = {Lecture {Notes} in {Computer} {Science}},
title = {Training {Delays} in {Spiking} {Neural} {Networks}},
isbn = {978-3-030-30487-4},
doi = {10.1007/978-3-030-30487-4_54},
abstract = {Spiking Neural Networks (SNNs) are a promising computational paradigm, both to understand biological information processing and for low-power, embedded chips. Although SNNs are known to encode information in the precise timing of spikes, conventional artificial learning algorithms do not take this into account directly. In this work, we implement the spike timing by training the synaptic delays in a single layer SNN. We use two different approaches: a classical gradient descent and a direct algebraic method that is based on a complex-valued encoding of the spikes. Both algorithms are equally able to correctly solve simple detection tasks. Our work provides new optimization methods for the data analysis of highly time-dependent data and training methods for neuromorphic chips.},
language = {en},
booktitle = {Artificial {Neural} {Networks} and {Machine} {Learning} – {ICANN} 2019: {Theoretical} {Neural} {Computation}},
publisher = {Springer International Publishing},
author = {State, Laura and Vilimelis Aceituno, Pau},
editor = {Tetko, Igor V. and Kůrková, Věra and Karpov, Pavel and Theis, Fabian},
year = {2019},
keywords = {⛔ No INSPIRE recid found},
pages = {713--717},
}
@article{hazan_memory_2022,
title = {Memory via {Temporal} {Delays} in weightless {Spiking} {Neural} {Network}},
url = {http://arxiv.org/abs/2202.07132},
abstract = {A common view in the neuroscience community is that memory is encoded in the connection strength between neurons. This perception led artificial neural network models to focus on connection weights as the key variables to modulate learning. In this paper, we present a prototype for weightless spiking neural networks that can perform a simple classification task. The memory in this network is stored in the timing between neurons, rather than the strength of the connection, and is trained using a Hebbian Spike Timing Dependent Plasticity (STDP), which modulates the delays of the connection.},
urldate = {2022-10-27},
author = {Hazan, Hananel and Caby, Simon and Earl, Christopher and Siegelmann, Hava and Levin, Michael},
month = feb,
year = {2022},
eprint = {2202.07132},
eprinttype = {arXiv},
note = {arXiv:2202.07132 [cs, q-bio, stat]},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing, Quantitative Biology - Neurons and Cognition, Statistics - Computation, ⛔ No INSPIRE recid found},
}
@inproceedings{deweese_binary_2003,
title = {Binary coding in auditory cortex},
booktitle = {Advances in {Neural} {Information} {Processing} {Systems}},
url = {http://papers.nips.cc/paper/2342-binary-coding-in-auditory-cortex},
abstract = {Cortical neurons have been reported to use both rate and temporal codes. Here we describe a novel mode in which each neuron generates exactly 0 or 1 action potentials, but not more, in response to a stimulus. We used cell-attached recording, which ensured single-unit isolation, to record responses in rat auditory cortex to brief tone pips. Surprisingly, the majority of neurons exhibited binary behavior with few multi-spike responses; several dramatic examples consisted of exactly one spike on 100\% of trials, with no trial-to-trial variability in spike count. Many neurons were tuned to stimulus frequency. Since individual trials yielded at most one spike for most neurons, the information about stimulus frequency was encoded in the population, and would not have been accessible to later stages of processing that only had access to the activity of a single unit. These binary units allow a more efficient population code than is possible with conventional rate coding units, and are consistent with a model of cortical processing in which synchronous packets of spikes propagate stably from one neuronal population to the next.},
urldate = {2022-10-04},
author = {DeWeese, M. R. and Zador, A. M.},
year = {2003},
keywords = {\#nosource, ⛔ No DOI found, ⛔ No INSPIRE recid found},
}
@article{sotomayor-gomez_spikeship_2021,
title = {{SpikeShip}: {A} method for fast, unsupervised discovery of high-dimensional neural spiking patterns},
url = {https://www.biorxiv.org/content/10.1101/2020.06.03.131573},
doi = {10.1101/2020.06.03.131573},
journal = {bioRxiv : the preprint server for biology},
author = {Sotomayor-Gómez, Boris and Battaglia, Francesco P and Vinck, Martin},
year = {2021},
note = {Publisher: Cold Spring Harbor Laboratory},
keywords = {⛔ No INSPIRE recid found},
pages = {2020--06},
}
@article{berens_fast_2012,
title = {A {Fast} and {Simple} {Population} {Code} for {Orientation} in {Primate} {V1}},
volume = {32},
url = {https://doi.org/f365rn},
doi = {10.1523/jneurosci.1335-12.2012},
language = {en},
number = {31},
journal = {Journal of Neuroscience},
author = {Berens, P. and Ecker, A. S. and Cotton, R. J. and Ma, W. J. and Bethge, M. and Tolias, A. S.},
year = {2012},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {10618--10626},
}
@article{nawrot_eye_2003,
title = {Eye movements provide the extra-retinal signal required for the perception of depth from motion parallax},
volume = {43},
issn = {0042-6989},
url = {https://www.sciencedirect.com/science/article/pii/S0042698903001445},
doi = {10.1016/S0042-6989(03)00144-5},
abstract = {It has been unclear whether the perception of depth from motion parallax is an entirely visual process or whether it requires extra-retinal information such as head movements, vestibular activation, or eye movements. Using a motion aftereffect and static test stimulus technique to eliminate visual cues to depth, this psychophysical study demonstrates that the visual system employs a slow eye movement signal, optokinetic response (OKR) in particular, for the unambiguous perception of depth from motion parallax. A vestibular signal, or vestibularly driven eye movement signal is insufficient for unambiguous depth from motion parallax. Removal of the OKR eye movement signal gives rise to ambiguous perceived depth in motion parallax conditions. Neurophysiological studies suggest a possible neural mechanism in medial temporal and medial superior temporal cortical neurons that are selective to depth, motion, and direction of eye movement.},
language = {en},
number = {14},
urldate = {2022-09-15},
journal = {Vision Research},
author = {Nawrot, Mark},
month = jun,
year = {2003},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {1553--1562},
}
@article{barlow_unsupervised_1989,
title = {Unsupervised {Learning}},
volume = {1},
issn = {0899-7667},
url = {https://doi.org/10.1162/neco.1989.1.3.295},
doi = {10.1162/neco.1989.1.3.295},
abstract = {What use can the brain make of the massive flow of sensory information that occurs without any associated rewards or punishments? This question is reviewed in the light of connectionist models of unsupervised learning and some older ideas, namely the cognitive maps and working models of Tolman and Craik, and the idea that redundancy is important for understanding perception (Attneave 1954), the physiology of sensory pathways (Barlow 1959), and pattern recognition (Watanabe 1960). It is argued that (1) The redundancy of sensory messages provides the knowledge incorporated in the maps or models. (2) Some of this knowledge can be obtained by observations of mean, variance, and covariance of sensory messages, and perhaps also by a method called “minimum entropy coding.” (3) Such knowledge may be incorporated in a model of “what usually happens” with which incoming messages are automatically compared, enabling unexpected discrepancies to be immediately identified. (4) Knowledge of the sort incorporated into such a filter is a necessary prerequisite of ordinary learning, and a representation whose elements are independent makes it possible to form associations with logical functions of the elements, not just with the elements themselves.},
number = {3},
urldate = {2021-08-06},
journal = {Neural Computation},
author = {Barlow, H. B.},
month = sep,
year = {1989},
note = {00000},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {295--311},
}
@article{hanuschkin_general_2010,
title = {A {General} and {Efficient} {Method} for {Incorporating} {Precise} {Spike} {Times} in {Globally} {Time}-{Driven} {Simulations}},
volume = {4},
issn = {1662-5196},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2965048/},
doi = {10.3389/fninf.2010.00113},
abstract = {Traditionally, event-driven simulations have been limited to the very restricted class of neuronal models for which the timing of future spikes can be expressed in closed form. Recently, the class of models that is amenable to event-driven simulation has been extended by the development of techniques to accurately calculate firing times for some integrate-and-fire neuron models that do not enable the prediction of future spikes in closed form. The motivation of this development is the general perception that time-driven simulations are imprecise. Here, we demonstrate that a globally time-driven scheme can calculate firing times that cannot be discriminated from those calculated by an event-driven implementation of the same model; moreover, the time-driven scheme incurs lower computational costs. The key insight is that time-driven methods are based on identifying a threshold crossing in the recent past, which can be implemented by a much simpler algorithm than the techniques for predicting future threshold crossings that are necessary for event-driven approaches. As run time is dominated by the cost of the operations performed at each incoming spike, which includes spike prediction in the case of event-driven simulation and retrospective detection in the case of time-driven simulation, the simple time-driven algorithm outperforms the event-driven approaches. Additionally, our method is generally applicable to all commonly used integrate-and-fire neuronal models; we show that a non-linear model employing a standard adaptive solver can reproduce a reference spike train with a high degree of precision.},
urldate = {2022-09-14},
journal = {Frontiers in Neuroinformatics},
author = {Hanuschkin, Alexander and Kunkel, Susanne and Helias, Moritz and Morrison, Abigail and Diesmann, Markus},
month = oct,
year = {2010},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {113},
}
@article{yoonessi_contribution_2011,
title = {Contribution of motion parallax to segmentation and depth perception},
volume = {11},
issn = {1534-7362},
url = {https://doi.org/10.1167/11.9.13},
doi = {10.1167/11.9.13},
abstract = {Relative image motion resulting from active movement of the observer could potentially serve as a powerful perceptual cue, both for segmentation of object boundaries and for depth perception. To examine the perceptual role of motion parallax from shearing motion, we measured human performance in three psychophysical tasks: segmentation, depth ordering, and depth magnitude estimation. Stimuli consisted of random dot textures that were synchronized to head movement with sine- or square-wave modulation patterns. Segmentation was assessed with a 2AFC orientation judgment of a motion-defined boundary. In the depth-ordering task, observers reported which modulation half-cycle appeared in front of the other. Perceived depth magnitude was matched to that of a 3D rendered image with multiple static cues. The results indicate that head movement might not be important for segmentation, even though it is crucial for obtaining depth from motion parallax—thus, concomitant depth perception does not appear to facilitate segmentation. Our findings suggest that segmentation works best for abrupt, sharply defined motion boundaries, whereas smooth gradients are more powerful for obtaining depth from motion parallax. Thus, motion parallax may contribute in a different manner to segmentation and to depth perception and suggests that their underlying mechanisms might be distinct.},
number = {9},
urldate = {2022-09-15},
journal = {Journal of Vision},
author = {Yoonessi, Ahmad and Baker, Jr., Curtis L.},
month = aug,
year = {2011},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {13},
}
@article{haag_fly_2004,
title = {Fly motion vision is based on {Reichardt} detectors regardless of the signal-to-noise ratio},
volume = {101},
issn = {0027-8424},
doi = {10.1073/pnas.0407368101},
abstract = {The computational structure of an optimal motion detector was proposed to depend on the signal-to-noise ratio (SNR) of the stimulus: At low SNR, the optimal motion detector should be a correlation or "Reichardt" type, whereas at high SNR, the detector would employ a gradient scheme [Potters, M. \& Bialek, W. (1994) J. Physiol. (Paris) 4, 1755-1775]. Although a large body of experiments supports the Reichardt detector as the processing scheme leading to direction selectivity in fly motion vision, in most of these studies the SNR was rather low. We therefore reinvestigated the question over a much larger SNR range. Using 2-photon microscopy, we found that local dendritic [Ca(2+)] modulations, which are characteristic of Reichardt detectors, occur in response to drifting gratings over a wide range of luminance levels and contrasts. We also explored, as another fingerprint of Reichardt detectors, the dependence of the velocity optimum on the pattern wavelength. Again, we found Reichardt-typical behavior throughout the whole luminance and contrast range tested. Our results, therefore, provide strong evidence that only a single elementary processing scheme is used in fly motion vision.},
language = {eng},
number = {46},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
author = {Haag, J. and Denk, W. and Borst, A.},
month = nov,
year = {2004},
pmid = {15534201},
pmcid = {PMC526200},
note = {00164 },
keywords = {\#nosource, Algorithms, Animals, Calcium Signaling, Diptera, Electrophysiology, Female, Models, Models, Neurological, Motion, Motion Perception, Neurological, Ocular, Optics and Photonics, Photic Stimulation, Vision, Vision, Ocular, biology, delay-learning, insects, ⛔ No INSPIRE recid found},
pages = {16333--16338},
}
@article{boutin_sparse_2020,
title = {Sparse {Deep} {Predictive} {Coding} captures contour integration capabilities of the early visual system},
copyright = {All rights reserved},
url = {https://doi.org/10.1371/journal.pcbi.1008629},
doi = {10.1371/journal.pcbi.1008629},
abstract = {Both neurophysiological and psychophysical experiments have pointed out the crucial role of recurrent and feedback connections to process context-dependent information in the early visual cortex. While numerous models have accounted for feedback effects at either neural or representational level, none of them were able to bind those two levels of analysis. Is it possible to describe feedback effects at both levels using the same model? We answer this question by combining Predictive Coding (PC) and Sparse Coding (SC) into a hierarchical and convolutional framework. In this Sparse Deep Predictive Coding (SDPC) model, the SC component models the internal recurrent processing within each layer, and the PC component describes the interactions between layers using feedforward and feedback connections. Here, we train a 2-layered SDPC on two different databases of images, and we interpret it as a model of the early visual system (V1 \& V2). We first demonstrate that once the training has converged, SDPC exhibits oriented and localized receptive fields in V1 and more complex features in V2. Second, we analyze the effects of feedback on the neural organization beyond the classical receptive field of V1 neurons using interaction maps. These maps are similar to association fields and reflect the Gestalt principle of good continuation. We demonstrate that feedback signals reorganize interaction maps and modulate neural activity to promote contour integration. Third, we demonstrate at the representational level that the SDPC feedback connections are able to overcome noise in input images. Therefore, the SDPC captures the association field principle at the neural level which results in better disambiguation of blurred images at the representational level.},
journal = {PLoS Computational Biology},
author = {Boutin, Victor and Franciosini, Angelo and Chavane, Frédéric Y and Ruffier, Franck and Perrinet, Laurent U},
month = may,
year = {2020},
note = {tex.date-added: 2019-06-18 13:53:53 +0200
tex.date-modified: 2020-12-12 11:55:20 +0100
tex.grants: doc-2-amu,phd-icn,mesocentre
tex.preprint: https://arxiv.org/abs/1902.07651
tex.url\_code: https://github.com/VictorBoutin/InteractionMap
publisher: Public Library of Science San Francisco, CA USA},
keywords = {\#nosource, deep-learning, sparse coding, ⛔ No INSPIRE recid found},
}
@article{paredes-valles_unsupervised_2020,
title = {Unsupervised {Learning} of a {Hierarchical} {Spiking} {Neural} {Network} for {Optical} {Flow} {Estimation}: {From} {Events} to {Global} {Motion} {Perception}},
volume = {42},
issn = {1939-3539},
shorttitle = {Unsupervised {Learning} of a {Hierarchical} {Spiking} {Neural} {Network} for {Optical} {Flow} {Estimation}},
doi = {10.1109/tpami.2019.2903179},
abstract = {The combination of spiking neural networks and event-based vision sensors holds the potential of highly efficient and high-bandwidth optical flow estimation. This paper presents the first hierarchical spiking architecture in which motion (direction and speed) selectivity emerges in an unsupervised fashion from the raw stimuli generated with an event-based camera. A novel adaptive neuron model and stable spike-timing-dependent plasticity formulation are at the core of this neural network governing its spike-based processing and learning, respectively. After convergence, the neural architecture exhibits the main properties of biological visual motion systems, namely feature extraction and local and global motion perception. Convolutional layers with input synapses characterized by single and multiple transmission delays are employed for feature and local motion perception, respectively; while global motion selectivity emerges in a final fully-connected layer. The proposed solution is validated using synthetic and real event sequences. Along with this paper, we provide the cuSNN library, a framework that enables GPU-accelerated simulations of large-scale spiking neural networks. Source code and samples are available at https://github.com/tudelft/cuSNN.},
number = {8},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
author = {Paredes-Vallés, Federico and Scheper, Kirk Y. W. and de Croon, Guido C. H. E.},
month = aug,
year = {2020},
note = {00000
Conference Name: IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {\#nosource, Biological information theory, Biological system modeling, Biomedical optical imaging, Event-based vision, Neurons, Optical sensors, Vision sensors, Visualization, feature extraction, motion detection, neural nets, neuromorphic computing, unsupervised learning, ⛔ No INSPIRE recid found},
pages = {2051--2064},
}
@article{nessler_bayesian_2013,
title = {Bayesian {Computation} {Emerges} in {Generic} {Cortical} {Microcircuits} through {Spike}-{Timing}-{Dependent} {Plasticity}},
volume = {9},
issn = {1553-7358},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003037},
doi = {10.1371/journal.pcbi.1003037},
abstract = {The principles by which networks of neurons compute, and how spike-timing dependent plasticity (STDP) of synaptic weights generates and maintains their computational function, are unknown. Preceding work has shown that soft winner-take-all (WTA) circuits, where pyramidal neurons inhibit each other via interneurons, are a common motif of cortical microcircuits. We show through theoretical analysis and computer simulations that Bayesian computation is induced in these network motifs through STDP in combination with activity-dependent changes in the excitability of neurons. The fundamental components of this emergent Bayesian computation are priors that result from adaptation of neuronal excitability and implicit generative models for hidden causes that are created in the synaptic weights through STDP. In fact, a surprising result is that STDP is able to approximate a powerful principle for fitting such implicit generative models to high-dimensional spike inputs: Expectation Maximization. Our results suggest that the experimentally observed spontaneous activity and trial-to-trial variability of cortical neurons are essential features of their information processing capability, since their functional role is to represent probability distributions rather than static neural codes. Furthermore it suggests networks of Bayesian computation modules as a new model for distributed information processing in the cortex.},
language = {en},
number = {4},
urldate = {2021-05-20},
journal = {PLOS Computational Biology},
author = {Nessler, Bernhard and Pfeiffer, Michael and Buesing, Lars and Maass, Wolfgang},
year = {2013},
note = {tex.ids= Nessler2013a
publisher: Public Library of Science},
keywords = {\#nosource, Action potentials, Learning, Machine learning, Neural networks, Neuronal plasticity, Neurons, Probability distribution, Synapses, ⛔ No INSPIRE recid found},
pages = {e1003037},
}
@incollection{perrinet_sparse_2015,
address = {Weinheim, Germany},
title = {Sparse {Models} for {Computer} {Vision}},
copyright = {All rights reserved},
url = {http://onlinelibrary.wiley.com/doi/10.1002/9783527680863.ch14/summary},
booktitle = {Biologically {Inspired} {Computer} {Vision}},
publisher = {Wiley-VCH Verlag GmbH \& Co. KGaA},
author = {Perrinet, Laurent U},
editor = {Keil, Matthias and Cristóbal, Gabriel and Perrinet, Laurent U},
month = aug,
year = {2015},
doi = {10.1002/9783527680863.ch14},
note = {Section: 13
tex.date-modified: 2020-01-07 12:54:57 +0100
tex.ids: Perrinet15bicv,Perrinet2015c
tex.preprint: https://hal-amu.archives-ouvertes.fr/hal-01444362
tex.url\_code: https://github.com/bicv/Perrinet2015BICV\_sparse},
keywords = {\#nosource, Biologically Inspired Computer vision, anr-trax, bicv-sparse, sanz12jnp, sparse coding, vacher14, ⛔ No INSPIRE recid found},
pages = {319--346},
}
@article{dandekar_neural_2012,
title = {Neural saccadic response estimation during natural viewing},
volume = {107},
issn = {0022-3077},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3311669/},
doi = {10.1152/jn.00237.2011},
abstract = {Studying neural activity during natural viewing conditions is not often attempted. Isolating the neural response of a single saccade is necessary to study neural activity during natural viewing; however, the close temporal spacing of saccades that occurs during natural viewing makes it difficult to determine the response to a single saccade. Herein, a general linear model (GLM) approach is applied to estimate the EEG neural saccadic response for different segments of the saccadic main sequence separately. It is determined that, in visual search conditions, neural responses estimated by conventional event-related averaging are significantly and systematically distorted relative to GLM estimates due to the close temporal spacing of saccades during visual search. Before the GLM is applied, analyses are applied that demonstrate that saccades during visual search with intersaccadic spacings as low as 100–150 ms do not exhibit significant refractory effects. Therefore, saccades displaying different intersaccadic spacings during visual search can be modeled using the same regressor in a GLM. With the use of the GLM approach, neural responses were separately estimated for five different ranges of saccade amplitudes during visual search. Occipital responses time locked to the onsets of saccades during visual search were found to account for, on average, 79 percent of the variance of EEG activity in a window 90–200 ms after the onsets of saccades for all five saccade amplitude ranges that spanned a range of 0.2–6.0 degrees. A GLM approach was also used to examine the lateralized ocular artifacts associated with saccades. Possible extensions of the methods presented here to account for the superposition of microsaccades in event-related EEG studies conducted in nominal fixation conditions are discussed.},
number = {6},
urldate = {2022-09-14},
journal = {Journal of Neurophysiology},
author = {Dandekar, Sangita and Privitera, Claudio and Carney, Thom and Klein, Stanley A.},
month = mar,
year = {2012},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {1776--1790},
}
@article{deangelis_functional_1999,
title = {Functional micro-organization of primary visual cortex: receptive field analysis of nearby neurons},
volume = {19},
doi = {10.1523/JNEUROSCI.19-10-04046.1999},
number = {10},
journal = {Journal of Neuroscience},
author = {DeAngelis, Gregory C and Ghose, Geoffrey M and Ohzawa, Izumi and Freeman, Ralph D},
year = {1999},
note = {Publisher: Soc Neuroscience},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {4046--4064},
}
@article{frye_elementary_2015,
title = {Elementary motion detectors},
volume = {25},
issn = {09609822},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0960982215000159},
doi = {10.1016/j.cub.2015.01.013},
language = {en},
number = {6},
urldate = {2022-03-21},
journal = {Current Biology},
author = {Frye, Mark},
month = mar,
year = {2015},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {R215--R217},
}
@article{rogers_motion_1979,
title = {Motion {Parallax} as an {Independent} {Cue} for {Depth} {Perception}},
volume = {8},
issn = {0301-0066},
url = {https://doi.org/10.1068/p080125},
doi = {10.1068/p080125},
abstract = {The perspective transformations of the retinal image, produced by either the movement of an observer or the movement of objects in the visual world, were found to produce a reliable, consistent, and unambiguous impression of relative depth in the absence of all other cues to depth and distance. The stimulus displays consisted of computer-generated random-dot patterns that could be transformed by each movement of the observer or the display oscilloscope to simulate the relative movement information produced by a three-dimensional surface. Using a stereoscopic matching task, the second experiment showed that the perceived depth from parallax transformations is in close agreement with the degree of relative image displacement, as well as producing a compelling impression of three-dimensionality not unlike that found with random-dot stereograms.},
language = {en},
number = {2},
urldate = {2022-09-15},
journal = {Perception},
author = {Rogers, Brian and Graham, Maureen},
month = apr,
year = {1979},
note = {Publisher: SAGE Publications Ltd STM},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {125--134},
}
@article{simoncini_more_2012,
title = {More is not always better: adaptive gain control explains dissociation between perception and action},
volume = {15},
doi = {10.1038/nn.3229},
number = {11},
journal = {Nature Neuroscience},
author = {Simoncini, Claudio and Perrinet, Laurent U and Montagnini, Anna and Mamassian, Pascal and Masson, Guillaume S},
year = {2012},
keywords = {\#nosource, active\_eye\_movements, bicv-sparse, eye-movements, eye\_movements, free energy, freemove, gain\_control, perrinetadamsfriston14, pursuit, sanz12jnp, smooth-pursuit-eye-movements, spem, vacher14, ⛔ No INSPIRE recid found},
pages = {1596--1603},
}
@article{sanz_leon_virtual_2013,
title = {The {Virtual} {Brain}: a simulator of primate brain network dynamics},
volume = {7},
issn = {1662-5196},
shorttitle = {The {Virtual} {Brain}},
url = {https://www.frontiersin.org/articles/10.3389/fninf.2013.00010},
doi = {10.3389/fninf.2013.00010},
abstract = {We present The Virtual Brain (TVB), a neuroinformatics platform for full brain network simulations using biologically realistic connectivity. This simulation environment enables the model-based inference of neurophysiological mechanisms across different brain scales that underlie the generation of macroscopic neuroimaging signals including functional MRI (fMRI), EEG and MEG. Researchers from different backgrounds can benefit from an integrative software platform including a supporting framework for data management (generation, organization, storage, integration and sharing) and a simulation core written in Python. TVB allows the reproduction and evaluation of personalized configurations of the brain by using individual subject data. This personalization facilitates an exploration of the consequences of pathological changes in the system, permitting to investigate potential ways to counteract such unfavorable processes. The architecture of TVB supports interaction with MATLAB packages, for example, the well known Brain Connectivity Toolbox. TVB can be used in a client-server configuration, such that it can be remotely accessed through the Internet thanks to its web-based HTML5, JS, and WebGL graphical user interface. TVB is also accessible as a standalone cross-platform Python library and application, and users can interact with the scientific core through the scripting interface IDLE, enabling easy modeling, development and debugging of the scientific kernel. This second interface makes TVB extensible by combining it with other libraries and modules developed by the Python scientific community. In this article, we describe the theoretical background and foundations that led to the development of TVB, the architecture and features of its major software components as well as potential neuroscience applications.},
urldate = {2022-09-28},
journal = {Frontiers in Neuroinformatics},
author = {Sanz Leon, Paula and Knock, Stuart and Woodman, M. and Domide, Lia and Mersmann, Jochen and McIntosh, Anthony and Jirsa, Viktor},
year = {2013},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
}
@article{boutin_effect_2020,
title = {Effect of top-down connections in {Hierarchical} {Sparse} {Coding}},
volume = {32},
copyright = {All rights reserved},
url = {https://laurentperrinet.github.io/publication/boutin-franciosini-ruffier-perrinet-20-feedback/},
doi = {10.1162/neco_a_01325},
abstract = {Hierarchical Sparse Coding (HSC) is a powerful model to efficiently represent multi-dimensional, structured data such as images. The simplest solution to solve this computationally hard problem is to decompose it into independent layer-wise subproblems. However, neuroscientific evidence would suggest inter-connecting these subproblems as in the Predictive Coding (PC) theory, which adds top-down connections between consecutive layers. In this study, a new model called 2-Layers Sparse Predictive Coding (2L-SPC) is introduced to assess the impact of this inter-layer feedback connection. In particular, the 2L-SPC is compared with a Hierarchical Lasso (Hi-La) network made out of a sequence of independent Lasso layers. The 2L-SPC and the 2-layers Hi-La networks are trained on 4 different databases and with different sparsity parameters on each layer. First, we show that the overall prediction error generated by 2L-SPC is lower thanks to the feedback mechanism as it transfers prediction error between layers. Second, we demonstrate that the inference stage of the 2L-SPC is faster to converge than for the Hi-La model. Third, we show that the 2L-SPC also accelerates the learning process. Finally, the qualitative analysis of both models dictionaries, supported by their activation probability, show that the 2L-SPC features are more generic and informative.},
number = {11},
journal = {Neural Computation},
author = {Boutin, Victor and Franciosini, Angelo and Ruffier, Franck and Perrinet, Laurent U},
month = nov,
year = {2020},
note = {tex.ids= BoutinFranciosiniRuffierPerrinet20
tex.date-modified: 2020-11-03 09:59:57 +0100
tex.grants: doc-2-amu,phd-icn,mesocentre
tex.preprint: https://arxiv.org/abs/2002.00892
publisher: MIT Press},
keywords = {\#nosource, deep-learning, sparse coding, ⛔ No INSPIRE recid found},
pages = {2279--2309},
}
@article{brette_exact_2007,
title = {Exact {Simulation} of {Integrate}-and-{Fire} {Models} with {Exponential} {Currents}},
volume = {19},
issn = {0899-7667, 1530-888X},
url = {https://direct.mit.edu/neco/article/19/10/2604-2609/7220},
doi = {10.1162/neco.2007.19.10.2604},
abstract = {Neural networks can be simulated exactly using event-driven strategies, in which the algorithm advances directly from one spike to the next spike. It applies to neuron models for which we have (1) an explicit expression for the evolution of the state variables between spikes and (2) an explicit test on the state variables that predicts whether and when a spike will be emitted. In a previous work, we proposed a method that allows exact simulation of an integrate-and-fire model with exponential conductances, with the constraint of a single synaptic time constant. In this note, we propose a method, based on polynomial root finding, that applies to integrate-and-fire models with exponential currents, with possibly many different synaptic time constants. Models can include biexponential synaptic currents and spike-triggered adaptation currents.},
language = {en},
number = {10},
urldate = {2022-09-15},
journal = {Neural Computation},
author = {Brette, Romain},
month = oct,
year = {2007},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {2604--2609},
}
@misc{kohn_utah_2016,
title = {Utah array extracellular recordings of spontaneous and visually evoked activity from anesthetized macaque primary visual cortex ({V1}).},
url = {http://crcns.org/data-sets/vc/pvc-11},
language = {en},
urldate = {2022-12-19},
publisher = {CRCNS.org},
author = {Kohn, A. and Smith, M.A.},
year = {2016},
doi = {10.6080/K0NC5Z4X},
keywords = {Macaque, Neuroscience, Primary visual cortex, ⛔ No INSPIRE recid found},
}
@inproceedings{lee_real-time_2014,
address = {Paris, France},
title = {Real-time motion estimation based on event-based vision sensor},
isbn = {978-1-4799-5751-4},
url = {http://ieeexplore.ieee.org/document/7025040/},
doi = {10.1109/ICIP.2014.7025040},
abstract = {Fast and efficient motion estimation is essential for a number of applications including the gesture-based user interface (UI) for portable devices like smart phones. In this paper, we propose a highly efficient method that can estimate four degree of freedom (DOF) motional components of a moving object based on an event-based vision sensor, the dynamic vision sensor (DVS). The proposed method finds informative events occurred at edges and estimates their velocities for global motion analysis. We will also describe a novel method to correct the aperture problem in the motion estimation.},
language = {en},
urldate = {2022-07-19},
booktitle = {2014 {IEEE} {International} {Conference} on {Image} {Processing} ({ICIP})},
publisher = {IEEE},
author = {Lee, Jun Haeng and Lee, Kyoobin and Ryu, Hyunsurk and Park, Paul K. J. and Shin, Chang-Woo and Woo, Jooyeon and Kim, Jun-Seok},
month = oct,
year = {2014},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {204--208},
}
@article{leon_motion_2012,
title = {{Motion} {Clouds}: model-based stimulus synthesis of natural-like random textures for the study of motion perception},
volume = {107},
doi = {10.1152/jn.00737.2011},
number = {11},
journal = {Journal of Neurophysiology},
author = {Sanz Leon, Paula and Vanzetta, Ivo and Masson, Guillaume S and Perrinet, Laurent U},
year = {2012},
keywords = {\#nosource, Eye movements, Low-level sensory systems, Motion detection, Natural scenes, Optimal stimulation, Python, anr-trax, bicv-sparse, kaplan13, log-gabor, motion-clouds, perrinetadamsfriston14, sanz12jnp, vacher14, ⛔ No INSPIRE recid found},
pages = {3217--3226},
}
@article{lin_supervised_2021,
title = {Supervised {Learning} {Algorithm} for {Multilayer} {Spiking} {Neural} {Networks} with {Long}-{Term} {Memory} {Spike} {Response} {Model}},
volume = {2021},
issn = {1687-5265},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8635912/},
doi = {10.1155/2021/8592824},
abstract = {As a new brain-inspired computational model of artificial neural networks, spiking neural networks transmit and process information via precisely timed spike trains. Constructing efficient learning methods is a significant research field in spiking neural networks. In this paper, we present a supervised learning algorithm for multilayer feedforward spiking neural networks; all neurons can fire multiple spikes in all layers. The feedforward network consists of spiking neurons governed by biologically plausible long-term memory spike response model, in which the effect of earlier spikes on the refractoriness is not neglected to incorporate adaptation effects. The gradient descent method is employed to derive synaptic weight updating rule for learning spike trains. The proposed algorithm is tested and verified on spatiotemporal pattern learning problems, including a set of spike train learning tasks and nonlinear pattern classification problems on four UCI datasets. Simulation results indicate that the proposed algorithm can improve learning accuracy in comparison with other supervised learning algorithms.},
urldate = {2022-09-14},
journal = {Computational Intelligence and Neuroscience},
author = {Lin, Xianghong and Zhang, Mengwei and Wang, Xiangwen},
month = nov,
year = {2021},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {8592824},
}
@article{macdonald_neuromorphic_2022,
title = {Neuromorphic {Tactile} {Edge} {Orientation} {Classification} in an {Unsupervised} {Spiking} {Neural} {Network}},
volume = {22},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {1424-8220},
url = {https://www.mdpi.com/1424-8220/22/18/6998},
doi = {10.3390/s22186998},
abstract = {Dexterous manipulation in robotic hands relies on an accurate sense of artificial touch. Here we investigate neuromorphic tactile sensation with an event-based optical tactile sensor combined with spiking neural networks for edge orientation detection. The sensor incorporates an event-based vision system (mini-eDVS) into a low-form factor artificial fingertip (the NeuroTac). The processing of tactile information is performed through a Spiking Neural Network with unsupervised Spike-Timing-Dependent Plasticity (STDP) learning, and the resultant output is classified with a 3-nearest neighbours classifier. Edge orientations were classified in 10-degree increments while tapping vertically downward and sliding horizontally across the edge. In both cases, we demonstrate that the sensor is able to reliably detect edge orientation, and could lead to accurate, bio-inspired, tactile processing in robotics and prosthetics applications.},
language = {en},
number = {18},
urldate = {2022-09-26},
journal = {Sensors},
author = {Macdonald, Fraser L. A. and Lepora, Nathan F. and Conradt, Jörg and Ward-Cherrier, Benjamin},
month = sep,
year = {2022},
note = {Number: 18
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {6998},
}
@book{mandelbrot_fractal_1982,
title = {The fractal geometry of nature},
isbn = {978-0-7167-1186-5},
url = {http://archive.org/details/fractalgeometryo00beno},
abstract = {Rev. ed. of: Fractals. c1977; Includes indexes; Bibliography: p. [425]-443},
language = {eng},
urldate = {2022-09-27},
publisher = {W.H. Freeman},
address = {San Francisco},
author = {Mandelbrot, Benoit B.},
collaborator = {{Internet Archive}},
year = {1982},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
}
@article{gewaltig_propagation_2001,
title = {Propagation of cortical synfire activity: survival probability in single trials and stability in the mean},
volume = {14},
issn = {0893-6080},
shorttitle = {Propagation of cortical synfire activity},
url = {https://www.sciencedirect.com/science/article/pii/S0893608001000703},
doi = {10.1016/S0893-6080(01)00070-3},
abstract = {The synfire hypothesis states that under appropriate conditions volleys of synchronized spikes (pulse packets) can propagate through the cortical network by traveling along chains of groups of cortical neurons. Here, we present results from network simulations, taking full account of the variability in pulse packet realizations. We repeatedly stimulated a synfire chain of model neurons and estimated activity (a) and temporal jitter (σ) of the spike response for each neuron group in the chain in many trials. The survival probability of the activity was assessed for each point in (a, σ)-space. The results confirm and extend our earlier predictions based on single neuron properties and a deterministic state-space analysis [Diesmann, M., Gewaltig, M.-O., \& Aertsen, A. (1999). Stable propagation of synchronous spiking in cortical neural networks. Nature, 402, 529–533].},
language = {en},
number = {6},
urldate = {2022-10-26},
journal = {Neural Networks},
author = {Gewaltig, Marc-Oliver and Diesmann, Markus and Aertsen, Ad},
month = jul,
year = {2001},
keywords = {Action Potentials, Animals, Cell Membrane, Cerebral Cortex, Computer, Cortical dynamics, Humans, Integrate-and-fire neurons, Models, Models, Statistical, Nerve Net, Neural Networks, Neural Networks, Computer, Neurons, Pulse packets, Single-trial analysis, Spike patterns, Spiking neurons, Statistical, Synaptic Transmission, Synfire chains, Variability, ⛔ No INSPIRE recid found},
pages = {657--673},
}
@article{goodman_spike-timing-based_2010,
title = {Spike-timing-based computation in sound localization.},
volume = {6},
issn = {1553-7358},
url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2978676/},
doi = {10.1371/journal.pcbi.1000993},
abstract = {Spike timing is precise in the auditory system and it has been argued that it conveys information about auditory stimuli, in particular about the location of a sound source. However, beyond simple time differences, the way in which neurons might extract this information is unclear and the potential computational advantages are unknown. The computational difficulty of this task for an animal is to locate the source of an unexpected sound from two monaural signals that are highly dependent on the unknown source signal. In neuron models consisting of spectro-temporal filtering and spiking nonlinearity, we found that the binaural structure induced by spatialized sounds is mapped to synchrony patterns that depend on source location rather than on source signal. Location-specific synchrony patterns would then result in the activation of location-specific assemblies of postsynaptic neurons. We designed a spiking neuron model which exploited this principle to locate a variety of sound sources in a virtual acoustic environment using measured human head-related transfer functions. The model was able to accurately estimate the location of previously unknown sounds in both azimuth and elevation (including front/back discrimination) in a known acoustic environment. We found that multiple representations of different acoustic environments could coexist as sets of overlapping neural assemblies which could be associated with spatial locations by Hebbian learning. The model demonstrates the computational relevance of relative spike timing to extract spatial information about sources independently of the source signal.},
number = {11},
journal = {PLoS Computational Biology},
author = {Goodman, Dan F. M. and Brette, Romain},
month = nov,
year = {2010},
pmid = {21085681},
keywords = {\#nosource, auditory\_stimuli, auditory\_system, neural\_assemblies, sound\_localization, spatial\_information, spike, spike\_timing, spikes, synchrony, ⛔ No INSPIRE recid found},
}
@article{perrinet_sparse_2004,
series = {New {Aspects} in {Neurocomputing}: 10th {European} {Symposium} on {Artificial} {Neural} {Networks} 2002},
title = {Sparse spike coding in an asynchronous feed-forward multi-layer neural network using matching pursuit},
volume = {57},
issn = {0925-2312},
url = {https://www.sciencedirect.com/science/article/pii/S0925231204000670},
doi = {10.1016/j.neucom.2004.01.010},
abstract = {In order to account for the rapidity of visual processing, we explore visual coding strategies using a one-pass feed-forward spiking neural network. We based our model on the work of Van Rullen and Thorpe Neural Comput. 13 (6) (2001) 1255, which constructs a retinal representation using an orthogonal wavelet transform. This strategy provides a spike code, thanks to a rank order coding scheme which offers an alternative to the classical spike frequency coding scheme. We extended this model to efficient representations in arbitrary linear generative models by implementing lateral interactions on top of this feed-forward model. This method uses a matching pursuit scheme—recursively detecting in the image the best match with the elements of a dictionary and then subtracting it—and which may similarly define a visual spike code. In particular, this transform could be used with large and arbitrary dictionaries, so that we may define an over-complete representation which may define an efficient sparse spike coding scheme in arbitrary multi-layered architectures. We show here extensions of this method of computing with spike events, introducing an adaptive scheme leading to the emergence of V1-like receptive fields and then a model of bottom-up saliency pursuit.},
language = {en},
urldate = {2022-12-13},
journal = {Neurocomputing},
author = {Perrinet, Laurent and Samuelides, Manuel and Thorpe, Simon},
month = mar,
year = {2004},
keywords = {\#nosource, Natural images statistics, Parallel asynchronous processing, Sparse coding, Ultra-rapid categorization, Vision, Wavelet Transform, association field, assofield, matching, matching pursuit, pursuit, sparse coding, ⛔ No INSPIRE recid found},
pages = {125--134},
}
@incollection{simoncelli_characterization_2003,
title = {Characterization of {Neural} {Responses} with {Stochastic} {Stimuli}},
booktitle = {The {Cognitive} {Neurosciences}, {III}},
publisher = {MIT Press},
url = {http://pillowlab.princeton.edu/pubs/simoncelli03c-preprint.pdf},
language = {en},
author = {Simoncelli, Eero P and Paninski, Liam and Pillow, Jonathan and Schwartz, Odelia},
year = {2003},
keywords = {⛔ No DOI found, ⛔ No INSPIRE recid found},
}
@article{tatler_eye_2011,
title = {Eye guidance in natural vision: {Reinterpreting} salience},
volume = {11},
issn = {1534-7362},
shorttitle = {Eye guidance in natural vision},
url = {https://doi.org/10.1167/11.5.5},
doi = {10.1167/11.5.5},
abstract = {Models of gaze allocation in complex scenes are derived mainly from studies of static picture viewing. The dominant framework to emerge has been image salience, where properties of the stimulus play a crucial role in guiding the eyes. However, salience-based schemes are poor at accounting for many aspects of picture viewing and can fail dramatically in the context of natural task performance. These failures have led to the development of new models of gaze allocation in scene viewing that address a number of these issues. However, models based on the picture-viewing paradigm are unlikely to generalize to a broader range of experimental contexts, because the stimulus context is limited, and the dynamic, task-driven nature of vision is not represented. We argue that there is a need to move away from this class of model and find the principles that govern gaze allocation in a broader range of settings. We outline the major limitations of salience-based selection schemes and highlight what we have learned from studies of gaze allocation in natural vision. Clear principles of selection are found across many instances of natural vision and these are not the principles that might be expected from picture-viewing studies. We discuss the emerging theoretical framework for gaze allocation on the basis of reward maximization and uncertainty reduction.},
number = {5},
urldate = {2022-09-14},
journal = {Journal of Vision},
author = {Tatler, Benjamin W. and Hayhoe, Mary M. and Land, Michael F. and Ballard, Dana H.},
month = may,
year = {2011},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {5},
}
@article{van_der_stigchel_eye_2006,
title = {Eye movement trajectories and what they tell us},
volume = {30},
issn = {0149-7634},
url = {https://www.sciencedirect.com/science/article/pii/S0149763405001740},
doi = {10.1016/j.neubiorev.2005.12.001},
abstract = {In the last two decades, research has shown that eye movement trajectories can be modified by situational determinants. These modifications can inform us about the mechanisms that control eye movements and they can yield information about the oculomotor, memory and attention system that is not easily obtained via other sources. Eye movement trajectories can deviate either towards or away from elements in the visual field. We review the conditions in which these deviations are found and the mechanisms underlying trajectory deviations. It is argued that deviations towards an element are caused by the unresolved competition in the oculomotor system between elements in a visual scene. Deviations away from an element are mainly observed in situations in which top-down preparation can influence the target selection process, but the exact cause of such deviations remains unclear.},
language = {en},
number = {5},
urldate = {2022-09-13},
journal = {Neuroscience \& Biobehavioral Reviews},
author = {Van der Stigchel, Stefan and Meeter, Martijn and Theeuwes, Jan},
month = jan,
year = {2006},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {666--679},
}
@article{abeles_role_1982,
title = {Role of the cortical neuron: integrator or coincidence detector?},
volume = {18},
number = {1},
journal = {Israel Journal of Medical Sciences},
author = {Abeles, Moshe},
year = {1982},
keywords = {\#nosource, ⛔ No DOI found, ⛔ No INSPIRE recid found},
pages = {83--92},
}
@article{perrinet_active_2014,
title = {Active inference, eye movements and oculomotor delays},
volume = {108},
copyright = {All rights reserved},
issn = {1432-0770},
url = {https://doi.org/10.1007/s00422-014-0620-8},
doi = {10.1007/s00422-014-0620-8},
abstract = {This paper considers the problem of sensorimotor delays in the optimal control of (smooth) eye movements under uncertainty. Specifically, we consider delays in the visuo-oculomotor loop and their implications for active inference. Active inference uses a generalisation of Kalman filtering to provide Bayes optimal estimates of hidden states and action in generalised coordinates of motion. Representing hidden states in generalised coordinates provides a simple way of compensating for both sensory and oculomotor delays. The efficacy of this scheme is illustrated using neuronal simulations of pursuit initiation responses, with and without compensation. We then consider an extension of the generative model to simulate smooth pursuit eye movements in which the visuo-oculomotor system believes both the target and its centre of gaze are attracted to a (hidden) point moving in the visual field. Finally, the generative model is equipped with a hierarchical structure, so that it can recognise and remember unseen (occluded) trajectories and emit anticipatory responses. These simulations speak to a straightforward and neurobiologically plausible solution to the generic problem of integrating information from different sources with different temporal delays and the particular difficulties encountered when a system, like the oculomotor system, tries to control its environment with delayed signals.},
number = {6},
journal = {Biological Cybernetics},
author = {Perrinet, Laurent U and Adams, Rick A and Friston, Karl J},
month = dec,
year = {2014},
keywords = {\#nosource, Active inference, Bayesian model, Biologically Inspired Computer vision, Generalised coordinates, Oculomotor delays, Smooth pursuit eye movements, Tracking eye movements, Variational free energy, active inference, active-inference, bayesian, bicv-motion, bicv-sparse, delays, eye, eye movements, eye-movements, free energy, free-energy, generalized-coordinates, generalized-filtering, motion detection, oculomotor, perception, perrinetadamsfriston14, smooth-pursuit, tracking-eye-movements, variational-filtering, ⛔ No INSPIRE recid found},
pages = {777--801},
}
@article{davis_spontaneous_2021,
title = {Spontaneous traveling waves naturally emerge from horizontal fiber time delays and travel through locally asynchronous-irregular states},
volume = {12},
doi = {10.1038/s41467-021-26175-1},
number = {1},
journal = {Nature Communications},
author = {Davis, Zachary W and Benigno, Gabriel B and Fletterman, Charlee and Desbordes, Theo and Steward, Christopher and Sejnowski, Terrence J and H Reynolds, John and Muller, Lyle},
year = {2021},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {1--16},
}
@article{bohte_evidence_2004,
title = {The evidence for neural information processing with precise spike-times: {A} survey},
volume = {3},
doi = {10.1023/B:NACO.0000027755.02868.60},
number = {2},
journal = {Natural Computing},
author = {Bohte, Sander M},
year = {2004},
keywords = {\#nosource, neural coding, precise spike timing, spiking neural networks, synchrony coding, temporal coding, ⛔ No INSPIRE recid found},
pages = {195--206},
}
@article{riehle_spike_1997,
title = {Spike synchronization and rate modulation differentially involved in motor cortical function},
volume = {278},
doi = {10.1126/science.278.5345.1950},
number = {5345},
journal = {Science},
author = {Riehle, Alexa and Grün, Sonja and Diesmann, Markus and Aertsen, Ad},
year = {1997},
note = {Publisher: American Association for the Advancement of Science},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
pages = {1950--1953},
}
@article{isbister_clustering_2021,
title = {Clustering and control for adaptation uncovers time-warped spike time patterns in cortical networks in vivo},
volume = {11},
copyright = {2021 The Author(s)},
issn = {2045-2322},
url = {https://www.nature.com/articles/s41598-021-94002-0},
doi = {10.1038/s41598-021-94002-0},
abstract = {How information in the nervous system is encoded by patterns of action potentials (i.e. spikes) remains an open question. Multi-neuron patterns of single spikes are a prime candidate for spike time encoding but their temporal variability requires further characterisation. Here we show how known sources of spike count variability affect stimulus-evoked spike time patterns between neurons separated over multiple layers and columns of adult rat somatosensory cortex in vivo. On subsets of trials (clusters) and after controlling for stimulus-response adaptation, spike time differences between pairs of neurons are “time-warped” (compressed/stretched) by trial-to-trial changes in shared excitability, explaining why fixed spike time patterns and noise correlations are seldom reported. We show that predicted cortical state is correlated between groups of 4 neurons, introducing the possibility of spike time pattern modulation by population-wide trial-to-trial changes in excitability (i.e. cortical state). Under the assumption of state-dependent coding, we propose an improved potential encoding capacity.},
language = {en},
number = {1},
urldate = {2022-10-06},
journal = {Scientific Reports},
author = {Isbister, James B. and Reyes-Puerta, Vicente and Sun, Jyh-Jang and Horenko, Illia and Luhmann, Heiko J.},
month = jul,
year = {2021},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {⛔ No INSPIRE recid found},
pages = {15066},
}
@article{wang_delay_2019,
title = {A {Delay} {Learning} {Algorithm} {Based} on {Spike} {Train} {Kernels} for {Spiking} {Neurons}},
volume = {13},
issn = {1662-453X},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2019.00252},
abstract = {Neuroscience research confirms that the synaptic delays are not constant, but can be modulated. This paper proposes a supervised delay learning algorithm for spiking neurons with temporal encoding, in which both the weight and delay of a synaptic connection can be adjusted to enhance the learning performance. The proposed algorithm firstly defines spike train kernels to transform discrete spike trains during the learning phase into continuous analog signals so that common mathematical operations can be performed on them, and then deduces the supervised learning rules of synaptic weights and delays by gradient descent method. The proposed algorithm is successfully applied to various spike train learning tasks, and the effects of parameters of synaptic delays are analyzed in detail. Experimental results show that the network with dynamic delays achieves higher learning accuracy and less learning epochs than the network with static delays. The delay learning algorithm is further validated on a practical example of an image classification problem. The results again show that it can achieve a good classification performance with a proper receptive field. Therefore, the synaptic delay learning is significant for practical applications and theoretical researches of spiking neural networks.},
urldate = {2022-10-04},
journal = {Frontiers in Neuroscience},
author = {Wang, Xiangwen and Lin, Xianghong and Dang, Xiaochao},
year = {2019},
doi = {10.3389/fnins.2019.00252},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
}
@article{wang_neuromorphic_2015,
title = {A neuromorphic implementation of multiple spike-timing synaptic plasticity rules for large-scale neural networks},
volume = {9},
issn = {1662-453X},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2015.00180},
doi = {10.3389/fnins.2015.00180},
abstract = {We present a neuromorphic implementation of multiple synaptic plasticity learning rules, which include both Spike Timing Dependent Plasticity (STDP) and Spike Timing Dependent Delay Plasticity (STDDP). We present a fully digital implementation as well as a mixed-signal implementation, both of which use a novel dynamic-assignment time-multiplexing approach and support up to 226 (64M) synaptic plasticity elements. Rather than implementing dedicated synapses for particular types of synaptic plasticity, we implemented a more generic synaptic plasticity adaptor array that is separate from the neurons in the neural network. Each adaptor performs synaptic plasticity according to the arrival times of the pre- and post-synaptic spikes assigned to it, and sends out a weighted or delayed pre-synaptic spike to the post-synaptic neuron in the neural network. This strategy provides great flexibility for building complex large-scale neural networks, as a neural network can be configured for multiple synaptic plasticity rules without changing its structure. We validate the proposed neuromorphic implementations with measurement results and illustrate that the circuits are capable of performing both STDP and STDDP. We argue that it is practical to scale the work presented here up to 236 (64G) synaptic adaptors on a current high-end FPGA platform.},
urldate = {2022-10-06},
journal = {Frontiers in Neuroscience},
author = {Wang, Runchun M. and Hamilton, Tara J. and Tapson, Jonathan C. and van Schaik, André},
year = {2015},
keywords = {⛔ No INSPIRE recid found},
}
@article{benvenuti_anticipatory_2020,
title = {Anticipatory responses along motion trajectories in awake monkey area {V1}},
copyright = {All rights reserved},
url = {https://www.biorxiv.org/content/10.1101/2020.03.26.010017v1},
doi = {10.1101/2020.03.26.010017},
abstract = {What are the neural mechanisms underlying motion integration of translating objects? Visual motion integration is generally conceived of as a feedforward, hierarchical, information processing. However, feedforward models fail to account for many contextual effects revealed using natural moving stimuli. In particular, a translating object evokes a sequence of transient feedforward responses in the primary visual cortex but also propagations of activity through horizontal and feedback pathways. We investigated how these pathways shape the representation of a translating bar in monkey V1. We show that, for long trajectories, spiking activity builds-up hundreds of milliseconds before the bar enters the neurons receptive fields. Using VSDI and LFP recordings guided by a phenomenological model of propagation dynamics, we demonstrate that this anticipatory response arises from the interplay between horizontal and feedback networks driving V1 neurons well ahead of their feedforward inputs. This mechanism could subtend several perceptual contextual effects observed with translating objects.},
language = {english},
urldate = {2020-03-31},
journal = {bioRxiv : the preprint server for biology},
author = {Benvenuti, Giacomo and Chemla, Sandrine and Boonman, Arjan and Perrinet, Laurent U and Masson, Guillaume S and Chavane, Frederic},
month = mar,
year = {2020},
keywords = {\#nosource, ⛔ No INSPIRE recid found},