@STRING{colt93 = "Proceedings of the Sixth Annual ACM Conference on
                  Computational Learning Theory" }
@STRING{jacm = "Journal of the Association for Computing Machinery" }
@STRING{jma = "Journal of Multivariate Analysis" }
@STRING{lncs = "Lecture Notes in Computer Science" }
@STRING{nips = "Advances in Neural Information Processing Systems" }
@STRING{pecs = "Colloquia Mathematica Societatis J{\'a}nos Bolyai, 57.\ Limit
                Theorems in Probability and Statistics, P{\'e}cs (Hungary)" }
@STRING{spl = "Statistics \& Probability Letters" }
@Article{SubramoneyETAL:2021,
  author   = {Subramoney, Anand and Bellec, Guillaume and Scherr, Franz
              and Legenstein, Robert and Maass, Wolfgang},
  title    = {Revisiting the role of synaptic plasticity and network
              dynamics for fast learning in spiking neural networks},
  journal  = {bioRxiv},
  year     = {2021},
  htmlnote = {(Link to PDF)}
}
@Article{RaoETAL:2021,
  author   = {A. Rao and R. Legenstein and A. Subramoney and W. Maass},
  title    = {A normative framework for learning top-down predictions
              through synaptic plasticity in apical dendrites},
  journal  = {bioRxiv},
  year     = {2021},
  note     = {bioRxiv preprint 2021/433822},
  abstract = {Predictive coding has been identified as a key aspect of
              computation and learning in cortical microcircuits. But we
              do not know how synaptic plasticity processes install and
              maintain predictive coding capabilities in these neural
              circuits. Predictions are inherently uncertain, and
              learning rules that aim at discriminating linearly
              separable classes of inputs - such as the perceptron
              learning rule - do not perform well if the goal is learning
              to predict. We show that experimental data on synaptic
              plasticity in apical dendrites of pyramidal cells support
              another learning rule that is suitable for learning to
              predict. More precisely, it enables a spike-based
              approximation to logistic regression, a well-known gold
              standard for probabilistic prediction. We also show that
              data-based interactions between apical dendrites support
              learning of predictions for more complex probability
              distributions than those that can be handled by single
              dendrites. The resulting learning theory for top-down
              inputs to pyramidal cells provides a normative framework
              for evaluating experimental data, and suggests further
              experiments for tracking the emergence of predictive coding
              through synaptic plasticity in apical dendrites.}
}
@Article{Maass:2021,
  author   = {F. Zenke and S. M. Boht{\'e} and C. Clopath and I. M.
              Com{\c{s}}a and J. G{\"o}ltz and W. Maass and T. Masquelier
              and R. Naud and E. O. Neftci and M. A. Petrovici and F.
              Scherr and D. F. M. Goodman},
  title    = {Visualizing a joint future of neuroscience and
              neuromorphic engineering},
  journal  = {Neuron},
  volume   = {109},
  number   = {4},
  pages    = {571--575},
  year     = {2021},
  abstract = {Recent research resolves the challenging problem of
              building biophysically plausible spiking neural models that
              are also capable of complex information processing. This
              advance creates new opportunities in neuroscience and
              neuromorphic engineering, which we discussed at an online
              focus meeting.},
  htmlnote = {(Link to PDF)}
}
@Article{ScherrMaass:2021,
  author    = {Franz Scherr and W. Maass},
  title     = {Learning-to-Learn for Neuromorphic Hardware},
  journal   = {Neuromorphic Computing and Engineering},
  year      = {2021},
  publisher = {IOP Publishing},
  note      = {In press. Draft of a contribution to the Roadmap on
               Neuromorphic Computing and Engineering}
}
@Article{ScheerETAL:20,
  author   = {F. Scherr and C. Stoeckl and W. Maass},
  title    = {One-shot learning with spiking neural networks},
  journal  = {bioRxiv},
  year     = {2020},
  abstract = {Understanding how one-shot learning can be accomplished
              through synaptic plasticity in neural networks of the brain
              is a major open problem. We propose that approximations to
              BPTT in recurrent networks of spiking neurons (RSNNs) such
              as e-prop cannot achieve this because their local synaptic
              plasticity is gated by learning signals that are rather ad
              hoc from a biological perspective: Random projections of
              instantaneously arising losses at the network outputs,
              analogously as in Broadcast Alignment for feedforward
              networks. In contrast, synaptic plasticity is gated in the
              brain by learning signals such as dopamine, which are
              emitted by specialized brain areas, e.g. VTA. These brain
              areas have arguably been optimized by evolution to gate
              synaptic plasticity in such a way that fast learning of
              survival-relevant tasks is enabled. We found that a
              corresponding model architecture, where learning signals
              are emitted by a separate RSNN that is optimized to
              facilitate fast learning, enables one-shot learning via
              local synaptic plasticity in RSNNs for large families of
              learning tasks. The same learning approach also supports
              fast spike-based learning of posterior probabilities of
              potential input sources, thereby providing a new basis for
              probabilistic reasoning in RSNNs. Our new learning approach
              also solves an open problem in neuromorphic engineering,
              where on-chip one-shot learning capability is highly
              desirable for spike-based neuromorphic devices, but could
              so far not be achieved. Our method can easily be mapped
              into neuromorphic hardware, and thereby solves this
              problem.},
  htmlnote = {(Supplementary material PDF)}
}
@Article{SalajETAL:20,
  author   = {D. Salaj and A. Subramoney and C. Krai{\v{s}}nikovi{\'{c}}
              and G. Bellec and R. Legenstein and W. Maass},
  title    = {Spike-frequency adaptation supports network computations
              on temporally dispersed information},
  journal  = {bioRxiv},
  year     = {2020},
  abstract = {For solving tasks such as recognizing a song, answering a
              question, or inverting a sequence of symbols, cortical
              microcircuits need to integrate and manipulate information
              that was dispersed over time during the preceding seconds.
              Creating biologically realistic models for the underlying
              computations, especially with spiking neurons and for
              behaviorally relevant integration time spans, is
              notoriously difficult. We examine the role of spike
              frequency adaptation in such computations and find that it
              has a surprisingly large impact. The inclusion of this well
              known property of a substantial fraction of neurons in the
              neocortex especially in higher areas of the human neocortex
              moves the performance of spiking neural network models for
              computations on network inputs that are temporally
              dispersed from a fairly low level up to the performance
              level of the human brain.},
  htmlnote = {(Link to bioRxiv version PDF)}
}
@Article{MuellerLegenstein:20,
  author   = {M. G. M\"uller and C. H. Papadimitriou and W. Maass and R.
              Legenstein},
  title    = {A model for structured information representation in
              neural networks of the brain},
  journal  = {eNeuro},
  year     = {2020},
  volume   = {7},
  number   = {3},
  abstract = {Humans can reason at an abstract level and structure
              information into abstract categories, but the underlying
              neural processes have remained unknown. Recent experimental
              data provide the hint that this is likely to involve
              specific subareas of the brain from which structural
              information can be decoded. Based on this data, we
              introduce the concept of assembly projections, a general
              principle for attaching structural information to content
              in generic networks of spiking neurons. According to the
              assembly projections principle, structure-encoding
              assemblies emerge and are dynamically attached to content
              representations through {H}ebbian plasticity mechanism.
              This model provides the basis for explaining a number of
              experimental data and provides a basis for modeling
              abstract computational operations of the brain.},
  htmlnote = {(Journal link to PDF)}
}
@Article{PapadimitriouMaass:19,
  author   = {Papadimitriou, C. and Vempala, S. and Mitropolsky, D. and
              Collins, M. and Maass, W.},
  title    = {Brain computation by assemblies of neurons},
  journal  = {PNAS},
  year     = {2020},
  volume   = {117},
  number   = {25},
  pages    = {14464--14472},
  abstract = {Assemblies are large populations of neurons believed to
              imprint memories, concepts, words and other cognitive
              information. We identify a repertoire of operations on
              assemblies. These operations correspond to properties of
              assemblies observed in experiments, and can be shown,
              analytically and through simulations, to be realizable by
              generic, randomly connected populations of neurons with
              {H}ebbian plasticity and inhibition. Operations on
              assemblies include: projection (duplicating an assembly by
              creating a new assembly in a downstream brain area);
              reciprocal projection (a variant of projection also
              entailing synaptic connectivity from the newly created
              assembly to the original one); association (increasing the
              overlap of two assemblies in the same brain area to reflect
              co-occurrence or similarity of the corresponding concepts);
              merge (creating a new assembly with ample synaptic
              connectivity to and from two existing ones); and
              pattern-completion (firing of an assembly, with some
              probability, in response to the firing of some but not all
              of its neurons). Our analytical results establishing the
              plausibility of these operations are proved in a simplified
              mathematical model of cortex: a finite set of brain areas
              each containing $n$ excitatory neurons, with random
              connectivity that is both recurrent (within an area) and
              afferent (between areas). Within one area and at any time,
              only $k$ of the $n$ neurons fire --- an assumption that
              models inhibition and serves to define both assemblies and
              areas --- while synaptic weights are modified by {H}ebbian
              plasticity, as well as homeostasis. Importantly, all neural
              apparatus needed for the functionality of the assembly
              operations is created on the fly --- through the randomness
              of the synaptic network, the selection of the $k$ neurons
              with the highest synaptic input, and {H}ebbian plasticity,
              without any special neural circuits assumed to be in place.
              Assemblies and their operations constitute a computational
              model of the brain which we call the {Assembly Calculus},
              occupying a level of detail intermediate between the level
              of spiking neurons and synapses, and that of the whole
              brain. As with high-level programming languages, a
              computation in the {Assembly Calculus} (that is, a coherent
              sequence of assembly operations accomplishing a task) can
              ultimately be reduced (``compiled down'') to computation by
              neurons and synapses; however, it would be far more
              cumbersome and opaque to represent the same computation
              that way. The resulting computational system can be shown,
              under assumptions, to be in principle capable of carrying
              out arbitrary computations. We hypothesize that something
              like it may underlie higher human cognitive functions such
              as reasoning, planning, and language. In particular, we
              propose a plausible brain architecture based on assemblies
              for implementing the syntactic processing of language in
              cortex, which is consistent with recent experimental
              results.},
  htmlnote = {(Link to journal version PDF),(Draft
              on biorxiv PDF)}
}
@Article{StoecklMaass:20,
  author   = {C. Stoeckl and W. Maass},
  title    = {Optimized spiking neurons can classify images with high
              accuracy through temporal coding with two spikes},
  journal  = {Nature Machine Intelligence},
  year     = {2021},
  volume   = {3},
  pages    = {230--238},
  note     = {Draft on arXiv},
  abstract = {Spike-based neuromorphic hardware promises to reduce the
              energy consumption of image classification and other deep
              learning applications, particularly on mobile phones and
              other edge devices. However direct training of deep spiking
              neural networks is difficult, and previous methods for
              converting trained artificial neural networks to spiking
              neurons were inefficient because the neurons had to emit
              too many spikes. We show that a substantially more
              efficient conversion arises when one optimizes the spiking
              neuron model for that purpose, so that it not only matters
              for information transmission how many spikes a neuron
              emits, but also when it emits those spikes. This advances
              the accuracy that can be achieved for image classification
              with spiking neurons, and the resulting networks need on
              average just two spikes per neuron for classifying an
              image. In addition, our new conversion method drastically
              improves latency and throughput of the resulting spiking
              networks.},
  htmlnote = {(Link to journal version PDF),(Link
              to arXiv version PDF)}
}
@Article{StoecklMaass:19,
  author   = {C. Stoeckl and W. Maass},
  title    = {Recognizing Images with at most one Spike per Neuron},
  journal  = {arXiv:2001.01682v3},
  year     = {2019},
  abstract = {In order to port the performance of trained artificial
              neural networks (ANNs) to spiking neural networks (SNNs),
              which can be implemented in neuromorphic hardware with a
              drastically reduced energy consumption, an efficient ANN to
              SNN conversion is needed. Previous conversion schemes
              focused on the representation of the analog output of a
              rectified linear (ReLU) gate in the ANN by the firing rate
              of a spiking neuron. But this is not possible for other
              commonly used ANN gates, and it reduces the throughput even
              for ReLU gates. We introduce a new conversion method where
              a gate in the ANN, which can basically be of any type, is
              emulated by a small circuit of spiking neurons, with At
              Most One Spike (AMOS) per neuron. We show that this AMOS
              conversion improves the accuracy of SNNs for ImageNet from
              74.60\% to 80.97\%, thereby bringing it within reach of the
              best available ANN accuracy (85.0\%). The Top5 accuracy of
              SNNs is raised to 95.82\%, getting even closer to the best
              Top5 performance of 97.2\% for ANNs. In addition, AMOS
              conversion improves latency and throughput of spike-based
              image classification by several orders of magnitude. Hence
              these results suggest that SNNs provide a viable direction
              for developing highly energy efficient hardware for AI that
              combines high performance with versatility of applications.},
  htmlnote = {(Link to arXiv version PDF)}
}
@InCollection{MaassETAL:19,
  author    = {W. Maass and C. H. Papadimitriou and S. Vempala and R.
               Legenstein},
  title     = {Brain computation: a computer science perspective},
  booktitle = {Computing and Software Science},
  series    = lncs,
  volume    = {10000},
  pages     = {184--199},
  year      = {2019},
  publisher = {Springer},
  abstract  = {The brain carries out tasks that are very demanding from a
               computational perspective, apparently powered by a mere 20
               Watts. This fact has intrigued computer scientists for many
               decades, and is currently drawing many of them to the quest
               of acquiring a computational understanding of the brain.
               Yet, at present there is no productive interaction of
               computer scientists with neuroscientists in this quest.
               Research in computational neuroscience is advancing at a
               rapid pace, and the resulting abundance of facts and models
               makes it increasingly difficult for scientists from other
               fields to engage in brain research. The goal of this
               article is to provide --- along with a few words of caution
               --- background, up-to-date references on data and models in
               neuroscience, and open problems that appear to provide good
               opportunities for theoretical computer scientists to enter
               the fascinating field of brain computation.},
  htmlnote  = {(Journal link to PDF)}
}
@InCollection{SubramoneyETAL:19,
  author    = {A. Subramoney and F. Scherr and W. Maass},
  title     = {Reservoirs learn to learn},
  editor    = {Nakajima, K. and Fischer, I.},
  booktitle = {Reservoir Computing: Theory, Physical Implementations, and
               Applications},
  publisher = {Springer},
  year      = {2020},
  abstract  = {The common procedure in reservoir computing is to take a
               "found" reservoir, such as a recurrent neural network with
               randomly chosen synaptic weights or a complex physical
               device, and to adapt the weights of linear readouts from
               this reservoir for a particular computing task. We address
               the question of whether the performance of reservoir
               computing can be significantly enhanced if one instead
               optimizes some (hyper)parameters of the reservoir, not for
               a single task but for the range of all possible tasks in
               which one is potentially interested, before the weights of
               linear readouts are optimized for a particular computing
               task. After all, networks of neurons in the brain are also
               known to be not randomly connected. Rather, their structure
               and parameters emerge from complex evolutionary and
               developmental processes, arguably in a way that enhances
               speed and accuracy of subsequent learning of any concrete
               task that is likely to be essential for the survival of the
               organism. We apply the Learning-to-Learn (L2L) paradigm to
               mimic this two-tier process, where a set of
               (hyper)parameters of the reservoir are optimized for a
               whole family of learning tasks. We found that this
               substantially enhances the performance of reservoir
               computing for the families of tasks that we considered.
               Furthermore, L2L enables a new form of reservoir learning
               that tends to enable even faster learning, where not even
               the weights of readouts need to be adjusted for learning a
               concrete task. We present demos and performance results of
               these new forms of reservoir computing for reservoirs that
               consist of networks of spiking neurons, and are hence of
               particular interest from the perspective of neuroscience
               and implementations in spike-based neuromorphic hardware.
               We leave it as an open question what performance advantage
               the new methods that we propose provide for other types of
               reservoirs.},
  htmlnote  = {Draft on arXiv:1909.07486v1}
}
@Article{KaiserlETAL:19,
  author   = {J. Kaiser and M. Hoff and A. Konle and J. C. V. Tieck and
              D. Kappel and D. Reichard and A. Subramoney and R.
              Legenstein and A. Roennau and W. Maass and R. Dillmann},
  title    = {Embodied Synaptic Plasticity with Online Reinforcement
              Learning},
  journal  = {Frontiers in Neurorobotics},
  year     = {2019},
  volume   = {13},
  number   = {81},
  abstract = {The endeavor to understand the brain involves multiple
              collaborating research fields. Classically, synaptic
              plasticity rules derived by theoretical neuroscientists are
              evaluated in isolation on pattern classification tasks.
              This contrasts with the biological brain whose purpose is
              to control a body in closed-loop. This paper contributes to
              bringing the fields of computational neuroscience and
              robotics closer together by integrating open-source
              software components from these two fields. The resulting
              framework allows to evaluate the validity of
              biologically-plausible plasticity models in closed-loop
              robotics environments. We demonstrate this framework to
              evaluate Synaptic Plasticity with Online REinforcement
              learning (SPORE), a reward-learning rule based on synaptic
              sampling, on two visuomotor tasks: reaching and lane
              following. We show that SPORE is capable of learning to
              perform policies within the course of simulated hours for
              both tasks. Provisional parameter explorations indicate
              that the learning rate and the temperature driving the
              stochastic processes that govern synaptic learning dynamics
              need to be regulated for performance improvements to be
              retained. We conclude by discussing the recent deep
              reinforcement learning techniques which would be beneficial
              to increase the functionality of SPORE on visuomotor
              tasks.},
  htmlnote = {(Journal link to PDF)}
}
@Article{BellecETAL:19b,
  author   = {G. Bellec and F. Scherr and A. Subramoney and E. Hajek and
              D. Salaj and R. Legenstein and W. Maass},
  title    = {A solution to the learning dilemma for recurrent networks
              of spiking neurons},
  journal  = {Nature Communications},
  year     = {2020},
  volume   = {11},
  pages    = {3625},
  abstract = {Recurrently connected networks of spiking neurons underlie
              the astounding information processing capabilities of the
              brain. But in spite of extensive research, it has remained
              open how they can learn through synaptic plasticity to
              carry out complex network computations. We argue that two
              pieces of this puzzle were provided by experimental data
              from neuroscience. A new mathematical insight tells us how
              these pieces need to be combined to enable biologically
              plausible online network learning through gradient descent,
              in particular deep reinforcement learning. This new
              learning method - called {\textit{e-prop}} - approaches the
              performance of {\textit{BPTT}} (backpropagation through
              time), the best known method for training recurrent neural
              networks in machine learning. But in contrast to BPTT,
              e-prop is biologically plausible. In addition, it suggests
              a method for powerful on-chip learning in novel
              energy-efficient spike-based hardware for AI.},
  htmlnote = {Supplementary material PDF,
              Supplementary movies PDF,
              (Commentary by Manneschi, L. \& Vasilaki, E. (2020). An
              alternative to backpropagation through time. In Nature
              Machine Intelligence, 2(3), 155--156. PDF)}
}
@Article{BohnstinglETAL:19,
  author   = {T. Bohnstingl and F. Scherr and C. Pehle and K. Meier and
              W. Maass},
  title    = {Neuromorphic Hardware learns to learn},
  journal  = {Frontiers in Neuroscience},
  year     = {2019},
  volume   = {13},
  pages    = {483},
  abstract = {Hyperparameters and learning algorithms for neuromorphic
              hardware are usually chosen by hand to suit a particular
              task. In contrast, networks of neurons in the brain were
              optimized through extensive evolutionary and developmental
              processes to work well on a range of computing and learning
              tasks. Occasionally this process has been emulated through
              genetic algorithms, but these require themselves
              hand-design of their details and tend to provide a limited
              range of improvements. We employ instead other powerful
              gradient-free optimization tools, such as cross-entropy
              methods and evolutionary strategies, in order to port the
              function of biological optimization processes to
              neuromorphic hardware. As an example, we show these
              optimization algorithms enable neuromorphic agents to learn
              very efficiently from rewards. In particular,
              meta-plasticity, i.e., the optimization of the learning
              rule which they use, substantially enhances reward-based
              learning capability of the hardware. In addition, we
              demonstrate for the first time Learning-to-Learn benefits
              from such hardware, in particular, the capability to
              extract abstract knowledge from prior learning experiences
              that speeds up the learning of new but related tasks.
              Learning-to-Learn is especially suited for accelerated
              neuromorphic hardware, since it makes it feasible to carry
              out the required very large number of network
              computations.}
}
@Article{YanETAL:19,
  author  = {Y. Yan and D. Kappel and F. Neum{\"a}rker and J. Partzsch
             and B. Vogginger and S. H{\"o}ppner and S. Furber and W.
             Maass and R. Legenstein and C. Mayr},
  title   = {Efficient Reward-Based Structural Plasticity on a
             {SpiNNaker} 2 Prototype},
  journal = {IEEE Transactions on Biomedical Circuits and Systems},
  year    = {2019},
  volume  = {13},
  number  = {3},
  pages   = {579--591},
  month   = jun
}
@Article{BellecETAL:19,
  author        = {G. Bellec and F. Scherr and E. Hajek and D. Salaj and R.
                   Legenstein and W. Maass},
  title         = {Biologically inspired alternatives to backpropagation
                   through time for learning in recurrent neural nets},
  journal       = {arXiv:1901.09049},
  year          = {2019},
  month         = jan,
  internal-note = {NOTE(review): the abstract below appears identical to
                   that of LiuETAL:18 (SpiNNaker 2 DEEP R paper) and may
                   not belong to this title --- verify against the arXiv
                   page},
  abstract      = {The memory requirement of deep learning algorithms is
                   considered incompatible with the memory restriction of
                   energy-efficient hardware. A low memory footprint can be
                   achieved by pruning obsolete connections or reducing the
                   precision of connection strengths after the network has
                   been trained. Yet, these techniques are not applicable to
                   the case when neural networks have to be trained directly
                   on hardware due to the hard memory constraints. Deep
                   rewiring (DEEP R) is a training algorithm which
                   continuously rewires the network while preserving very
                   sparse connectivity all along the training procedure. We
                   apply DEEP R to a deep neural network implementation on a
                   prototype chip of the 2nd generation SpiNNaker system. The
                   local memory of a single core on this chip is limited to 64
                   KB and a deep network architecture is trained entirely
                   within this constraint without the use of external memory.
                   Throughout training, the proportion of active connections
                   is limited to 1.3\%. On the handwritten digits dataset
                   MNIST, this extremely sparse network achieves 96.6\%
                   classification accuracy at convergence. Utilizing the
                   multi-processor feature of the SpiNNaker system, we found
                   very good scaling in terms of computation time, per-core
                   memory consumption, and energy constraints. When compared
                   to a X86 CPU implementation, neural network training on the
                   SpiNNaker 2 prototype improves power and energy consumption
                   by two orders of magnitude.}
}
@Article{LiuETAL:18,
  author   = {C. Liu and G. Bellec and B. Vogginger and D. Kappel and J.
              Partzsch and F. Neum\"arker and S. H\"oppner and W. Maass
              and S. B. Furber and R. Legenstein and C. G. Mayr},
  title    = {Memory-efficient Deep Learning on a SpiNNaker 2
              prototype},
  journal  = {Frontiers in Neuroscience},
  year     = {2018},
  abstract = {The memory requirement of deep learning algorithms is
              considered incompatible with the memory restriction of
              energy-efficient hardware. A low memory footprint can be
              achieved by pruning obsolete connections or reducing the
              precision of connection strengths after the network has
              been trained. Yet, these techniques are not applicable to
              the case when neural networks have to be trained directly
              on hardware due to the hard memory constraints. Deep
              rewiring (DEEP R) is a training algorithm which
              continuously rewires the network while preserving very
              sparse connectivity all along the training procedure. We
              apply DEEP R to a deep neural network implementation on a
              prototype chip of the 2nd generation SpiNNaker system. The
              local memory of a single core on this chip is limited to 64
              KB and a deep network architecture is trained entirely
              within this constraint without the use of external memory.
              Throughout training, the proportion of active connections
              is limited to 1.3\%. On the handwritten digits dataset
              MNIST, this extremely sparse network achieves 96.6\%
              classification accuracy at convergence. Utilizing the
              multi-processor feature of the SpiNNaker system, we found
              very good scaling in terms of computation time, per-core
              memory consumption, and energy constraints. When compared
              to a X86 CPU implementation, neural network training on the
              SpiNNaker 2 prototype improves power and energy consumption
              by two orders of magnitude.}
}
@InProceedings{AnanriETAL:18,
  author        = {N. Ananri and C. Daskalakis and W. Maass and C. H.
                   Papadimitriou and A. Saberi and S. Vempala},
  title         = {Smoothed Analysis of Discrete Tensor Decomposition and
                   Assemblies of Neurons},
  booktitle     = {32nd Conference on Neural Information Processing Systems
                   (NIPS 2018), Montreal, Canada},
  year          = {2018},
  internal-note = {NOTE(review): author spelling "Ananri" looks like a typo
                   for "Anari" --- verify against the published paper},
  abstract      = {We analyze linear independence of rank one tensors
                   produced by tensor powers of randomly perturbed vectors.
                   This enables efficient decomposition of sums of high-order
                   tensors. Our analysis builds upon Bhaskara et al. [3] but
                   allows for a wider range of perturbation models, including
                   discrete ones. We give an application to recovering
                   assemblies of neurons. Assemblies are large sets of neurons
                   representing specific memories or concepts. The size of the
                   intersection of two assemblies has been shown in
                   experiments to represent the extent to which these memories
                   co-occur or these concepts are related; the phenomenon is
                   called association of assemblies. This suggests that an
                   animal's memory is a complex web of associations, and poses
                   the problem of recovering this representation from
                   cognitive data. Motivated by this problem, we study the
                   following more general question: {C}an we reconstruct the
                   {V}enn diagram of a family of sets, given the sizes of
                   their l-wise intersections? We show that as long as the
                   family of sets is randomly perturbed, it is enough for the
                   number of measurements to be polynomially larger than the
                   number of nonempty regions of the {V}enn diagram to fully
                   reconstruct the diagram.}
}
@InProceedings{BellecETAL:18,
  author    = {G. Bellec and D. Salaj and A. Subramoney and R. Legenstein
               and W. Maass},
  title     = {Long short-term memory and Learning-to-learn in networks
               of spiking neurons},
  booktitle = {32nd Conference on Neural Information Processing Systems
               (NIPS 2018), Montreal, Canada},
  year      = {2018},
  abstract  = {The brain carries out demanding computations and learning
               processes with recurrent networks of spiking neurons
               (RSNNs). But computing and learning capabilities of
               currently available RSNN models have remained poor,
               especially in comparison with the performance of recurrent
               networks of artificial neurons, such as Long Short-Term
               Memory (LSTM) networks. In this article, we investigate
               whether deep learning can improve RSNN performance. We
               applied backpropagation through time (BPTT), augmented by
               biologically inspired heuristics for synaptic rewiring, to
               RSNNs whose inherent time constants were enriched through
               simple models for adapting spiking neurons. We found that
               the resulting RSNNs approximate, for the first time, the
               computational power of LSTM networks on two common
               benchmark tasks. Furthermore, our results show that recent
               successes with applications of Learning-to-Learn (L2L) to
               LSTM networks can be ported to RSNNs. This opens the door
               to the investigation of L2L in data-based models for neural
               networks of the brain, whose activity can - unlike that of
               LSTM networks - be compared directly with recordings from
               neurons in the brain. In particular, L2L shows that RSNNs
               can learn large families of non-linear transformations from
               very few examples, using previously unknown network
               learning mechanisms. Furthermore, meta-reinforcement
               learning (meta-RL) shows that LSNNs can learn and execute
               complex exploration and exploitation strategies.}
}
@InProceedings{LegensteinETAL:18,
  author    = {R. Legenstein and W. Maass and C. H. Papadimitriou and S.
               S. Vempala},
  title     = {Long term memory and the densest {K}-subgraph problem},
  booktitle = {Proceedings of Innovations in Theoretical Computer Science
               (ITCS)},
  year      = {2018},
  abstract  = {In a recent experiment [9], a cell in the human medial
               temporal lobe {(MTL)} encoding one sensory stimulus starts
               to also respond to a second stimulus following a combined
               experience associating the two. We develop a theoretical
               model predicting that an assembly of cells with
               exceptionally high synaptic intraconnectivity can emerge,
               in response to a particular sensory experience, to encode
               and abstract that experience. We also show that two such
               assemblies are modified to increase their intersection
               after a sensory event that associates the two corresponding
               stimuli. The main technical tools employed are random graph
               theory, and {B}ernoulli approximations. Assembly creation
               must overcome a computational challenge akin to the
               {Densest K-Subgraph} problem, namely selecting, from a
               large population of randomly and sparsely interconnected
               cells, a subset with exceptionally high density of
               interconnections. We identify three mechanisms that help
               achieve this feat in our model: (1) a simple two-stage
               randomized algorithm, and (2) the "triangle completion
               bias" in synaptic connectivity [14] and a "birthday
               paradox", while (3) the strength of these connections is
               enhanced through {H}ebbian plasticity.}
}
@InProceedings{BellecETAL:17,
author = {G. Bellec and D. Kappel and W. Maass and R. Legenstein},
title = {Deep rewiring: training very sparse deep networks},
booktitle = {International Conference on Learning Representations
(ICLR)},
year = {2018},
volume = {},
number = {},
pages = {},
abstract = {Neuromorphic hardware tends to pose limits on the
connectivity of deep networks that one can run on them. But
also generic hardware and software implementations of deep
learning run more efficiently on sparse networks. Several
methods exist for pruning connections of a neural network
after it was trained without connectivity constraints. We
present an algorithm, {DEEP R}, that enables us to train
directly a sparsely connected neural network. {DEEP R}
automatically rewires the network during supervised
training so that connections are there where they are most
needed for the task, while its total number is all the time
strictly bounded. We demonstrate that {DEEP R} can be used
to train very sparse feedforward and recurrent neural
networks on standard benchmark tasks with just a minor loss
in performance. {DEEP R} is based on a rigorous theoretical
foundation that views rewiring as stochastic sampling of
network configurations from a posterior.},
htmlnote = {},
note = {}
}
@Article{PokornyETAL:17,
author = {C. Pokorny and M. J. Ison and A. Rao and R. Legenstein and
C. Papadimitriou and W. Maass},
title = {{STDP} forms associations between memory traces in
networks of spiking neurons},
journal = {Cerebral Cortex},
year = {2020},
volume = {30},
number = {3},
pages = {952--968},
abstract = {Memory traces and associations between them are
fundamental for cognitive brain function. Neuron recordings
suggest that distributed assemblies of neurons in the brain
serve as memory traces for spatial information, real-world
items, and concepts. However, there is conflicting evidence
regarding neural codes for associated memory traces. Some
studies suggest the emergence of overlaps between
assemblies during an association, while others suggest that
the assemblies themselves remain largely unchanged and new
assemblies emerge as neural codes for associated memory
items. Here we study the emergence of neural codes for
associated memory items in a generic computational model of
recurrent networks of spiking neurons with a
data-constrained rule for spike-timing-dependent
plasticity. The model depends critically on two parameters,
which control the excitability of neurons and the scale of
initial synaptic weights. By modifying these two
parameters, the model can reproduce both experimental data
from the human brain on the fast formation of associations
through emergent overlaps between assemblies, and rodent
data where new neurons are recruited to encode the
associated memories. Hence, our findings suggest that the
brain can use both of these two neural codes for
associations, and dynamically switch between them during
consolidation.},
htmlnote = {(Supplementary material PDF),
(Journal link to PDF)},
note = {}
}
@Article{LegensteinETAL:17,
author = {R. Legenstein and Z. Jonke and S. Habenschuss and W.
Maass},
title = {A probabilistic model for learning in cortical
microcircuit motifs with data-based divisive inhibition},
journal = {arXiv:1707.05182},
year = {2017},
volume = {},
number = {},
pages = {},
abstract = {Previous theoretical studies on the interaction of
excitatory and inhibitory neurons proposed to model this
cortical microcircuit motif as a so-called Winner-Take-All
(WTA) circuit. A recent modeling study however found that
the WTA model is not adequate for data-based softer forms
of divisive inhibition as found in a microcircuit motif in
cortical layer 2/3. We investigate here through theoretical
analysis the role of such softer divisive inhibition for
the emergence of computational operations and neural codes
under spike-timing dependent plasticity (STDP). We show
that in contrast to WTA models - where the network activity
has been interpreted as probabilistic inference in a
generative mixture distribution - this network dynamics
approximates inference in a noisy-OR-like generative model
that explains the network input based on multiple hidden
causes. Furthermore, we show that STDP optimizes the
parameters of this model by approximating online the
expectation maximization (EM) algorithm. This theoretical
analysis corroborates a preceding modelling study which
suggested that the learning dynamics of this layer 2/3
microcircuit motif extracts a specific modular
representation of the input and thus performs blind source
separation on the input statistics.},
htmlnote = {},
note = {}
}
@Article{JonkeETAL:17,
author = {Z. Jonke and R. Legenstein and S. Habenschuss and W.
Maass},
title = {Feedback inhibition shapes emergent computational
properties of cortical microcircuit motifs},
journal = {Journal of Neuroscience},
year = {2017},
volume = {37},
number = {35},
pages = {8511--8523},
abstract = {Cortical microcircuits are very complex networks, but they
are composed of a relatively small number of stereotypical
motifs. Hence one strategy for throwing light on the
computational function of cortical microcircuits is to
analyze emergent computational properties of these
stereotypical microcircuit motifs. We are addressing here
the question how spike-timing dependent plasticity (STDP)
shapes the computational properties of one motif that has
frequently been studied experimentally: interconnected
populations of pyramidal cells and parvalbumin-positive
inhibitory cells in layer 2/3. Experimental studies suggest
that these inhibitory neurons exert some form of divisive
inhibition on the pyramidal cells. We show that this
data-based form of feedback inhibition, which is softer
than that of winner-take-all models that are commonly
considered in theoretical analyses, contributes to the
emergence of an important computational function through
STDP: {T}he capability to disentangle superimposed firing
patterns in upstream networks, and to represent their
information content through a sparse assembly code.},
htmlnote = {},
note = {}
}
@Article{KappelETAL:17,
author = {D. Kappel and R. Legenstein and S. Habenschuss and M.
Hsieh and W. Maass},
title = {A dynamic connectome supports the emergence of stable
computational function of neural circuits through
reward-based learning},
journal = {eNeuro},
year = {2018},
volume = {},
number = {},
pages = {},
abstract = {Synaptic connections between neurons in the brain are
dynamic because of continuously ongoing spine dynamics,
axonal sprouting, and other processes. In fact, it was
recently shown that the spontaneous synapse-autonomous
component of spine dynamics is at least as large as the
component that depends on the history of pre- and
postsynaptic neural activity. These data are inconsistent
with common models for network plasticity, and raise the
questions how neural circuits can maintain a stable
computational function in spite of these continuously
ongoing processes, and what functional uses these ongoing
processes might have. Here, we present a rigorous
theoretical framework for these seemingly stochastic spine
dynamics and rewiring processes in the context of
reward-based learning tasks. We show that spontaneous
synapse-autonomous processes, in combination with rewards
signals such as dopamine, can explain the capability of
networks of neurons in the brain to configure themselves
for specific computational tasks, and to compensate
automatically for later changes in the network or task.
Furthermore we show theoretically and through computer
simulations that stable computational performance is
compatible with continuously ongoing synapse-autonomous
changes. After reaching good computational performance it
causes primarily a slow drift of network architecture and
dynamics in task-irrelevant dimensions, as observed for
neural activity in motor cortex and other areas. On the
more abstract level of reinforcement learning the resulting
model gives rise to an understanding of reward-driven
network plasticity, as continuous sampling of network
configurations.},
htmlnote = {},
note = {}
}
@Article{PetroviciETAL:17,
author = {M. A. Petrovici and S. Schmitt and J. Kl\"ahn and D.
St\"ockel and A. Schroeder and G. Bellec and J. Bill and O.
Breitwieser and I. Bytschok and A. Gr\"ubl and M. G\"uttler
and A. Hartel and S. Hartmann and D. Husmann and K. Husmann
and S. Jeltsch and V. Karasenko and M. Kleider and C.
Koke and A. Kononov and C. Mauch and P. M\"uller and J.
Partzsch and T. Pfeil and S. Schiefer and S. Scholze and A.
Subramoney and V. Thanasoulis and B. Vogginger and R.
Legenstein and W. Maass and R. Sch\"uffny and C. Mayr and
J. Schemmel and K. Meier},
title = {Pattern representation and recognition with accelerated
analog neuromorphic systems},
journal = {arXiv:1703.06043},
year = {2017},
volume = {},
number = {},
pages = {},
abstract = {Despite being originally inspired by the central nervous
system, artificial neural networks have diverged from their
biological archetypes as they have been remodeled to fit
particular tasks. In this paper, we review several
possibilities to reverse map these architectures to
biologically more realistic spiking networks with the aim
of emulating them on fast, low-power neuromorphic hardware.
Since many of these devices employ analog components, which
cannot be perfectly controlled, finding ways to compensate
for the resulting effects represents a key challenge. Here,
we discuss three different strategies to address this
problem: the addition of auxiliary network components for
stabilizing activity, the utilization of inherently robust
architectures and a training method for hardware-emulated
networks that functions without perfect knowledge of the
system's dynamics and parameters. For all three
scenarios, we corroborate our theoretical considerations
with experimental results on accelerated analog
neuromorphic platforms.},
htmlnote = {},
note = {}
}
@InProceedings{SchmittETAL:17,
author = {S. Schmitt and J. Kl\"ahn and G. Bellec and A. Gr\"ubl and
M. G\"uttler and A. Hartel and S. Hartmann and D. Husmann
and K. Husmann and S. Jeltsch and V. Karasenko and M.
Kleider and C. Koke and A. Kononov and C. Mauch and E.
M\"uller and P. M\"uller and J. Partzsch and M. A.
Petrovici and S. Schiefer and S. Scholze and V.
Thanasoulis and B. Vogginger and R. Legenstein and W. Maass
and C. Mayr and R. Sch\"uffny and J. Schemmel and K. Meier},
title = {Neuromorphic Hardware In The Loop: {T}raining a Deep
Spiking Network on the {BrainScaleS} {W}afer-{S}cale
{S}ystem},
booktitle = {{IEEE} International Joint Conference on Neural Networks
({IJCNN}) 2017},
year = {2017},
volume = {},
number = {},
pages = {2227--2234},
abstract = {Emulating spiking neural networks on analog neuromorphic
hardware offers several advantages over simulating them on
conventional computers, particularly in terms of speed and
energy consumption. However, this usually comes at the cost
of reduced control over the dynamics of the emulated
networks. In this paper, we demonstrate how iterative
training of a hardware-emulated network can compensate for
anomalies induced by the analog substrate. We first convert
a deep neural network trained in software to a spiking
network on the BrainScaleS wafer-scale neuromorphic system,
thereby enabling an acceleration factor of 10 000 compared
to the biological time domain. This mapping is followed by
the in-the-loop training, where in each training step, the
network activity is first recorded in hardware and then
used to compute the parameter updates in software via
backpropagation. An essential finding is that the parameter
updates do not have to be precise, but only need to
approximately follow the correct gradient, which simplifies
the computation of updates. Using this approach, after only
several tens of iterations, the spiking network shows an
accuracy close to the ideal software-emulated prototype.
The presented techniques show that deep spiking networks
emulated on analog neuromorphic devices can attain good
computational performance despite the inherent variations
of the analog substrate.},
htmlnote = {},
note = {}
}
@Article{MaassETAL:17,
author = {W. Maass and C. H. Papadimitriou and S. Vempala and R.
Legenstein},
title = {Brain Computation: A Computer Science Perspective},
journal = {Draft of an invited contribution to Springer Lecture Notes
in Computer Science},
year = {2017},
volume = {10000},
number = {},
pages = {},
abstract = {The brain carries out tasks that are very demanding from a
computational perspective, apparently powered by a mere 20
Watts. This fact has intrigued computer scientists for many
decades, and is currently drawing many of them to the quest
of acquiring a computational understanding of the brain.
Yet, at present there is no productive interaction of
computer scientists with neuroscientists in this quest.
Research in computational neuroscience is advancing at a
rapid pace, and the resulting abundance of facts and models
makes it increasingly difficult for scientists from other
fields to engage in brain research. The goal of this
article is to provide --- along with a few words of caution
--- background, up-to-date references on data and models in
neuroscience, and open problems that appear to provide good
opportunities for theoretical computer scientists to enter
the fascinating field of brain computation.},
htmlnote = {},
note = {}
}
@Article{LegensteinETAL:16,
author = {R. Legenstein and C. H. Papadimitriou and S. Vempala and
W. Maass},
title = {Assembly pointers for variable binding in networks of
spiking neurons},
journal = {arXiv preprint arXiv:1611.03698},
year = {2016},
volume = {},
number = {},
pages = {},
abstract = {We propose a model for binding of variables such as the
thematic role of a word in a sentence or episode (e.g.,
agent or patient), to concrete fillers (e.g., a word or
concept). Our model is based on recent experimental data
about corresponding processes in the human brain. One
source of information are electrode recordings from the
human brain, which suggest that concepts are represented in
the medial temporal lobe (MTL) through sparse sets of
neurons (assemblies). Another source of information are
fMRI recordings from the human brain, which suggests that
subregions of the temporal cortex are dedicated to the
representation of specific roles (e.g., subject or object)
of concepts in a sentence or visually presented episode. We
propose that quickly recruited assemblies of neurons in
these subregions act as pointers to previously created
assemblies that represent concepts. We provide a proof of
principle that the resulting model for binding through
assembly pointers can be implemented in networks of spiking
neurons, and supports basic operations of brain
computations, such as structured information retrieval and
copying of information. We also show that salient features
of fMRI data on neural activity during structured
information retrieval can be reproduced by the proposed
model.},
htmlnote = {},
note = {}
}
@Article{Maass:16b,
author = {W. Maass},
title = {Energy-efficient neural network chips approach human
recognition capabilities},
journal = {PNAS},
year = {2016},
volume = {113},
number = {40},
doi = {10.1073/pnas.1614109113},
abstract = {},
htmlnote = {},
note = {}
}
@Article{YuETAL:16,
author = {Z. Yu and D. Kappel and R. Legenstein and S. Song and F.
Chen and W. Maass},
title = {{CaMKII} activation supports reward-based neural network
optimization through {H}amiltonian sampling},
journal = {arXiv:1606.00157},
year = {2016},
volume = {},
number = {},
pages = {},
abstract = {Synaptic plasticity is implemented and controlled through
over thousand different types of molecules in the
postsynaptic density and presynaptic boutons that assume a
staggering array of different states through phosphorylation
and other mechanisms. One of the most prominent molecules in
the postsynaptic density is {CaMKII}, that is described in
molecular biology as a "memory molecule" that can integrate
through auto-phosphorylation Ca-influx signals on a
relatively large time scale of dozens of seconds. The
functional impact of this memory mechanism is largely
unknown. We show that the experimental data on the specific
role of CaMKII activation in dopamine-gated spine
consolidation suggest a general functional role in speeding
up reward-guided search for network configurations that
maximize reward expectation. Our theoretical analysis shows
that stochastic search could in principle even attain
optimal network configurations by emulating one of the most
well-known nonlinear optimization methods, simulated
annealing. But this optimization is usually impeded by
slowness of stochastic search at a given temperature. We
propose that {CaMKII} contributes a momentum term that
substantially speeds up this search. In particular, it
allows the network to overcome saddle points of the fitness
function. The resulting improved stochastic policy search
can be understood on a more abstract level as {H}amiltonian
sampling, which is known to be one of the most efficient
stochastic search methods.},
htmlnote = {},
note = {}
}
@Article{PecevskiMaass:16,
author = {D. Pecevski and W. Maass},
title = {Learning probabilistic inference through {STDP}},
journal = {eNeuro},
year = {2016},
volume = {},
number = {},
pages = {},
abstract = {Numerous experimental data show that the brain is able to
extract information from complex, uncertain, and often
ambiguous experiences. Furthermore it can use such learnt
information for decision making through probabilistic
inference. Several models have been proposed that aim at
explaining how probabilistic inference could be carried out
by networks of neurons in the brain. We propose here a
model that can also explain how such neural network could
acquire the necessary information for that from examples.
We show that spike-timing-dependent plasticity (STDP) in
combination with intrinsic plasticity generates in
ensembles of pyramidal cells with lateral inhibition a
fundamental building block for that: probabilistic
associations between neurons that represent through their
firing current values of random variables. Furthermore, by
combining such adaptive network motifs in a recursive
manner the resulting network is enabled to extract
statistical information from complex input streams, and to
build an internal model for the distribution p that
generates the examples it receives. This holds even if p
contains higher order moments. The analysis of this
learning process is supported by a rigorous theoretical
foundation. Furthermore we show, that the network can use
the learnt internal model immediately for prediction,
decision making, and other types of probabilistic
inference.},
htmlnote = {},
note = {}
}
@Article{JonkeETAL:16,
author = {Z. Jonke and S. Habenschuss and W. Maass},
title = {Solving constraint satisfaction problems with networks of
spiking neurons},
journal = {Frontiers in Neuroscience},
year = {2016},
volume = {},
number = {},
pages = {},
abstract = {Network of neurons in the brain apply unlike processors in
our current generation of computer hardware an event-based
processing strategy, where short pulses (spikes) are
emitted sparsely by neurons to signal the occurrence of an
event at a particular point in time. Such spike-based
computations promise to be substantially more
power-efficient than traditional clocked processing
schemes. However it turns out to be surprisingly difficult
to design networks of spiking neurons that can solve
difficult computational problems on the level of single
spikes, rather than rates of spikes. We present here a new
method for designing networks of spiking neurons via an
energy function. Furthermore we show how the energy
function of a network of stochastically firing neurons can
be shaped in a transparent manner by composing the networks
of simple stereotypical network motifs. We show that this
design approach enables networks of spiking neurons to
produce approximate solutions to difficult (NP-hard)
constraint satisfaction problems from the domains of
planning/optimization and verification/logical inference.
The resulting networks employ noise as a computational
resource. Nevertheless the timing of spikes plays an
essential role in their computations. Furthermore, networks
of spiking neurons carry out for the Traveling Salesman
Problem a more efficient stochastic search for good
solutions compared with stochastic artificial neural
networks (Boltzmann machines) and Gibbs sampling.},
htmlnote = {(Journal link to PDF)},
note = {}
}
@Article{Maass:16,
author = {W. Maass},
title = {Searching for principles of brain computation},
journal = {Current Opinion in Behavioral Sciences (Special Issue on
Computational Modelling)},
year = {2016},
volume = {11},
number = {},
pages = {81--92},
abstract = {Experimental methods in neuroscience, such as
calcium-imaging and recordings with multi-electrode arrays,
are advancing at a rapid pace. They produce insight into
the simultaneous activity of large numbers of neurons, and
into plasticity processes in the brains of awake and
behaving animals. These new data constrain models for
neural computation and network plasticity that underlie
perception, cognition, behavior, and learning. I will
discuss in this short article four such constraints:
Inherent recurrent network activity and heterogeneous
dynamic properties of neurons and synapses, stereotypical
spatio-temporal activity patterns in networks of neurons,
high trial-to-trial variability of network responses, and
functional stability in spite of permanently ongoing
changes in the network. I am proposing that these
constraints provide hints to underlying principles of brain
computation and learning.},
htmlnote = {},
note = {}
}
@Article{Maass:15,
author = {W. Maass},
title = {To spike or not to spike: {T}hat is the question},
journal = {Proceedings of the IEEE},
year = {2015},
volume = {103},
number = {12},
pages = {2219--2224},
abstract = {Both the brain and digital computers process information,
but they do this in completely different ways. Neurons in
the brain transmit information not through bits, but
through spikes. Spikes are short voltage increases that are
generated near the cell body of a neuron, with average
spike rates below 10 Hz. These spikes are transmitted via
fine axonal fibers and synapses to about 10 000 other
neurons. Neurons also differ in another fundamental aspect
from processors in a digital computer: they produce spikes
according to stochastic rather than deterministic rules.
This article discusses recent progress in understanding how
complex computations can be carried out with such
stochastically spiking neurons. Other recent developments
suggest that spike-based neural networks can be emulated by
neuromorphic hardware at a fraction of the energy consumed
by current digital computing hardware. Can both
developments be merged to provide a blueprint for
substantially more energy-efficient computing devices?
Explores these issues and examines the viability of such a
merger.},
htmlnote = {},
note = {}
}
@InCollection{KappelETAL:15a,
author = {D. Kappel and S. Habenschuss and R. Legenstein and W.
Maass},
title = {Synaptic sampling: A {B}ayesian approach to neural network
plasticity and rewiring},
booktitle = {Advances in Neural Information Processing Systems 28},
editor = {C. Cortes and N. D. Lawrence and D. D. Lee and M. Sugiyama
and R. Garnett},
publisher = {Curran Associates, Inc.},
year = {2015},
volume = {},
number = {},
pages = {370--378},
abstract = {We reexamine in this article the conceptual and
mathematical framework for understanding the organization
of plasticity in spiking neural networks. We propose that
inherent stochasticity enables synaptic plasticity to carry
out probabilistic inference by sampling from a posterior
distribution of synaptic parameters. This view provides a
viable alternative to existing models that propose
convergence of synaptic weights to maximum likelihood
parameters. It explains how priors on weight distributions
and connection probabilities can be merged optimally with
learned experience. In simulations we show that our model
for synaptic plasticity allows spiking neural networks to
compensate continuously for unforeseen disturbances.
Furthermore it provides a normative mathematical framework
to better understand the permanent variability and rewiring
observed in brain networks.},
htmlnote = {},
note = {}
}
@Article{KappelETAL:15,
author = {D. Kappel and S. Habenschuss and R. Legenstein and W.
Maass},
title = {Network Plasticity as {B}ayesian Inference},
journal = {PLOS Computational Biology},
year = {2015},
volume = {11},
number = {11},
pages = {e1004485},
abstract = {General results from statistical learning theory suggest
to understand not only brain computations, but also brain
plasticity as probabilistic inference. But a model for that
has been missing. We propose that inherently stochastic
features of synaptic plasticity and spine motility enable
cortical networks of neurons to carry out probabilistic
inference by sampling from a posterior distribution of
network configurations. This model provides a viable
alternative to existing models that propose convergence of
parameters to maximum likelihood values. It explains how
priors on weight distributions and connection probabilities
can be merged optimally with learned experience, how
cortical networks can generalize learned information so
well to novel experiences, and how they can compensate
continuously for unforeseen disturbances of the network.
The resulting new theory of network plasticity explains
from a functional perspective a number of experimental data
on stochastic aspects of synaptic plasticity that
previously appeared to be quite puzzling.},
htmlnote = {(Journal link to PDF)}
}
@Article{BillETAL:15,
author = {J. Bill and L. Buesing and S. Habenschuss and B. Nessler
and W. Maass and R. Legenstein},
title = {Distributed {B}ayesian computation and self-organized
learning in sheets of spiking neurons with local lateral
inhibition},
journal = {PLOS ONE},
year = {2015},
volume = {10},
number = {8},
pages = {e0134356},
abstract = {During the last decade, Bayesian probability theory has
emerged as a framework in cognitive science and
neuroscience for describing perception, reasoning and
learning of mammals. However, our understanding of how
probabilistic computations could be organized in the brain,
and how the observed connectivity structure of cortical
microcircuits supports these calculations, is rudimentary
at best. In this study, we investigate statistical
inference and self-organized learning in a spatially
extended spiking network model, that accommodates both
local competitive and large-scale associative aspects of
neural information processing, under a unified Bayesian
account. Specifically, we show how the spiking dynamics of
a recurrent network with lateral excitation and local
inhibition in response to distributed spiking input, can be
understood as sampling from a variational posterior
distribution of a well-defined implicit probabilistic
model. This interpretation further permits a rigorous
analytical treatment of experience-dependent plasticity on
the network level. Using machine learning theory, we derive
update rules for neuron and synapse parameters which equate
with Hebbian synaptic and homeostatic intrinsic plasticity
rules in a neural implementation. In computer simulations,
we demonstrate that the interplay of these plasticity rules
leads to the emergence of probabilistic local experts that
form distributed assemblies of similarly tuned cells
communicating through lateral excitatory connections. The
resulting sparse distributed spike code of a well-adapted
network carries compressed information on salient input
features combined with prior experience on correlations
among them. Our theory predicts that the emergence of such
efficient representations benefits from network
architectures in which the range of local inhibition
matches the spatial extent of pyramidal cells that share
common afferent input.},
htmlnote = {(Journal link to PDF)},
note = {}
}
@Article{JonkeETAL:14,
author = {Z. Jonke and S. Habenschuss and W. Maass},
title = {A theoretical basis for efficient computations with noisy
spiking neurons},
journal = {arXiv preprint arXiv:1412.5862},
year = {2014},
volume = {},
number = {},
pages = {},
abstract = {Network of neurons in the brain apply unlike processors in
our current generation of computer hardware an event-based
processing strategy, where short pulses (spikes) are
emitted sparsely by neurons to signal the occurrence of an
event at a particular point in time. Such spike-based
computations promise to be substantially more
power-efficient than traditional clocked processing
schemes. However it turned out to be surprisingly difficult
to design networks of spiking neurons that are able to
carry out demanding computations. We present here a new
theoretical framework for organizing computations of
networks of spiking neurons. In particular, we show that a
suitable design enables them to solve hard constraint
satisfaction problems from the domains of
planning/optimization and verification/logical inference.
The underlying design principles employ noise as a
computational resource. Nevertheless the timing of spikes
(rather than just spike rates) plays an essential role in
the resulting computations. Furthermore, one can
demonstrate for the Traveling Salesman Problem a surprising
computational advantage of networks of spiking neurons
compared with traditional artificial neural networks and
Gibbs sampling. The identification of such advantage has
been a well-known open problem.},
htmlnote = {}
}
@Article{LegensteinMaass:14,
author = {R. Legenstein and W. Maass},
title = {Ensembles of Spiking Neurons with Noise Support Optimal
Probabilistic Inference in a Dynamically Changing
Environment},
journal = {PLOS Computational Biology},
year = {2014},
volume = {10},
number = {10},
pages = {e1003859},
abstract = {It has recently been shown that networks of spiking
neurons with noise can emulate simple forms of
probabilistic inference through "neural sampling", i.e., by
treating spikes as samples from a probability distribution
of network states that is encoded in the network.
Deficiencies of the existing model are its reliance on
single neurons for sampling from each random variable, and
the resulting limitation in representing quickly varying
probabilistic information. We show that both deficiencies
can be overcome by moving to a biologically more realistic
encoding of each salient random variable through the
stochastic firing activity of an ensemble of neurons. The
resulting model demonstrates that networks of spiking
neurons with noise can easily track and carry out basic
computational operations on rapidly varying probability
distributions, such as the odds of getting rewarded for a
specific behavior. We demonstrate the viability of this new
approach towards neural coding and computation, which makes
use of the inherent parallelism of generic neural circuits,
by showing that this model can explain experimentally
observed firing activity of cortical neurons for a variety
of tasks that require rapid temporal integration of sensory
information.},
htmlnote = {(Journal link to PDF)}
}
@Article{Maass:14,
author = {W. Maass},
title = {Noise as a resource for computation and learning in
networks of spiking neurons},
journal = {Proceedings of the IEEE},
year = {2014},
volume = {102},
number = {5},
pages = {860--880},
note = {Special issue on ``Engineering Intelligent Electronic
Systems based on Computational Neuroscience''},
abstract = {We are used to viewing noise as a nuisance in computing
systems. This is a pity, since noise will be abundantly
available in energy efficient future nanoscale devices and
circuits. I propose here to learn from the way the brain
deals with noise, and apparently even benefits from it.
Recent theoretical results have provided insight into how
this can be achieved: how noise enables networks of spiking
neurons to carry out probabilistic inference through
sampling and also enables creative problem solving. In
addition noise supports the self organization of networks
of spiking neurons, and learning from rewards. I will
sketch here the main ideas and some consequences of these
results. I will also describe why these results are paving
the way for a qualitative jump in the computational
capability and learning performance of neuromorphic
networks of spiking neurons with noise, and for other
future computing systems that are able to treat noise as a
resource. \emph{Index Terms}--noise, spiking neurons,
neural networks, computational power, stochastic computing,
self-organization, neuromorphic hardware.},
htmlnote = {}
}
@Article{KappelETAL:14,
author = {D. Kappel and B. Nessler and W. Maass},
title = {{STDP} installs in winner-take-all circuits an online
approximation to hidden {M}arkov model learning},
journal = {PLOS Computational Biology},
year = {2014},
volume = {10},
number = {3},
pages = {e1003511},
abstract = {In order to cross a street without being run over, we need
to be able to extract very fast hidden causes of
dynamically changing multi-modal sensory stimuli, and to
predict their future evolution. We show here that a generic
cortical microcircuit motif, pyramidal cells with lateral
excitation and inhibition, provides the basis for this
difficult but all-important information processing
capability. This capability emerges in the presence of
noise automatically through effects of STDP on connections
between pyramidal cells in Winner-Take-All circuits with
lateral excitation. In fact, one can show that these motifs
endow cortical microcircuits with functional properties of
a hidden Markov model, a generic model for solving such
tasks through probabilistic inference. Whereas in
engineering applications this model is adapted to specific
tasks through offline learning, we show here that a major
portion of the functionality of hidden Markov models arises
already from online applications of STDP, without any
supervision or rewards. We demonstrate the emergent
computing capabilities of the model through several
computer simulations. The full power of hidden Markov model
learning can be attained through reward-gated STDP. This is
due to the fact that these mechanisms enable a rejection
sampling approximation to theoretically optimal learning.
We investigate the possible performance gain that can be
achieved with this more accurate learning method for an
artificial grammar task.},
htmlnote = {(Journal link to PDF)},
note = {}
}
@Article{HabenschussETAL:13a,
author = {S. Habenschuss and Z. Jonke and W. Maass},
title = {Stochastic computations in cortical microcircuit models},
journal = {PLOS Computational Biology},
year = {2013},
volume = {9},
number = {11},
pages = {e1003311},
abstract = {Experimental data from neuroscience suggest that a
substantial amount of knowledge is stored in the brain in
the form of probability distributions over network states
and trajectories of network states. We provide a
theoretical foundation for this hypothesis by showing that
even very detailed models for cortical microcircuits, with
data-based diverse nonlinear neurons and synapses, have a
stationary distribution of network states and trajectories
of network states to which they converge exponentially fast
from any initial state. We demonstrate that this
convergence holds in spite of the non-reversibility of the
stochastic dynamics of cortical microcircuits. We further
show that, in the presence of background network
oscillations, separate stationary distributions emerge for
different phases of the oscillation, in accordance with
experimentally reported phase-specific codes. We complement
these theoretical results by computer simulations that
investigate resulting computation times for typical
probabilistic inference tasks on these internally stored
distributions, such as marginalization or marginal
maximum-a-posteriori estimation. Furthermore, we show that
the inherent stochastic dynamics of generic cortical
microcircuits enables them to quickly generate approximate
solutions to difficult constraint satisfaction problems,
where stored knowledge and current inputs jointly constrain
possible solutions. This provides a powerful new computing
paradigm for networks of spiking neurons, that also throws
new light on how networks of neurons in the brain could
carry out complex computational tasks such as prediction,
imagination, memory recall and problem solving.},
htmlnote = {(Additional technical information PDF)},
note = {}
}
@Article{KlampflMaass:13,
author = {S. Klampfl and W. Maass},
title = {Emergence of dynamic memory traces in cortical
microcircuit models through {STDP}},
journal = {The Journal of Neuroscience},
year = {2013},
volume = {33},
number = {28},
pages = {11515--11529},
abstract = {Numerous experimental data suggest that simultaneously or
sequentially activated assemblies of neurons play a key
role in the storage and computational use of long term
memory in the brain. But a model which elucidates how these
memory traces could emerge through STDP has been missing.
We show that stimulus-specific assemblies of neurons emerge
automatically through STDP in a simple cortical
microcircuit model. The model that we consider is a
randomly connected network of well known microcircuit
motifs: pyramidal cells with lateral inhibition. We show
that the emergent assembly codes for repeatedly occurring
spatio-temporal input patterns tend to fire in some loose
sequential manner, reminiscent of experimentally observed
stereotypical trajectories of network states. We also show
that the emergent assembly codes add an important
computational capability to standard models for online
computations in cortical microcircuits: the capability to
integrate information from long-term memory with information
from novel spike inputs.},
htmlnote = {},
note = {}
}
@Article{NesslerETAL:13,
author = {B. Nessler and M. Pfeiffer and L. Buesing and W. Maass},
title = {Bayesian computation emerges in generic cortical
microcircuits through spike-timing-dependent plasticity},
journal = {PLOS Computational Biology},
year = {2013},
volume = {9},
number = {4},
pages = {e1003037},
abstract = {The principles by which networks of neurons compute, and
how spike-timing dependent plasticity (STDP) of synaptic
weights generates and maintains their computational
function, are unknown. Preceding work has shown that soft
winner-take-all (WTA) circuits, where pyramidal neurons
inhibit each other via interneurons, are a common motif of
cortical microcircuits. We show through theoretical
analysis and computer simulations that Bayesian computation
is induced in these network motifs through STDP in
combination with activity-dependent changes in the
excitability of neurons. The fundamental components of this
emergent Bayesian computation are priors that result from
adaptation of neuronal excitability and implicit generative
models for hidden causes that are created in the synaptic
weights through STDP. In fact, a surprising result is that
STDP is able to approximate a powerful principle for
fitting such implicit generative models to high-dimensional
spike inputs: Expectation Maximization. Our results suggest
that the experimentally observed spontaneous activity and
trial-to-trial variability of cortical neurons are
essential features of their information processing
capability, since their functional role is to represent
probability distributions rather than static neural codes.
Furthermore it suggests networks of Bayesian computation
modules as a new model for distributed information
processing in the cortex.},
htmlnote = {(Journal link to PDF)},
note = {}
}
@Article{HabenschussETAL:13,
author = {S. Habenschuss and H. Puhr and W. Maass},
title = {Emergence of optimal decoding of population codes through
{STDP}},
journal = {Neural Computation},
year = {2013},
volume = {25},
number = {6},
pages = {1371--1407},
abstract = {The brain faces the problem to infer reliable hidden
causes from large populations of noisy neurons, for example
the direction of a moving object from spikes in area MT. It
is known that a theoretically optimal likelihood decoding
could be carried out by simple linear readout neurons if
weights of synaptic connections would be set to certain
values that depend on the tuning functions of sensory
neurons. We show here that such theoretically optimal
readout weights emerge autonomously through STDP in
conjunction with lateral inhibition between readout
neurons. In particular, we identify a class of optimal STDP
learning rules with homeostatic plasticity, for which the
autonomous emergence of optimal readouts can be explained
on the basis of a rigorous learning theory. This theory
shows that the considered network motif approximates
Expectation Maximization for creating internal generative
models for hidden causes of high-dimensional spike inputs.
Notably, we find that this optimal functionality can be
well approximated by a variety of STDP rules beyond those
predicted by theory. Furthermore we show that this learning
process is very stable, and automatically adjusts weights
to changes in the number of readout neurons, in the tuning
functions of sensory neurons, and in the statistics of
external stimuli.},
htmlnote = {},
note = {}
}
@Article{RueckertETAL:12,
author = {E. A. Rueckert and G. Neumann and M. Toussaint and W.
Maass},
title = {Learned graphical models for probabilistic planning
provide a new class of movement primitives},
journal = {Frontiers in Computational Neuroscience},
year = {2013},
volume = {6},
number = {},
pages = {1--20},
abstract = {Biological movement generation combines three interesting
aspects: its modular organization in movement primitives,
its characteristics of stochastic optimality under
perturbations, and its efficiency in terms of learning. A
common approach to motor skill learning is to endow the
primitives with dynamical systems. Here, the parameters of
the primitive indirectly define the shape of a reference
trajectory. We propose an alternative movement primitive
representation based on probabilistic inference in learned
graphical models with new and interesting properties that
complies with salient features of biological movement
control. Instead of endowing the primitives with dynamical
systems, we propose to endow movement primitives with an
intrinsic probabilistic planning system, integrating the
power of stochastic optimal control methods within a
movement primitive. The parametrization of the primitive is
a graphical model that represents the dynamics and
intrinsic cost function such that inference in this
graphical model yields the control policy. We parametrize
the intrinsic cost function using task-relevant features,
such as the importance of passing through certain
via-points. The system dynamics as well as intrinsic cost
function parameters are learned in a reinforcement learning
setting. We evaluate our approach on a complex 4-link
balancing task. Our experiments show that our movement
representation facilitates learning significantly and
leads to better generalization to new task settings without
re-learning.},
htmlnote = {(Journal link to PDF)},
doi = {10.3389/fncom.2012.00097}
}
@Article{NesslerETAL:12,
author = {B. Nessler and M. Pfeiffer and L. Buesing and W. Maass},
title = {Bayesian Computation Emerges in Generic Cortical
Microcircuits through Spike-Timing-Dependent Plasticity},
journal = {submitted for publication},
year = {2012},
note = {Preprint; superseded by the published version, see
entry NesslerETAL:13}
}
@Article{HoerzerETAL:12,
author = {G. M. Hoerzer and R. Legenstein and W. Maass},
title = {Emergence of complex computational structures from chaotic
neural networks through reward-modulated {H}ebbian
learning},
journal = {Cerebral Cortex},
year = {2014},
pages = {677--690},
volume = {24},
number = {},
abstract = {This article addresses the question how generic
microcircuits of neurons in different parts of the cortex
can attain and maintain different computational
specializations. We show that if stochastic variations in
the dynamics of local microcircuits are correlated with
signals related to functional improvements of the brain
(e.g. in the control of behavior), the computational
operation of these microcircuits can become optimized for
specific tasks such as the generation of specific periodic
signals, and task-dependent routing of information.
Furthermore, we show that working memory can emerge
autonomously through reward-modulated Hebbian learning, if
needed for specific tasks. Altogether our results suggest
that reward-modulated synaptic plasticity can not only
optimize network parameters for specific computational
tasks, but can also initiate a functional rewiring that
re-programs microcircuits, thereby generating diverse
computational functions in different generic cortical
microcircuits. On a more general level this article
provides a new perspective for a standard model for
computations in generic cortical microcircuits (liquid
computing model). It shows that the arguably most
problematic assumption of this model, the postulate of a
teacher that trains neural readouts through supervised
learning, can be eliminated. We show that generic networks
of neurons can learn numerous biologically relevant
computations through trial and error},
htmlnote = {(Supplementary material PDF)}
}
@InProceedings{ProbstETAL:12,
author = {D. Probst and W. Maass and H. Markram and M. O. Gewaltig},
title = {Liquid Computing in a Simplified Model of Cortical Layer
{IV}: Learning to Balance a Ball},
booktitle = {Proceedings of the 22nd International Conference on
Artificial Neural Networks and Machine Learning -- ICANN
2012},
pages = {209--216},
year = {2012},
editor = {Alessandro E. P. Villa and Wlodzislaw Duch and Peter Erdi
and Francesco Masulli and G\"unther Palm},
volume = {7552},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
abstract = {We present a biologically inspired recurrent network of
spiking neurons and a learning rule that enables the
network to balance a ball on a flat circular arena and to
steer it towards a target field, by controlling the
inclination angles of the arena. The neural controller is a
recurrent network of adaptive exponential integrate and
fire neurons configured and connected to match properties
of cortical layer IV. The network is used as a liquid state
machine with four action cells as readout neurons. The
solution of the task requires the controller to take its
own reaction time into account by anticipating the future
state of the controlled system. We demonstrate that the
cortical network can robustly learn this task using a
supervised learning rule that penalizes the error on the
force applied to the arena},
htmlnote = {(Journal link to PDF)}
}
@InProceedings{AlonMaass:86,
author = {N. Alon and W. Maass},
title = {Meanders, {R}amsey's theorem and lower bounds for branching
programs},
booktitle = {Proceedings of the 27th Annual IEEE Symposium on
Foundations of Computer Science},
pages = {410--417},
year = {1986}
}
@Article{AlonMaass:88,
author = {N. Alon and W. Maass},
journal = {J. Comput. System Sci.},
note = {Invited paper for a special issue of J. Comput. System
Sci.},
pages = {118--129},
title = {Meanders and their applications in lower bound arguments},
volume = {37},
year = {1988},
abstract = {The notion of a \emph{meander} is introduced and studied.
Roughly speaking, a meander is a sequence of integers
(drawn from the set $N = \{1, 2, \ldots, n\}$) that wanders
back and forth between various subsets of $N$ \emph{a lot}.
Using Ramsey theoretic proof techniques we obtain sharp
lower bounds on the minimum length of meanders that achieve
various levels of wandering. We then apply these bounds to
improve existing lower bounds on the length of constant
width \emph{branching programs} for various symmetric
functions. In particular, an $\Omega (n \log n)$ lower
bound on the length of any such program for the majority
function of $n$ bits is proved. We further obtain optimal
time-space trade-offs for certain input oblivious branching
programs and establish sharp lower bounds on the size of
\emph{weak superconcentrators} of depth 2.}
}
@InProceedings{AuerETAL:01,
author = {P. Auer and H. Burgsteiner and W. Maass},
title = {Reducing Communication for Distributed Learning in Neural
Networks},
booktitle = {Proc. of the International Conference on Artificial
Neural Networks -- ICANN 2002},
series = {Lecture Notes in Computer Science},
editor = {Jos{\'e} R. Dorronsoro},
pages = {123--128},
volume = {2415},
year = {2002},
publisher = {Springer},
url = {http://www.springer.de/comp/lncs/index.html},
keywords = {learning algorithm, perceptrons, parallel, aVLSI},
abstract = {A learning algorithm is presented for circuits consisting
of a single layer of perceptrons. We refer to such circuits
as parallel perceptrons. In spite of their simplicity,
these circuits are universal approximators for arbitrary
boolean and continuous functions. In contrast to backprop
for multi-layer perceptrons, our new learning algorithm -
the parallel delta rule (p-delta rule) - only has to tune a
single layer of weights, and it does not require the
computation and communication of analog values with high
precision. This distinguishes our new learning rule also
from other learning rules for such circuits such as
MADALINE with far higher communication. Our algorithm also
provides an interesting new hypothesis for the organization
of learning in biological neural systems. A theoretical
analysis shows that the p-delta rule does in fact implement
gradient descent - with regard to a suitable error measure
- although it does not require to compute derivatives.
Furthermore it is shown through experiments on common
real-world benchmark datasets that its performance is
competitive with that of other learning approaches from
neural networks and machine learning.}
}
@Article{AuerETAL:01a,
author = {P. Auer and H. Burgsteiner and W. Maass},
title = {A learning rule for very simple universal approximators
consisting of a single layer of perceptrons},
journal = {Neural Networks},
year = {2008},
volume = {21},
number = {5},
pages = {786--795},
abstract = {A learning algorithm is presented for circuits consisting
of a single layer of perceptrons (= threshold gates, or
equivalently gates with a Heaviside activation function).
We refer to such circuits as parallel perceptrons. In spite
of their simplicity, these circuits can compute any boolean
function if one views the majority of the binary perceptron
outputs as the binary outputs of the parallel perceptron,
and they are universal approximators for arbitrary
continuous functions with values in [0,1] if one views the
fraction of perceptrons that output 1 as the analog output
of the parallel perceptron. For a long time one has thought
that there exists no competitive learning algorithms for
these extremely simple circuits consisting of gates with
binary outputs, which also became known as committee
machines. It is commonly believed that one has to replace
the hard threshold gates by sigmoidal gates and that one
has to tune the weights on at least two successive layers
in order to get satisfactory learning results. We show that
this is not true by exhibiting a simple learning algorithm
for parallel perceptrons - the parallel delta rule (p-delta
rule), whose performance is comparable to that of backprop
for multilayer networks consisting of sigmoidal gates. In
contrast to backprop, the p-delta rule does not require the
computation and communication of analog values with high
precision, although it does in fact implement gradient
descent - with regard to a suitable error measure.
Therefore it provides an interesting new hypothesis for the
organization of learning in biological neural systems.}
}
@InProceedings{AuerETAL:93,
author = {P. Auer and P. M. Long and W. Maass and G. J. Woeginger},
booktitle = {Proceedings of the 5th Annual ACM Conference on
Computational Learning Theory},
pages = {392--401},
title = {On the complexity of function learning},
year = {1993}
}
@Article{AuerETAL:93j,
author = {P. Auer and P. M. Long and W. Maass and G. J. Woeginger},
title = {On the complexity of function learning},
journal = {Machine Learning},
note = {Invited paper in a special issue of Machine Learning},
year = {1995},
volume = {18},
pages = {187--230}
}
@InProceedings{AuerETAL:95,
author = {P. Auer and R. C. Holte and W. Maass},
booktitle = {Proc. of the 12th International Machine Learning
Conference, Tahoe City (USA)},
publisher = {Morgan Kaufmann (San Francisco)},
pages = {21--29},
title = {Theory and applications of agnostic {PAC}-learning with
small decision trees},
abstract = {We exhibit a theoretically founded algorithm T2 for
agnostic PAC-learning of decision trees of at most 2
levels, whose computation time is almost linear in the size
of the training set. We evaluate the performance of this
learning algorithm T2 on 15 common real-world datasets, and
show that for most of these datasets T2 provides simple
decision trees with little or no loss in predictive power
(compared with C4.5). In fact, for datasets with continuous
attributes its error rate tends to be lower than that of
C4.5. To the best of our knowledge this is the first time
that a PAC-learning algorithm is shown to be applicable to
real-world classification problems. Since one can prove
that T2 is an agnostic PAC-learning algorithm, T2 is
guaranteed to produce close to optimal 2-level decision
trees from sufficiently large training sets for any (!)
distribution of data. In this regard T2 differs strongly
from all other learning algorithms that are considered in
applied machine learning, for which no guarantee can be
given about their performance on new datasets. We also
demonstrate that this algorithm T2 can be used as a
diagnostic tool for the investigation of the expressive
limits of 2-level decision trees. Finally, T2, in
combination with new bounds on the VC-dimension of decision
trees of bounded depth that we derive, provides us now for
the first time with the tools necessary for comparing
learning curves of decision trees for real-world datasets
with the theoretical estimates of PAC-learning theory.},
year = {1995}
}
@InProceedings{AuerETAL:96,
author = {P. Auer and S. Kwek and W. Maass and M. K. Warmuth},
booktitle = {Proc. of the 9th Conference on Computational Learning
Theory 1996},
pages = {333--343},
publisher = {ACM-Press (New York)},
title = {Learning of depth two neural nets with constant fan-in at
the hidden nodes},
year = {1996},
abstract = {We present algorithms for learning depth two neural
networks where the hidden nodes are threshold gates with
constant fan-in. The transfer function of the output node
might be more general: we have results for the cases when
the threshold function, the logistic function or the
identity function is used as the transfer function at the
output node. We give batch and on-line learning algorithms
for these classes of neural networks and prove bounds on
the performance of our algorithms. The batch algorithms work
for real valued inputs whereas the on-line algorithms
assume that the inputs are discretized. The hypotheses of
our algorithms are essentially also neural networks of
depth two. However, their number of hidden nodes might be
much larger than the number of hidden nodes of the neural
network that has to be learned. Our algorithms can handle
such a large number of hidden nodes since they rely on
multiplicative weight updates at the output node, and the
performance of these algorithms scales only logarithmically
with the number of hidden nodes used.}
}
@Article{AuerMaass:98,
author = {P. Auer and W. Maass},
title = {Introduction to the Special Issue on Computational
Learning Theory},
journal = {Algorithmica},
year = {1998},
volume = {22},
number = {1/2},
pages = {1--2}
}
@Unpublished{BachlerMaass:06,
author = {M. Bachler and W. Maass},
title = {A {B}ayesian {H}ebb Rule for Incremental Learning of
Optimal Inference in {B}ayesian Networks},
note = {submitted for publication},
year = {2006}
}
@InCollection{BartlettMaass:03,
author = {Peter L. Bartlett and W. Maass},
title = {Vapnik-{C}hervonenkis Dimension of Neural Nets},
booktitle = {The Handbook of Brain Theory and Neural Networks},
publisher = {MIT Press (Cambridge)},
year = {2003},
editor = {M. A. Arbib},
edition = {2nd},
pages = {1188--1192}
}
@Article{BillETAL:10,
author = {J. Bill and K. Schuch and D. Br\"uderle and J. Schemmel
and W. Maass and K. Meier},
title = {Compensating inhomogeneities of neuromorphic {VLSI}
devices via short-term synaptic plasticity},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
volume = {4},
number = {},
pages = {1--14},
abstract = {Recent developments in neuromorphic hardware engineering
make mixed-signal VLSI neural network models promising
candidates for neuroscientific research tools and massively
parallel computing devices, especially for tasks which
exhaust the computing power of software simulations. Still,
like all analog hardware systems, neuromorphic models
suffer from a constricted configurability and
production-related fluctuations of device characteristics.
Since also future systems, involving ever-smaller
structures, will inevitably exhibit such inhomogeneities on
the unit level, self-regulation properties become a crucial
requirement for their successful operation. By applying a
cortically inspired self-adjusting network architecture, we
show that the activity of generic spiking neural networks
emulated on a neuromorphic hardware system can be kept
within a biologically realistic firing regime and gain a
remarkable robustness against transistor-level variations.
As a first approach of this kind in engineering practice,
the short-term synaptic depression and facilitation
mechanisms implemented within an analog VLSI model of I\&F
neurons are functionally utilized for the purpose of
network level stabilization. We present experimental data
acquired both from the hardware model and from comparative
software simulations which prove the applicability of the
employed paradigm to neuromorphic VLSI devices.},
htmlnote = {(Journal link to the PDF)},
doi = {10.3389/fncom.2010.00129}
}
@Article{BuesingETAL:11,
author = {L. B\"using and J. Bill and B. Nessler and W. Maass},
title = {Neural Dynamics as Sampling: A Model for Stochastic
Computation in Recurrent Networks of Spiking Neurons},
journal = {PLoS Computational Biology},
year = {2011},
pages = {e1002211},
volume = {7},
number = {11},
abstract = {The organization of computations in networks of spiking
neurons in the brain is still largely unknown, in
particular in view of the inherently stochastic features of
their firing activity and the experimentally observed
trial-to-trial variability of neural systems in the brain.
In principle there exists a powerful computational
framework for stochastic computations, probabilistic
inference by sampling, which can explain a large number of
macroscopic experimental data in neuroscience and cognitive
science. But it has turned out to be surprisingly difficult
to create a link between these abstract models for
stochastic computations and more detailed models of the
dynamics of networks of spiking neurons. Here we create
such a link, and show that under some conditions the
stochastic firing activity of networks of spiking neurons
can be interpreted as probabilistic inference via Markov
chain Monte Carlo (MCMC) sampling. Since common methods for
MCMC sampling in distributed systems, such as Gibbs
sampling, are inconsistent with the dynamics of spiking
neurons, we introduce a different approach based on
non-reversible Markov chains, that is able to reflect
inherent temporal processes of spiking neuronal activity
through a suitable choice of random variables. We propose a
neural network model and show by a rigorous theoretical
analysis that its neural activity implements MCMC sampling
of a given distribution, both for the case of discrete and
continuous time. This provides a step towards closing the
gap between abstract functional models of cortical
computations and more detailed models of networks of
spiking neurons.},
htmlnote = {(Journal link to PDF)},
note = {}
}
@InProceedings{BuesingMaass:07,
author = {L. Buesing and W. Maass},
title = {Simplified Rules and Theoretical Analysis for Information
Bottleneck Optimization and {PCA} with Spiking Neurons},
booktitle = {Proc. of NIPS 2007, Advances in Neural Information
Processing Systems},
editor = {},
publisher = {MIT Press},
year = {2008},
volume = {20},
pages = {},
abstract = {We show that under suitable assumptions (primarily
linearization) a simple and perspicuous online learning
rule for Information Bottleneck optimization with spiking
neurons can be derived. This rule performs on common
benchmark tasks as well as a rather complex rule that has
previously been proposed [2]. Furthermore, the transparency
of this new learning rule makes a theoretical analysis of
its convergence properties feasible. If this learning rule
is applied to an ensemble of neurons, it provides a
theoretically founded method for performing principal
component analysis ({PCA}) with spiking neurons. In
addition it makes it possible to preferentially extract
those principal components from incoming signals X that are
related to some additional target signal $Y_T$ . This
target signal $Y_T$ (also called relevance variable) could
represent in a biological interpretation proprioception
feedback, input from other sensory modalities, or top-down
signals.}
}
@Article{BuesingMaass:09,
author = {L. Buesing and W. Maass},
title = {A Spiking Neuron as Information Bottleneck},
journal = {submitted for publication},
year = {2009},
note = {Preprint; superseded by the published version, see
entry BuesingMaass:10}
}
@Article{BuesingMaass:10,
author = {L. Buesing and W. Maass},
title = {A Spiking Neuron as Information Bottleneck},
journal = {Neural Computation},
year = {2010},
volume = {22},
number = {},
pages = {1961--1992},
abstract = {Neurons receive thousands of presynaptic input spike
trains while emitting a single output spike train. This
drastic dimensionality reduction suggests to consider a
neuron as a bottleneck for information transmission.
Extending recent results, we propose a simple learning rule
for the weights of spiking neurons derived from the
Information Bottleneck (IB) framework that minimizes the
loss of relevant information transmitted in the output
spike train. In the IB framework relevance of information
is defined with respect to contextual information, the
latter entering the proposed learning rule as a ``third''
factor besides pre- and postsynaptic activities. This
renders the theoretically motivated learning rule a
plausible model for experimentally observed synaptic
plasticity phenomena involving three factors. Furthermore,
we show that the proposed IB learning rule allows spiking
neurons to learn a ``predictive code'', i.e. to extract those
parts of their input that are predictive for future
input.}
}
@InProceedings{BultmanMaass:91,
author = {W. J. Bultman and W. Maass},
booktitle = {Proceedings of the 4th Annual ACM Workshop on
Computational Learning Theory},
pages = {337--353},
title = {Fast identification of geometric objects with membership
queries},
year = {1991}
}
@Article{BultmanMaass:91j,
  author  = {W. J. Bultman and W. Maass},
  title   = {Fast identification of geometric objects with membership
             queries},
  journal = {Information and Computation},
  year    = {1995},
  volume  = {118},
  pages   = {48--64}
}
@Article{BuonomanoMaass:08,
  author   = {D. Buonomano and W. Maass},
  title    = {State-dependent Computations: Spatiotemporal Processing in
              Cortical Networks},
  journal  = {Nature Reviews Neuroscience},
  year     = {2009},
  volume   = {10},
  number   = {2},
  pages    = {113--125},
  abstract = {A conspicuous ability of the brain is to seamlessly
              assimilate and process spatial and temporal features of
              sensory stimuli. This ability is indispensable for the
              recognition of natural stimuli. Yet, a general computational
              framework for processing spatiotemporal stimuli remains
              elusive. Recent theoretical and experimental work suggests
              that spatiotemporal processing emerges from the interaction
              between incoming stimuli and the internal dynamic state of
              neural networks which includes not only ongoing spiking
              activity, but also 'hidden' neuronal states such as
              short-term synaptic plasticity.}
}
@InProceedings{ChenMaass:92,
  title = {On-line learning of rectangles},
  author = {Z. Chen and W. Maass},
  booktitle = {Proceedings of the 5th Annual ACM Workshop on
    Computational Learning Theory},
  year = {1992},
  pages = {16--28}
}
@InProceedings{ChenMaass:92a,
  title = {A solution of the credit assignment problem in the case of
    learning rectangles},
  author = {Z. Chen and W. Maass},
  booktitle = {Proceedings of the 3rd Int. Workshop on Analogical and
    Inductive Inference},
  series = {Lecture Notes in Artificial Intelligence},
  volume = {642},
  publisher = {Springer},
  year = {1992},
  pages = {26--34}
}
@Article{ChenMaass:94,
  title = {On-line learning of rectangles and unions of rectangles},
  author = {Z. Chen and W. Maass},
  journal = {Machine Learning},
  volume = {17},
  pages = {201--223},
  year = {1994},
  note = {Invited paper for a special issue of Machine Learning}
}
@Article{DietzfelbingerETAL:91a,
  title = {The complexity of matrix transposition on one-tape off-line
    {T}uring machines},
  author = {M. Dietzfelbinger and W. Maass and G. Schnitger},
  journal = {Theoretical Computer Science},
  volume = {82},
  pages = {113--129},
  year = {1991}
}
@InProceedings{DietzfelbingerMaass:85,
  title = {Strong reducibilities in alpha- and beta-recursion theory},
  author = {M. Dietzfelbinger and W. Maass},
  booktitle = {Proceedings of the 1984 Recursion Theory Week Oberwolfach,
    Germany},
  series = {Lecture Notes in Mathematics},
  volume = {1141},
  publisher = {Springer (Berlin)},
  year = {1985},
  pages = {89--120}
}
@InProceedings{DietzfelbingerMaass:86,
  title = {Two lower bound arguments with ``inaccessible'' numbers},
  author = {M. Dietzfelbinger and W. Maass},
  booktitle = {Proceedings of the Structure in Complexity Theory
    Conference, Berkeley 1986},
  series = {Lecture Notes in Computer Science},
  volume = {223},
  publisher = {Springer (Berlin)},
  year = {1986},
  pages = {163--183}
}
@Article{DietzfelbingerMaass:88,
  author  = {M. Dietzfelbinger and W. Maass},
  title   = {Lower bound arguments with ``inaccessible'' numbers},
  journal = {Journal of Computer and System Sciences},
  year    = {1988},
  volume  = {36},
  pages   = {313--335}
}
@InProceedings{DietzfelbingerMaass:88a,
  title = {The complexity of matrix transposition on one-tape off-line
    {T}uring machines with output tape},
  author = {M. Dietzfelbinger and W. Maass},
  booktitle = {Proceedings of the 15th International Colloquium on
    Automata, Languages and Programming},
  series = {Lecture Notes in Computer Science},
  volume = {317},
  publisher = {Springer (Berlin)},
  year = {1988},
  pages = {188--200}
}
@Article{DietzfelbingerMaass:93,
  title = {The complexity of matrix transposition on one-tape off-line
    {T}uring machines with output tape},
  author = {M. Dietzfelbinger and W. Maass},
  journal = {Theoretical Computer Science},
  volume = {108},
  pages = {271--290},
  year = {1993}
}
@Article{DobkinETAL:96,
  author  = {D. P. Dobkin and D. Gunopulos and W. Maass},
  title   = {Computing the maximum bichromatic discrepancy, with
             applications to computer graphics and machine learning},
  journal = {Journal of Computer and System Sciences},
  year    = {1996},
  month   = jun,
  volume  = {52},
  number  = {3},
  pages   = {453--470}
}
@InCollection{FregnacETAL:05,
  author    = {Y. Fregnac and M. Blatow and J.-P. Changeux and J. de
               Felipe and A. Lansner and W. Maass and D. A. McCormick and
               C. M. Michel and H. Monyer and E. Szathmary and R. Yuste},
  title     = {U{P}s and {DOWN}s in Cortical Computation},
  booktitle = {The Interface between Neurons and Global Brain Function},
  editor    = {S. Grillner and A. M. Graybiel},
  chapter   = {19},
  pages     = {393--433},
  publisher = {MIT Press},
  series    = {Dahlem Workshop Report 93},
  year      = {2006}
}
@Unpublished{HabenschussETAL:11,
  author = {S. Habenschuss and H. Purr and W. Maass},
  title  = {Emergence of optimal decoding of population codes through
            {STDP}},
  note   = {In preparation},
  year   = {2011}
}
@Article{HaeuslerETAL:03,
  author   = {S. Haeusler and H. Markram and W. Maass},
  title    = {Perspectives of the High-Dimensional Dynamics of Neural
              Microcircuits from the Point of View of Low-Dimensional
              Readouts},
  journal  = {Complexity (Special Issue on Complex Adaptive Systems)},
  year     = {2003},
  volume   = {8},
  number   = {4},
  pages    = {39--50},
  keywords = {cortical microcircuits; machine learning; p-delta rule},
  abstract = {We investigate generic models for cortical microcircuits,
              i.e., recurrent circuits of integrate-and-fire neurons with
              dynamic synapses. These complex dynamic systems subserve the
              amazing information processing capabilities of the cortex,
              but are at the present time very little understood. We
              analyze the transient dynamics of models for neural
              microcircuits from the point of view of one or two readout
              neurons that collapse the high-dimensional transient
              dynamics of a neural circuit into a one- or two-dimensional
              output stream. This stream may for example represent the
              information that is projected from such circuit to some
              particular other brain area or actuators. It is shown that
              simple local learning rules enable a readout neuron to
              extract from the high-dimensional transient dynamics of a
              recurrent neural circuit quite different low-dimensional
              projections, which even may contain ``virtual attractors''
              that are not apparent in the high-dimensional dynamics of
              the circuit itself. Furthermore it is demonstrated that the
              information extraction capabilities of linear readout
              neurons are boosted by the computational operations of a
              sufficiently large preceding neural microcircuit. Hence a
              generic neural microcircuit may play a similar role for
              information processing as a kernel for support vector
              machines in machine learning. We demonstrate that the
              projection of time-varying inputs into a large recurrent
              neural circuit enables a linear readout neuron to classify
              the time-varying circuit inputs with the same power as
              complex nonlinear classifiers, such as a pool of perceptrons
              trained by the p-delta rule or a feedforward sigmoidal
              neural net trained by backprop, provided that the size of
              the recurrent circuit is sufficiently large. At the same
              time such readout neurons can exploit the stability and
              speed of learning rules for linear classifiers, thereby
              overcoming the problems caused by local minima in the error
              function of nonlinear classifiers. In addition it is
              demonstrated that pairs of readout neurons can transform the
              complex trajectory of transient states of a large neural
              circuit into a simple and clearly structured two-dimensional
              trajectory. This two-dimensional projection of the
              high-dimensional trajectory can even exhibit convergence to
              virtual attractors that are not apparent in the
              high-dimensional trajectory.}
}
@Article{HaeuslerETAL:07,
  author   = {S. Haeusler and W. Singer and W. Maass and D. Nikolic},
  title    = {Superposition of information in large ensembles of neurons
              in primary visual cortex},
  journal  = {37th Annual Conference of the Society for Neuroscience,
              Program 176.2, Poster II23},
  year     = {2007},
  abstract = {We applied methods from machine learning in order to
              analyze the temporal evolution of stimulus-related
              information in the spiking activity of large ensembles of
              around 100 neurons in primary visual cortex of anesthetized
              cats. We presented sequences of up to 3 different visual
              stimuli (letters) that lasted 100 ms and followed at
              intervals of 100 ms. We found that most of the information
              about visual stimuli extractable by advanced methods from
              machine learning (e.g., Support Vector Machines) could also
              be extracted by simple linear classifiers (perceptrons).
              Hence, in principle this information can be extracted by a
              biological neuron. A surprising result was that new stimuli
              did not erase information about previous stimuli. In fact,
              information about the nature of the preceding stimulus
              remained as high as the information about the current
              stimulus. Separately trained linear readouts could retrieve
              information about both the current and the preceding
              stimulus from responses to the current stimulus. This
              information was encoded both in the discharge rates
              (response amplitudes) of the ensemble of neurons and in the
              precise timing of individual spikes, and persisted for
              several 100 ms beyond the offset of stimuli. This
              superposition of information about sequentially presented
              stimuli constrains computational models for visual
              processing. It poses a conundrum for models that assume
              separate classification processes for each frame of visual
              input and supports models for cortical computation
              ([Buonomano, Merzenich, 1995], [Maass, Natschlaeger,
              Markram, 2002]) which argue that a frame-by-frame processing
              is neither feasible within highly recurrent networks nor
              useful for classifying and predicting rapidly changing
              stimulus sequences. Specific predictions of these
              alternative computational models are that i) information
              from different frames of visual input is superimposed in
              recurrent circuits and ii) nonlinear combinations of
              different information components are immediately provided in
              the spike output. Our results indicate that the network from
              which we recorded provided nonlinear combinations of
              information from sequential frames. Such nonlinear
              preprocessing increases the discrimination capability of any
              linear readout neurons receiving distributed input from the
              kind of cells we recorded from. These readout neurons could
              be implemented within V1 and/or at subsequent processing
              levels.}
}
@Article{HaeuslerETAL:08,
  author   = {S. Haeusler and K. Schuch and W. Maass},
  title    = {Motif distribution, dynamical properties, and computational
              performance of two data-based cortical microcircuit
              templates},
  journal  = {Journal of Physiology (Paris)},
  year     = {2009},
  volume   = {103},
  number   = {1-2},
  pages    = {73--87},
  abstract = {The neocortex is a continuous sheet composed of rather
              stereotypical local microcircuits that consist of neurons on
              several laminae with characteristic synaptic connectivity
              patterns. An understanding of the structure and
              computational function of these cortical microcircuits may
              hold the key for understanding the enormous computational
              power of the neocortex. Two templates for the structure of
              laminar cortical microcircuits have recently been published
              by Thomson et al. and Binzegger et al., both resulting from
              long-lasting experimental studies (but based on different
              methods). We analyze and compare in this article the
              structure of these two microcircuit templates. In
              particular, we examine the distribution of network motifs,
              i.e. of subcircuits consisting of a small number of neurons.
              The distribution of these building blocks has recently
              emerged as a method for characterizing similarities and
              differences among complex networks. We show that the two
              microcircuit templates have quite different distributions of
              network motifs, although they both have a characteristic
              small-world property. In order to understand the dynamical
              and computational properties of these two microcircuit
              templates, we have generated computer models of them,
              consisting of Hodgkin-Huxley point neurons with conductance
              based synapses that have a biologically realistic short-term
              plasticity. The performance of these two cortical
              microcircuit models was studied for seven generic
              computational tasks that require accumulation and merging of
              information contained in two afferent spike inputs. Although
              the two models exhibit a different performance for some of
              these tasks, their average computational performance is very
              similar. When we changed the connectivity structure of these
              two microcircuit models in order to see which aspects of it
              are essential for computational performance, we found that
              the distribution of degrees of nodes is a common key factor
              for their computational performance. We also show that their
              computational performance is correlated with specific
              statistical properties of the circuit dynamics that is
              induced by a particular distribution of degrees of nodes.}
}
@Article{HaeuslerETAL:08a,
  author   = {S. Haeusler and K. Schuch and W. Maass},
  title    = {Motif distribution and computational performance of two
              data-based cortical microcircuit templates},
  journal  = {38th Annual Conference of the Society for Neuroscience,
              Program 220.9},
  year     = {2008},
  abstract = {The neocortex is a continuous sheet composed of rather
              stereotypical local microcircuits that consist of neurons on
              several laminae with characteristic synaptic connectivity
              patterns. An understanding of the structure and
              computational function of these cortical microcircuits may
              hold the key for understanding the enormous computational
              power of the neocortex. Two templates for the structure of
              laminar cortical microcircuits have recently been published
              by Thomson et al. (2002) and Binzegger et al. (2004), both
              resulting from long-lasting experimental studies (but based
              on different methods). We analyze and compare in this study
              the structure and computational properties of these two
              microcircuit templates. In particular, we examine the
              distribution of network motifs, i.e. of sub-circuits
              consisting of a small number of neurons. The distribution of
              these building blocks of complex networks has recently
              emerged as a method for characterizing similarities and
              differences among complex networks. We show that the two
              microcircuit templates have quite different distributions of
              network motifs, although they both share characteristic
              global structural properties, like degree distributions
              (distribution of the number of synapses per neuron) and
              small-world properties. In order to understand the
              computational properties of the two microcircuit templates,
              we have generated computer models of them, consisting of
              Hodgkin-Huxley point neurons with conductance based synapses
              that have a biologically realistic short-term plasticity.
              The information processing capabilities of the two cortical
              microcircuit models were studied for 7 generic computational
              tasks that require accumulation and merging of information
              contained in two afferent spike inputs. Although the two
              models exhibit a different performance for some of these
              tasks, their average computational performance is very
              similar. When we changed the connectivity structure of these
              two microcircuit models in order to see which aspects of it
              are essential for computational performance, we found that
              the distribution of degrees of nodes is a key factor for
              their computational performance. References Thomson et al.
              (2002), Cerebral Cortex, 12(9):936 Binzegger et al. (2004),
              J. Neurosci., 24(39):8441}
}
@Article{HaeuslerMaass:04,
  author  = {S. Haeusler and W. Maass},
  title   = {A statistical analysis of information processing properties
             of lamina-specific cortical microcircuit models},
  journal = {Cerebral Cortex},
  year    = {2007},
  volume  = {17},
  number  = {1},
  pages   = {149--162}
}
@Unpublished{HaeuslerMaass:06,
  author = {S. Haeusler and W. Maass},
  title  = {Computational impact of laminar structure and small world
            properties of cortical microcircuit models},
  note   = {Submitted for publication},
  year   = {2006}
}
@Article{HajnalETAL:87a,
  author        = {A. Hajnal and W. Maass and P. Pudlak and M. Szegedy
                   and G. Turan},
  title         = {Threshold circuits of bounded depth},
  journal       = {Journal of Computer and System Sciences},
  year          = {1993},
  volume        = {46},
  pages         = {129--154},
  internal-note = {Duplicate of entry HajnalETAL:93 (same article);
                   consider consolidating citations onto one key},
  abstract      = {We examine a powerful model of parallel computation:
                   polynomial size threshold circuits of bounded depth
                   (the gates compute threshold functions with polynomial
                   weights). Lower bounds are given to separate polynomial
                   size threshold circuits of depth 2 from polynomial size
                   threshold circuits of depth 3 and from probabilistic
                   polynomial size circuits of depth 2. With regard to the
                   unreliability of bounded depth circuits, it is shown
                   that the class of functions computed reliably with
                   bounded depth circuits of unreliable $\wedge, \vee
                   ,\neg$ gates is narrow. On the other hand, functions
                   computable by bounded depth, polynomial-size threshold
                   circuits can also be computed by such circuits of
                   unreliable threshold gates. Furthermore we examine to
                   what extent imprecise threshold gates (which behave
                   unpredictably near the threshold value) can compute
                   nontrivial functions in bounded depth and a bound is
                   given for the permissible amount of imprecision. We
                   also discuss threshold quantifiers and prove an
                   undefinability result for graph connectivity.}
}
@InProceedings{HajnalETAL:88,
  title = {On the communication complexity of graph properties},
  author = {A. Hajnal and W. Maass and G. Turan},
  booktitle = {Proceedings of the 20th Annual ACM Symposium on Theory of
    Computing},
  year = {1988},
  pages = {186--191}
}
@Article{HajnalETAL:93,
  author   = {A. Hajnal and W. Maass and P. Pudlak and M. Szegedy and G.
              Turan},
  title    = {Threshold circuits of bounded depth},
  journal  = {Journal of Computer and System Sciences},
  year     = {1993},
  volume   = {46},
  pages    = {129--154},
  abstract = {We examine a powerful model of parallel computation:
              polynomial size threshold circuits of bounded depth (the
              gates compute threshold functions with polynomial weights).
              Lower bounds are given to separate polynomial size threshold
              circuits of depth 2 from polynomial size threshold circuits
              of depth 3 and from probabilistic polynomial size circuits
              of depth 2. With regard to the unreliability of bounded
              depth circuits, it is shown that the class of functions
              computed reliably with bounded depth circuits of unreliable
              $\wedge, \vee, \neg$ gates is narrow. On the other hand,
              functions computable by bounded depth, polynomial-size
              threshold circuits can also be computed by such circuits of
              unreliable threshold gates. Furthermore we examine to what
              extent imprecise threshold gates (which behave unpredictably
              near the threshold value) can compute nontrivial functions
              in bounded depth and a bound is given for the permissible
              amount of imprecision. We also discuss threshold quantifiers
              and prove an undefinability result for graph connectivity.}
}
@InProceedings{HauserETAL:07,
  author    = {H. Hauser and G. Neumann and A. J. Ijspeert and W. Maass},
  title     = {Biologically Inspired Kinematic Synergies Provide a New
               Paradigm for Balance Control of Humanoid Robots},
  booktitle = {Proceedings of the {IEEE}-{RAS} 7th {I}nternational
               {C}onference on {H}umanoid {R}obots ({H}umanoids 2007)},
  year      = {2007},
  note      = {Best {P}aper {A}ward},
  abstract  = {Nature has developed methods for controlling the movements
               of organisms with many degrees of freedom which differ
               strongly from existing approaches for balance control in
               humanoid robots: Biological organisms employ kinematic
               synergies that simultaneously engage many joints, and which
               are apparently designed in such a way that their
               superposition is approximately linear. We show in this
               article that this control strategy can in principle also be
               applied to balance control of humanoid robots. In contrast
               to existing approaches, this control strategy reduces the
               need to carry out complex computations in real time
               (replacing the iterated solution of quadratic optimization
               problems by a simple linear controller), and it does not
               require knowledge of a dynamic model of the robot.
               Therefore it can handle unforeseen changes in the dynamics
               of the robot that may for example arise from wind or other
               external forces. We demonstrate the feasibility of this
               novel approach to humanoid balance control through
               simulations of the humanoid robot HOAP-2 for tasks that
               require balance control on a randomly moving surfboard.}
}
@Article{HauserETAL:11,
  author   = {H. Hauser and G. Neumann and A. J. Ijspeert and W. Maass},
  title    = {Biologically Inspired Kinematic Synergies Enable Linear
              Balance Control of a Humanoid Robot},
  journal  = {Biological Cybernetics},
  year     = {2011},
  volume   = {104},
  number   = {4-5},
  pages    = {235--249},
  abstract = {Despite many efforts, balance control of humanoid robots in
              the presence of unforeseen external or internal forces has
              remained an unsolved problem. The difficulty of this problem
              is a consequence of the high dimensionality of the action
              space of a humanoid robot, due to its large number of
              degrees of freedom (joints), and of nonlinearities in its
              kinematic chains. Biped biological organisms face similar
              difficulties, but have nevertheless solved this problem.
              Experimental data reveal that many biological organisms
              reduce the high dimensionality of their action space by
              generating movements through linear superposition of a
              rather small number of stereotypical combinations of
              simultaneous movements of many joints, to which we refer as
              kinematic synergies in this paper. We show that by
              constructing two suitable nonlinear kinematic synergies for
              the lower part of the body of a humanoid robot, balance
              control can in fact be reduced to a linear control problem,
              at least in the case of relatively slow movements. We
              demonstrate for a variety of tasks that the humanoid robot
              HOAP-2 acquires through this approach the capability to
              balance dynamically against unforeseen disturbances that may
              arise from external forces or from manipulating unknown
              loads.},
  htmlnote = {(Journal link to PDF)}
}
@Article{HauserETAL:12,
  author   = {H. Hauser and A. J. Ijspeert and R. M. F\"uchslin and R.
              Pfeifer and W. Maass},
  title    = {Towards a Theoretical Foundation for Morphological
              Computation with Compliant Bodies},
  journal  = {Biological Cybernetics},
  year     = {2011},
  volume   = {105},
  number   = {5-6},
  pages    = {355--370},
  abstract = {The control of compliant robots is, due to their often
              nonlinear and complex dynamics, inherently difficult. The
              vision of morphological computation proposes to view these
              aspects not only as problems, but rather as parts of the
              solution. Non-rigid body parts are not seen anymore as
              imperfect realizations of rigid body parts, but rather as
              potential computational resources. The applicability of this
              vision has already been demonstrated for a variety of
              complex robot control problems. Nevertheless, a theoretical
              basis for understanding the capabilities and limitations of
              morphological computation has been missing so far. We
              present a model for morphological computation with compliant
              bodies, where a precise mathematical characterization of the
              potential computational contribution of a complex physical
              body is feasible. The theory suggests that complexity and
              nonlinearity, typically unwanted properties of robots, are
              desired features in order to provide computational power. We
              demonstrate that simple generic models of physical bodies,
              based on mass-spring systems, can be used to implement
              complex nonlinear operators. By adding a simple readout
              (which is static and linear) to the morphology, such devices
              are able to emulate complex mappings of input to output
              streams in continuous time. Hence, by outsourcing parts of
              the computation to the physical body, the difficult problem
              of learning to control a complex body, could be reduced to a
              simple and perspicuous learning task, which can not get
              stuck in local minima of an error function.},
  htmlnote = {(Journal link to PDF)}
}
@Article{HauserETAL:12a,
  author   = {H. Hauser and A. J. Ijspeert and R. M. F\"uchslin and R.
              Pfeifer and W. Maass},
  title    = {The Role of Feedback in Morphological Computation with
              Compliant Bodies},
  journal  = {Biological Cybernetics},
  year     = {2012},
  doi      = {10.1007/s00422-012-0516-4},
  note     = {Published online 6 September 2012},
  abstract = {The generation of robust periodic movements of complex
              nonlinear robotic systems is inherently difficult,
              especially, if parts of the robots are compliant. It has
              previously been proposed that complex nonlinear features of
              a robot, similarly as in biological organisms, might
              possibly facilitate its control. This bold hypothesis,
              commonly referred to as morphological computation, has
              recently received some theoretical support by Hauser et al.
              (2012). We show in this article that this theoretical
              support can be extended to cover not only the case of fading
              memory responses to external signals, but also the essential
              case of autonomous generation of adaptive periodic patterns,
              as, e.g., needed for locomotion. The theory predicts that
              feedback into the morphological computing system is
              necessary and sufficient for such tasks, for which a fading
              memory is insufficient. We demonstrate the viability of this
              theoretical analysis through computer simulations of complex
              nonlinear mass-spring systems that are trained to generate a
              large diversity of periodic movements by adapting the
              weights of a simple linear feedback device. Hence, the
              results of this article substantially enlarge the
              theoretically tractable application domain of morphological
              computation in robotics, and also provide new paradigms for
              understanding control principles of biological organisms.},
  htmlnote = {(Journal link to PDF)}
}
@InProceedings{HochbaumMaass:84,
  title = {Approximation schemes for covering and packing problems in
    robotics and {VLSI} (extended abstract)},
  author = {D. Hochbaum and W. Maass},
  booktitle = {Proceedings of Symp. on Theoretical Aspects of Computer
    Science (Paris 1984)},
  series = {Lecture Notes in Computer Science},
  volume = {166},
  publisher = {Springer (Berlin)},
  year = {1984},
  pages = {55--62}
}
@Article{HochbaumMaass:85,
  author  = {D. Hochbaum and W. Maass},
  title   = {Approximation schemes for covering and packing problems in
             image processing and {VLSI}},
  journal = jacm,
  year    = {1985},
  volume  = {32},
  pages   = {130--136}
}
@Article{HochbaumMaass:87,
  author  = {D. Hochbaum and W. Maass},
  title   = {Fast approximation algorithms for a nonconvex covering
             problem},
  journal = {Journal of Algorithms},
  year    = {1987},
  volume  = {8},
  pages   = {305--323}
}
@Article{HomerMaass:83,
  author   = {S. Homer and W. Maass},
  title    = {Oracle dependent properties of the lattice of {NP}-sets},
  journal  = {Theoretical Computer Science},
  year     = {1983},
  volume   = {24},
  pages    = {279--289},
  abstract = {We consider under the assumption $P \neq NP$ questions
              concerning the structure of the lattice of $NP$ sets
              together with the sublattice $P$. We show that two questions
              which are slightly more complex than the known splitting
              properties of this lattice cannot be settled by arguments
              which relativize. The two questions which we consider are
              whether every infinite $NP$ set contains an infinite $P$
              subset and whether there exists an $NP$-simple set. We
              construct several oracles, all of which make $P \neq NP$,
              and which in addition make the above-mentioned statements
              either true or false. In particular we give a positive
              answer to the question, raised by Bennett and Gill (1981),
              whether an oracle $B$ exists making $P^B \neq NP^B$ and such
              that every infinite set in $NP^B$ has an infinite subset in
              $P^B$. The constructions of the oracles are finite injury
              priority arguments.}
}
@Unpublished{JoshiETAL:09,
  author = {P. Joshi and G. Rainer and W. Maass},
  title  = {Computational role of theta oscillations in delayed-decision
            tasks},
  note   = {In preparation},
  year   = {2009}
}
@InProceedings{JoshiMaass:03,
  author    = {P. Joshi and W. Maass},
  title     = {Movement Generation and Control with Generic Neural
               Microcircuits},
  booktitle = {Biologically Inspired Approaches to Advanced Information
               Technology. First International Workshop, Bio{ADIT} 2004,
               Lausanne, Switzerland, January 2004, Revised Selected
               Papers},
  editor    = {A. J. Ijspeert and M. Murata and N. Wakamiya},
  series    = {Lecture Notes in Computer Science},
  volume    = {3141},
  publisher = {Springer Verlag},
  year      = {2004},
  pages     = {258--273},
  abstract  = {Simple linear readouts from generic neural microcircuit
               models consisting of spiking neurons and dynamic synapses
               can be trained to generate and control basic movements, for
               example, reaching with an arm to various target points.
               After suitable training of these readouts on a small number
               of target points, reaching movements to other target points
               can also be generated. Sensory or proprioceptive feedback
               turns out to be essential for such movement control, even
               if it is noisy and substantially delayed. Such feedback
               turns out to optimally improve the performance of the
               neural microcircuit model if it arrives with a biologically
               realistic delay of 100 to 200 ms. Furthermore, additional
               feedbacks of ``prediction of sensory variables'' are shown
               to improve the performance significantly. The proposed
               model also provides a new approach for movement control in
               robotics. Existing control methods in robotics that take
               the particular dynamics of the sensors and actuators into
               account (``embodiment of robot control'') are taken one
               step further by this approach, which provides methods for
               also using the ``embodiment of computation'', i.e. the
               inherent dynamics and spatial structure of neural circuits,
               for the design of robot movement controllers.}
}
@Article{JoshiMaass:04,
  author   = {P. Joshi and W. Maass},
  title    = {Movement Generation with Circuits of Spiking Neurons},
  journal  = {Neural Computation},
  year     = {2005},
  volume   = {17},
  number   = {8},
  pages    = {1715--1738},
  abstract = {How can complex movements that take hundreds of
              milliseconds be generated by stereotypical neural
              microcircuits consisting of spiking neurons with a much
              faster dynamics? We show that linear readouts from generic
              neural microcircuit models can be trained to generate basic
              arm movements. Such movement generation is independent of
              the arm-model used and the type of feedbacks that the
              circuit receives. We demonstrate this by considering two
              different models of a two-jointed arm, a standard model from
              robotics and a standard model from biology, that each
              generate different kinds of feedback. Feedbacks that arrive
              with biologically realistic delays of 50--280 ms turn out to
              give rise to the best performance. If a feedback with such
              desirable delay is not available, the neural microcircuit
              model also achieves good performance if it uses internally
              generated estimates of such feedback. Existing methods for
              movement generation in robotics that take the particular
              dynamics of sensors and actuators into account
              (``embodiment of motor systems'') are taken one step further
              with this approach, which provides methods for also using
              the ``embodiment of motion generation circuitry'', i.e., the
              inherent dynamics and spatial structure of neural circuits,
              for the generation of movements.}
}
@Article{KaskeMaass:04,
  author   = {A. Kaske and W. Maass},
  title    = {A model for the interaction of oscillations and pattern
              generation with real-time computing in generic neural
              microcircuit models},
  journal  = {Neural Networks},
  year     = {2006},
  volume   = {19},
  number   = {5},
  pages    = {600--609},
  abstract = {It is shown that real-time computations on spike patterns
              and temporal integration of information in neural
              microcircuit models are compatible with potentially
              disruptive additional inputs such as oscillations. A minor
              change in the connection statistics of such circuits (making
              synaptic connections to more distal target neurons more
              likely for excitatory than for inhibitory neurons) endows
              such generic neural microcircuit model with the ability to
              generate periodic patterns autonomously. We show that such
              pattern generation can also be multiplexed with pattern
              classification and temporal integration of information in
              the same neural circuit. These results can be interpreted as
              showing that periodic activity provides a second channel for
              communication in neural systems which can be used to
              synchronize or coordinate spatially separated processes,
              without encumbering local real-time computations on spike
              trains in diverse neural circuits.}
}
@Article{KlampflETAL:07,
author = {S. Klampfl and R. Legenstein and W. Maass},
title = {Spiking neurons can learn to solve information bottleneck
         problems and extract independent components},
journal = {Neural Computation},
volume = 21,
number = 4,
pages = {911--959},
year = 2009,
abstract = {Independent Component Analysis (or blind source
            separation) is assumed to be an essential component of
            sensory processing in the brain and could provide a less
            redundant representation about the external world. Another
            powerful processing strategy is the optimization of
            internal representations according to the information
            bottleneck method. This method would allow to extract
            preferentially those components from high-dimensional
            sensory input streams that are related to other information
            sources, such as internal predictions or proprioceptive
            feedback. However there exists a lack of models that could
            explain how spiking neurons could learn to execute either
            of these two processing strategies. We show in this article
            how stochastically spiking neurons with refractoriness
            could in principle learn in an unsupervised manner to carry
            out both information bottleneck optimization and the
            extraction of independent components. We derive suitable
            learning rules, which extend the well known BCM-rule, from
            abstract information optimization principles. These rules
            will simultaneously keep the firing rate of the neuron
            within a biologically realistic range.}
}
@InProceedings{KlampflETAL:07b,
author = {S. Klampfl and R. Legenstein and W. Maass},
title = {Information Bottleneck Optimization and Independent
         Component Extraction with Spiking Neurons},
booktitle = {Proc. of NIPS 2006, Advances in Neural Information
             Processing Systems},
publisher = {MIT Press},
year = {2007},
volume = {19},
pages = {713--720},
abstract = {The extraction of statistically independent components
            from high-dimensional multi-sensory input streams is
            assumed to be an essential component of sensory processing
            in the brain. Such independent component analysis (or blind
            source separation) could provide a less redundant
            representation of information about the external world.
            Another powerful processing strategy is to extract
            preferentially those components from high-dimensional input
            streams that are related to other information sources,
            such as internal predictions or proprioceptive feedback.
            This strategy allows the optimization of internal
            representation according to the information bottleneck
            method. However, concrete learning rules that implement
            these general unsupervised learning principles for spiking
            neurons are still missing. We show how both information
            bottleneck optimization and the extraction of independent
            components can in principle be implemented with
            stochastically spiking neurons with refractoriness. The new
            learning rule that achieves this is derived from abstract
            information optimization principles.}
}
@Article{KlampflETAL:09,
author = {S. Klampfl and S.V. David and P. Yin and S.A. Shamma and
          W. Maass},
title = {Integration of stimulus history in information conveyed by
         neurons in primary auditory cortex in response to tone
         sequences},
journal = {39th Annual Conference of the Society for Neuroscience,
           Program 163.8, Poster T6},
year = {2009},
abstract = {A critical component of auditory processing is integrating
            information about sound features that change over time.
            Previous studies have shown that the context of a sound --
            the immediate history of auditory stimulation -- can have a
            substantial effect on responses of auditory neurons to a
            current sound. In order to characterize these effects, we
            measured the information contained in the neural activity
            in primary auditory cortex (A1) about both current and
            preceding sounds. Neural recordings were made from single
            A1 neurons (n=122) isolated from 23 multi-channel
            recordings in 4 passively listening ferrets. The stimulus
            was a sequence of tones (150 ms duration). The frequency
            step between two consecutive tones was always half an
            octave up or down. For each neuron, we measured at
            particular points in time the mutual information (MI)
            between its response during a sliding window (20ms) and the
            identity of the current and preceding tone. Since direct
            estimates of MI from spike trains typically suffer from a
            systematic error (bias) due to the limited number of
            available response trials for a given stimulus, we used a
            recently proposed shuffling-based estimator with additional
            quadratic extrapolation bias correction (Panzeri et al.,
            2007). This method produces reliable information estimates
            for this particular setup. We found that most responses
            (102 out of 122 neurons) contained significant information
            about the stimulus throughout the duration of the tone. Of
            this information, on average, 60\% was about the current
            tone, while 40\% was about the previous tone. We also
            trained linear classifiers (Support Vector Machines with
            linear kernel) on the low-pass filtered response spike
            trains of multiple simultaneously recorded neurons (4-10)
            to discriminate between the two possible predecessors for a
            given tone. The performance of the linear classifier can be
            viewed as a lower bound on the information contained in the
            responses about the previous tone. Performance of up to
            80\% was achieved. These results quantify the amount of
            information contained in the responses of A1 neurons about
            both currently and previously played tones and demonstrate
            that neurons in A1 integrate information about previous
            input into their current responses. References: Panzeri et
            al. (2007), J Neurophysiol, 98(3):1064}
}
@Article{KlampflETAL:12,
author = {S. Klampfl and S. V. David and P. Yin and S. A. Shamma and
          W. Maass},
title = {A quantitative analysis of information about past and
         present stimuli encoded by spikes of {A}1 neurons},
journal = {Journal of Neurophysiology},
year = {2012},
volume = {108},
pages = {1366--1380},
abstract = {In order to process the rich temporal structure of their
            acoustic environment, organisms have to integrate
            information over time into an appropriate neural response.
            Previous studies have addressed the modulation of responses
            of auditory neurons to a current sound in dependence of the
            immediate stimulation history, but a quantitative analysis
            of this important computational process has been missing.
            In this study, we analyzed temporal integration of
            information in the spike output of 122 single neurons in
            primary auditory cortex (A1) of four awake ferrets in
            response to random tone sequences. We quantified the
            information contained in the responses about both current
            and preceding sounds in two ways: by estimating directly
            the mutual information between stimulus and response, and
            by training linear classifiers to decode information about
            the stimulus from the neural response. We found that (i)
            many neurons conveyed a significant amount of information
            not only about the current tone, but also simultaneously
            about the previous tone, (ii) that the neural response to
            tone sequences was a non-linear combination of responses to
            the tones in isolation, and (iii) that, nevertheless, much
            of the information about current and previous tones could
            be extracted by linear decoders. Furthermore our analysis
            of these experimental data shows that methods from
            information theory and the application of standard machine
            learning methods for extracting specific information yield
            quite similar results.},
htmlnote = {(Journal link to abstract PDF)}
}
@Article{KlampflMaass:09,
author = {S. Klampfl and W. Maass},
title = {A neuron can learn anytime classification of trajectories
         of network states without supervision},
journal = {submitted for publication},
month = feb,
year = {2009}
}
@Article{KlampflMaass:09a,
author = {S. Klampfl and W. Maass},
title = {A theoretical basis for emergent pattern discrimination in
         neural systems through slow feature extraction},
journal = {submitted},
year = {2009}
}
@InProceedings{KlampflMaass:09b,
author = {S. Klampfl and W. Maass},
title = {Replacing supervised classification learning by {S}low
         {F}eature {A}nalysis in spiking neural networks},
booktitle = {Proc. of NIPS 2009: Advances in Neural Information
             Processing Systems},
publisher = {MIT Press},
year = {2010},
volume = {22},
pages = {988--996},
abstract = {Many models for computations in recurrent networks of
            neurons assume that the network state moves from some
            initial state to some fixed point attractor or limit cycle
            that represents the output of the computation. However
            experimental data show that in response to a sensory
            stimulus the network state moves from its initial state
            through a trajectory of network states and eventually
            returns to the initial state, without reaching an attractor
            or limit cycle in between. This type of network response,
            where salient information about external stimuli is encoded
            in characteristic trajectories of continuously varying
            network states, raises the question how a neural system
            could compute with such code, and arrive for example at a
            temporally stable classification of the external stimulus.
            We show that a known unsupervised learning algorithm, Slow
            Feature Analysis (SFA), could be an important ingredient
            for extracting stable information from these network
            trajectories. In fact, if sensory stimuli are more often
            followed by another stimulus from the same class than by a
            stimulus from another class, SFA approaches the
            classification capability of Fisher's Linear Discriminant
            (FLD), a powerful algorithm for supervised learning. We
            apply this principle to simulated cortical microcircuits,
            and show that it enables readout neurons to learn
            discrimination of spoken digits and detection of repeating
            firing patterns within a stream of spike trains with the
            same firing statistics, without requiring any supervision
            for learning.}
}
@Article{KlampflMaass:10,
author = {S. Klampfl and W. Maass},
title = {A theoretical basis for emergent pattern discrimination in
         neural systems through slow feature extraction},
journal = {Neural Computation},
volume = 22,
number = 12,
pages = {2979--3035},
year = 2010,
abstract = {Neurons in the brain are able to detect and discriminate
            salient spatio-temporal patterns in the firing activity of
            presynaptic neurons. It is open how they can learn to
            achieve this, especially without the help of a supervisor.
            We show that a well-known unsupervised learning algorithm
            for linear neurons, Slow Feature Analysis (SFA), is able to
            acquire the discrimination capability of one of the best
            algorithms for supervised linear discrimination learning,
            the Fisher Linear Discriminant (FLD), given suitable input
            statistics. We demonstrate the power of this principle by
            showing that it enables readout neurons from simulated
            cortical microcircuits to learn without any supervision to
            discriminate between spoken digits, and to detect repeated
            firing patterns that are embedded into a stream of noise
            spike trains with the same firing statistics. Both these
            computer simulations and our theoretical analysis show that
            slow feature extraction enables neurons to extract and
            collect information that is spread out over a trajectory of
            firing states that lasts several hundred ms. In addition,
            it enables neurons to learn without supervision to keep
            track of time (relative to a stimulus onset, or the
            initiation of a motor response). Hence these results
            elucidate how the brain could compute with trajectories of
            firing states, rather than only with fixed point
            attractors. It also provides a theoretical basis for
            understanding recent experimental results on the emergence
            of view- and position-invariant classification of visual
            objects in inferior temporal cortex.},
note = {Epub 2010 Sep 21}
}
@Article{LegensteinETAL:03,
author = {R. Legenstein and H. Markram and W. Maass},
title = {Input Prediction and Autonomous Movement Analysis in
         Recurrent Circuits of Spiking Neurons},
journal = {Reviews in the Neurosciences (Special Issue on
           Neuroinformatics of Neural and Artificial Computation)},
year = {2003},
volume = {14},
number = {1--2},
pages = {5--19},
abstract = {Temporal integration of information and prediction of
            future sensory inputs are assumed to be important
            computational tasks of generic cortical microcircuits.
            However it has remained open how cortical microcircuits
            could possibly achieve this, especially since they consist
            in contrast to most neural network models of neurons and
            synapses with heterogeneous dynamic responses. However it
            turns out that the diversity of computational units
            increases the capability of microcircuit models for
            temporal integration. Furthermore the prediction of future
            input may be rather easy for such circuits since it
            suffices to train the readouts from such microcircuits. In
            this article we show that very simple readouts from a
            generic recurrently connected circuit of integrate-and-fire
            neurons with diverse dynamic synapses can be trained in an
            unsupervised manner to predict movements of different
            objects, that move within an unlimited number of
            combinations of speed, angle, and offset over a simulated
            sensor field. The autonomously trained microcircuit model
            is also able to compute the direction of motion, which is a
            computationally difficult problem (``aperture problem'')
            since it requires disambiguation of local sensor readings
            through the context of other sensor readings at the current
            and preceding moments. Furthermore the same circuit can be
            trained simultaneously in a supervised manner to also
            report the shape and velocity of the moving object. Finally
            it is shown that the trained neural circuit supports
            novelty detection and the generation of ``imagined
            movements''. Altogether the results of this article suggest
            that it is not necessary to construct specific and
            biologically unrealistic neural circuit models for specific
            sensory processing tasks, since ``found'' generic cortical
            microcircuit models in combination with very simple
            perceptron-like readouts can easily be trained to solve
            such computational tasks.}
}
@Article{LegensteinETAL:04,
author = {R. Legenstein and C. Naeger and W. Maass},
title = {What can a Neuron Learn with Spike-Timing-Dependent
         Plasticity?},
journal = {Neural Computation},
volume = 17,
number = 11,
pages = {2337--2382},
year = 2005,
abstract = {Spiking neurons are very flexible computational modules,
            which can implement with different values of their
            adjustable synaptic parameters an enormous variety of
            different transformations F from input spike trains to
            output spike trains. We examine in this letter the question
            to what extent a spiking neuron with biologically realistic
            models for dynamic synapses can be taught via
            spike-timing-dependent plasticity (STDP) to implement a
            given transformation F. We consider a supervised learning
            paradigm where during training, the output of the neuron is
            clamped to the target signal (teacher forcing). The
            well-known perceptron convergence theorem asserts the
            convergence of a simple supervised learning algorithm for
            drastically simplified neuron models (McCulloch-Pitts
            neurons). We show that in contrast to the perceptron
            convergence theorem, no theoretical guarantee can be given
            for the convergence of STDP with teacher forcing that holds
            for arbitrary input spike patterns. On the other hand, we
            prove that average case versions of the perceptron
            convergence theorem hold for STDP in the case of
            uncorrelated and correlated Poisson input spike trains and
            simple models for spiking neurons. For a wide class of
            cross-correlation functions of the input spike trains, the
            resulting necessary and sufficient condition can be
            formulated in terms of linear separability, analogously as
            the well-known condition of learnability by perceptrons.
            However, the linear separability criterion has to be
            applied here to the columns of the correlation matrix of
            the Poisson input. We demonstrate through extensive
            computer simulations that the theoretically predicted
            convergence of STDP with teacher forcing also holds for
            more realistic models for neurons, dynamic synapses, and
            more general input distributions. In addition, we show
            through computer simulations that these positive learning
            results hold not only for the common interpretation of
            STDP, where STDP changes the weights of synapses, but also
            for a more realistic interpretation suggested by
            experimental data where STDP modulates the initial release
            probability of dynamic synapses.}
}
@InProceedings{LegensteinETAL:04a,
author = {R. Legenstein and W. Maass},
title = {A criterion for the convergence of learning with spike
         timing dependent plasticity},
booktitle = {Advances in Neural Information Processing Systems},
editor = {Y. Weiss and B. Sch{\"o}lkopf and J. Platt},
volume = {18},
pages = {763--770},
year = {2006},
publisher = {MIT Press},
abstract = {We investigate under what conditions a neuron can learn by
            experimentally supported rules for spike timing dependent
            plasticity (STDP) to predict the arrival times of strong
            ``teacher inputs'' to the same neuron. It turns out that in
            contrast to the famous Perceptron Convergence Theorem,
            which predicts convergence of the perceptron learning rule
            for a strongly simplified neuron model whenever a stable
            solution exists, no equally strong convergence guarantee
            can be given for spiking neurons with STDP. But we derive a
            criterion on the statistical dependency structure of input
            spike trains which characterizes exactly when learning with
            STDP will converge on average for a simple model of a
            spiking neuron. This criterion is reminiscent of the linear
            separability criterion of the Perceptron Convergence
            Theorem, but it applies here to the rows of a correlation
            matrix related to the spike inputs. In addition we show
            through computer simulations for more realistic neuron
            models that the resulting analytically predicted positive
            learning results not only hold for the common
            interpretation of STDP where STDP changes the weights of
            synapses, but also for a more realistic interpretation
            suggested by experimental data where STDP modulates the
            initial release probability of dynamic synapses.}
}
@InProceedings{LegensteinETAL:08,
author = {R. Legenstein and D. Pecevski and W. Maass},
title = {Theoretical Analysis of Learning with Reward-Modulated
         Spike-Timing-Dependent Plasticity},
booktitle = {Proc. of NIPS 2007, Advances in Neural Information
             Processing Systems},
publisher = {MIT Press},
year = {2008},
volume = {20},
pages = {881--888},
abstract = {Reward-modulated spike-timing-dependent plasticity
            ({STDP}) has recently emerged as a candidate for a learning
            rule that could explain how local learning rules at single
            synapses support behaviorally relevant adaptive changes in
            complex networks of spiking neurons. However the potential
            and limitations of this learning rule could so far only be
            tested through computer simulations. This article provides
            tools for an analytic treatment of reward-modulated {STDP},
            which allow us to predict under which conditions
            reward-modulated {STDP} will be able to achieve a desired
            learning effect. In particular, we can produce in this way
            a theoretical explanation and a computer model for a
            fundamental experimental finding on biofeedback in monkeys
            (reported in [1]).}
}
@Article{LegensteinETAL:08a,
author = {R. Legenstein and D. Pecevski and W. Maass},
title = {A Learning Theory for Reward-Modulated
         Spike-Timing-Dependent Plasticity with Application to
         Biofeedback},
journal = {PLoS Computational Biology},
volume = 4,
number = 10,
pages = {e1000180},
year = 2008,
abstract = {Reward-modulated spike-timing-dependent plasticity
            ({STDP}) has recently emerged as a candidate for a learning
            rule that could explain how behaviorally relevant adaptive
            changes in complex networks of spiking neurons could be
            achieved in a self-organizing manner through local synaptic
            plasticity. However the capabilities and limitations of
            this learning rule could so far only be tested through
            computer simulations. This article provides tools for an
            analytic treatment of reward-modulated {STDP}, which allows
            us to predict under which conditions reward-modulated
            {STDP} will achieve a desired learning effect. These
            analytical results imply that neurons can learn through
            reward-modulated {STDP} to classify not only spatial, but
            also temporal firing patterns of presynaptic neurons. They
            also can learn to respond to specific presynaptic firing
            patterns with particular spike patterns. Finally, the
            resulting learning theory predicts that even difficult
            credit-assignment problems, where it is very hard to tell
            which synaptic weights should be modified in order to
            increase the global reward for the system, can be solved in
            a self-organizing manner through reward-modulated {STDP}.
            This yields an explanation for a fundamental experimental
            result on biofeedback in monkeys by Fetz and Baker. In this
            experiment monkeys were rewarded for increasing the firing
            rate of a particular neuron in the cortex, and were able to
            solve this extremely difficult credit assignment problem.
            Our model for this experiment relies on a combination of
            reward-modulated {STDP} with variable spontaneous firing
            activity. Hence it also provides a possible functional
            explanation for trial-to-trial variability, which is
            characteristic for cortical networks of neurons, but has no
            analogue in currently existing artificial computing
            systems. In addition our model demonstrates that
            reward-modulated {STDP} can be applied to all synapses in a
            large recurrent neural network without endangering the
            stability of the network dynamics.},
htmlnote = {(Journal link to PDF)}
}
@Article{LegensteinETAL:08c,
author = {R. Legenstein and S. M. Chase and A. B. Schwartz and W.
          Maass},
title = {A model for learning effects in motor cortex that may
         facilitate the brain control of neuroprosthetic devices},
journal = {38th Annual Conference of the Society for Neuroscience,
           Program 517.6},
year = {2008},
abstract = {Recent experimental results have shown that the direction
            preference of neurons in monkey motor cortex changes in
            order to compensate for purposeful misreading of preferred
            directions for brain control of a robot arm. We show that a
            simple neural network model in combination with a new rule
            for reward-modulated Hebbian plasticity can explain this
            effect. This rule requires substantial trial-to-trial
            variability of the neuronal output for exploration. In
            contrast to previously proposed rules for reward-modulated
            Hebbian plasticity, the new rule does not require that the
            plasticity mechanism `knows' the noise explicitly. It is
            able to optimize the performance of the model system within
            biologically realistic periods of time and under high noise
            levels. When the neuronal noise is fitted to experimental
            data, the model produces learning effects similar to those
            found in monkey experiments. We quantified these effects
            and found a surprisingly good match to those observed in
            experiments. This study shows that reward-modulated
            learning can explain detailed experimental results about
            neuronal tuning changes in a motor control task and
            suggests that reward-modulated learning is an essential
            plasticity mechanism in the cortex for the acquisition of
            goal-directed behavior. Self-tuning effects of the type
            considered in this model are obviously important for
            successful use of neuroprosthetic devices.}
}
@InProceedings{LegensteinETAL:09a,
author = {R. Legenstein and S. M. Chase and A. B. Schwartz and W.
          Maass},
title = {Functional network reorganization in motor cortex can be
         explained by reward-modulated {H}ebbian learning},
booktitle = {Proc. of NIPS 2009: Advances in Neural Information
             Processing Systems},
editor = {D. Koller and D. Schuurmans and Y. Bengio and L. Bottou},
publisher = {MIT Press},
year = {2010},
volume = {22},
pages = {1105--1113},
abstract = {The control of neuroprosthetic devices from the activity
            of motor cortex neurons benefits from learning effects
            where the function of these neurons is adapted to the
            control task. It was recently shown that tuning properties
            of neurons in monkey motor cortex are adapted selectively
            in order to compensate for an erroneous interpretation of
            their activity. In particular, it was shown that the tuning
            curves of those neurons whose preferred directions had been
            misinterpreted changed more than those of other neurons. In
            this article, we show that the experimentally observed
            self-tuning properties of the system can be explained on
            the basis of a simple learning rule. This learning rule
            utilizes neuronal noise for exploration and performs
            Hebbian weight updates that are modulated by a global
            reward signal. In contrast to most previously proposed
            reward-modulated Hebbian learning rules, this rule does not
            require extraneous knowledge about what is noise and what
            is signal. The learning rule is able to optimize the
            performance of the model system within biologically
            realistic periods of time and under high noise levels. When
            the neuronal noise is fitted to experimental data, the
            model produces learning effects similar to those found in
            monkey experiments.}
}
@Article{LegensteinETAL:09b,
author = {R. Legenstein and S. M. Chase and A. B. Schwartz and W.
          Maass},
title = {A reward-modulated {H}ebbian learning rule can explain
         experimentally observed network reorganization in a brain
         control task},
journal = {The Journal of Neuroscience},
volume = 30,
number = 25,
pages = {8400--8410},
year = 2010,
abstract = {It has recently been shown in a brain-computer interface
            experiment that motor cortical neurons change their tuning
            properties selectively to compensate for errors induced by
            displaced decoding parameters. In particular, it was shown
            that the three-dimensional tuning curves of neurons whose
            decoding parameters were reassigned changed more than those
            of neurons whose decoding parameters had not been
            reassigned. In this article, we propose a simple learning
            rule that can reproduce this effect. Our learning rule uses
            Hebbian weight updates driven by a global reward signal and
            neuronal noise. In contrast to most previously proposed
            learning rules, this approach does not require extrinsic
            information to separate noise from signal. The learning
            rule is able to optimize the performance of a model system
            within biologically realistic periods of time under high
            noise levels. Furthermore, when the model parameters are
            matched to data recorded during the brain-computer
            interface learning experiments described above, the model
            produces learning effects strikingly similar to those found
            in the experiments.}
}
@TechReport{LegensteinMaass:04,
author = {R. Legenstein and W. Maass},
title = {Additional material to the paper: What can a Neuron Learn
         with Spike-Timing-Dependent Plasticity?},
institution = {Institute of Theoretical Computer Science, Graz University
               of Technology},
year = 2004,
htmlnote = {(PDF)}
}
@InCollection{LegensteinMaass:05,
author = {R. Legenstein and W. Maass},
title = {What makes a dynamical system computationally powerful?},
booktitle = {New Directions in Statistical Signal Processing: From
             Systems to Brains},
publisher = {MIT Press},
editor = {S. Haykin and J. C. Principe and T. J. Sejnowski and J. G.
          McWhirter},
pages = {127--154},
year = {2007},
abstract = {We review methods for estimating the computational
            capability of a complex dynamical system. The main examples
            that we discuss are models for cortical neural
            microcircuits with varying degrees of biological accuracy,
            in the context of online computations on complex input
            streams. We address in particular the question to what
            extent earlier results about the relationship between the
            edge of chaos and the computational power of dynamical
            systems in discrete time for off-line computing also apply
            to this case.}
}
@Article{LegensteinMaass:05a,
author = {R. Legenstein and W. Maass},
title = {Edge of Chaos and Prediction of Computational Performance
         for Neural Circuit Models},
journal = {Neural Networks},
year = {2007},
volume = {20},
number = {3},
pages = {323--334},
abstract = {We analyze in this article the significance of the edge of
            chaos for real-time computations in neural microcircuit
            models consisting of spiking neurons and dynamic synapses.
            We find that the edge of chaos predicts quite well those
            values of circuit parameters that yield maximal
            computational performance. But obviously it makes no
            prediction of their computational performance for other
            parameter values. Therefore, we propose a new method for
            predicting the computational performance of neural
            microcircuit models. The new measure estimates directly the
            kernel property and the generalization capability of a
            neural microcircuit. We validate the proposed measure by
            comparing its prediction with direct evaluations of the
            computational performance of various neural microcircuit
            models. The proposed method also allows us to quantify
            differences in the computational performance and
            generalization capability of neural circuits in different
            dynamic regimes (UP- and DOWN-states) that have been
            demonstrated through intracellular recordings in vivo.}
}
@Article{LegensteinMaass:07,
author = {R. Legenstein and W. Maass},
title = {On the classification capability of sign-constrained
         perceptrons},
journal = {Neural Computation},
volume = {20},
number = {1},
pages = {288--309},
year = {2008},
abstract = {The perceptron (also referred to as McCulloch-Pitts
            neuron, or linear threshold gate) is commonly used as a
            simplified model for the discrimination and learning
            capability of a biological neuron. Criteria that tell us
            when a perceptron can implement (or learn to implement) all
            possible dichotomies over a given set of input patterns are
            well-known, but only for the idealized case where one
            assumes that the sign of a synaptic weight can be switched
            during learning. We present in this article an analysis of
            the classification capability of the biologically more
            realistic model of a sign-constrained perceptron, where the
            signs of synaptic weights remain fixed during learning
            (which is the case for most types of biological synapses).
            In particular, the VC-dimension of sign-constrained
            perceptrons is determined, and a necessary and sufficient
            criterion is provided that tells us when all $2^m$
            dichotomies over a given set of $m$ patterns can be learned
            by a sign-constrained perceptron. We also show that
            uniformity of {L1} norms of input patterns is a sufficient
            condition for full representation power in the case where
            all weights are required to be nonnegative. Finally, we
            also exhibit cases where the sign-constraint of a
            perceptron drastically reduces its classification
            capability. Our theoretical analysis is complemented by
            computer simulations, which demonstrate in particular that
            sparse input patterns improve the classification capability
            of sign-constrained perceptrons.}
}
@Article{LegensteinMaass:09,
author = {R. Legenstein and W. Maass},
title = {An integrated learning rule for branch strength
potentiation and {STDP}},
journal = {39th Annual Conference of the Society for Neuroscience,
Program 895.20, Poster HH36},
year = {2009},
abstract = {Recent experimental data (Losonczy, Makara, and Magee,
Nature 2008) show that not only the strength of synaptic
efficacy is plastic, but also the coupling between
dendritic branches and the soma (via dendritic spikes).
More precisely, the strength of this coupling can be
increased both through a coincidence of dendritic branch
activations with action potential generation, and through a
coincidence of branch activation with ACh. This effect has
been called Branch Strength Potentiation (BSP). We show
through theoretical analysis and computer simulations that
the learning capability of single neurons is substantially
increased if STDP is combined with BSP. More precisely, we
show that a simple learning rule, based on an
error-minimization principle, contains both BSP and STDP as
special cases. The learning rule includes a homeostatic
mechanism which acts locally at the site of the dendritic
branch. The depression that was observed for
post-before-pre pairings in standard STDP experiments is
also observed in simulations of this learning rule. It can
be explained by the combined effect of this local
homeostatic mechanism and the backpropagating action
potential. This powerful new learning rule endows single
neurons with learning capabilities which were previously
unattainable. For example, a single neuron acquires through
this new learning rule the capability to solve a "binding
problem". I.e., a single neuron can learn to
fire upon activation of presynaptic pools A and B, and also
upon activation of presynaptic pools C and D, but NOT in
response to concurrent activation of presynaptic pools A
and C, or B and D. We also consider a variation of this
learning rule where changes at synapses and branches are
not only based on local activity, but also on a global
reward signal that is indicated to the neuron by the
concentration of a neuromodulatory signal such as ACh. We
show that this biologically plausible learning rule for
reward-based learning is much more efficient than
previously proposed rules based on simple neuron models
without nonlinear branches.}
}
@Article{LegensteinMaass:11,
author = {R. Legenstein and W. Maass},
title = {Branch-specific plasticity enables self-organization of
nonlinear computation in single neurons},
journal = {The Journal of Neuroscience},
year = {2011},
volume = {31},
number = {30},
pages = {10787--10802},
abstract = {It has been conjectured that nonlinear processing in
dendritic branches endows individual neurons with the
capability to perform complex computational operations that
are needed in order to solve for example the binding
problem. However, it is not clear how single neurons could
acquire such functionality in a self-organized manner,
since most theoretical studies of synaptic plasticity and
learning concentrate on neuron models without nonlinear
dendritic properties. In the meantime, a complex picture of
information processing with dendritic spikes and a variety
of plasticity mechanisms in single neurons has emerged from
experiments. In particular, new experimental data on
dendritic branch strength potentiation in rat hippocampus
have not yet been incorporated into such models. In this
article, we investigate how experimentally observed
plasticity mechanisms, such as depolarization-dependent
STDP and branch-strength potentiation could be integrated
to self-organize nonlinear neural computations with
dendritic spikes. We provide a mathematical proof that in a
simplified setup these plasticity mechanisms induce a
competition between dendritic branches, a novel concept in
the analysis of single neuron adaptivity. We show via
computer simulations that such dendritic competition
enables a single neuron to become member of several
neuronal ensembles, and to acquire nonlinear computational
capabilities, such as for example the capability to bind
multiple input features. Hence our results suggest that
nonlinear neural computation may self-organize in single
neurons through the interaction of local synaptic and
dendritic plasticity mechanisms.},
htmlnote = {(Commentary by R. P. Costa and P. J. Sj\"ostr\"om in
Frontiers in Synaptic Neuroscience PDF)}
}
@Article{LiebeETAL:09,
author = {S. Liebe and G. Hoerzer and N.K. Logothetis and W. Maass
and G. Rainer},
title = {Long range coupling between {V4} and {PF} in theta band
during visual short-term memory},
journal = {39th Annual Conference of the Society for Neuroscience,
Program 652.20, Poster Y31},
year = {2009},
abstract = {Both extrastriate area V4 and the lateral prefrontal
cortex (PF) are thought to be part of a neural network
contributing to sensory and mnemonic processing of visual
information. However, it is not well understood how V4 and
PF might interact during visual memory. Here, we addressed
this question by recording Local Field Potentials (LFP)
simultaneously in both brain regions while two rhesus
monkeys performed a delayed matching-to-sample task. In the
task, a sample stimulus (250ms) was presented followed by a
probe stimulus (600ms) after a delay period (1500ms). A
lever press was required if the sample stimulus matched the
probe. We assessed coupling between LFP sites within and
between the different brain regions by both measuring
pair-wise phase-synchrony (phase locking value, PLV) using
a wavelet based method and employing a coupling measure
that relies on the concept of Granger causality
(partial-directed coherence; PDC) using multivariate
autoregressive (MVAR) modeling. In both monkeys we
consistently found increases in theta-band phase synchrony
(3.5-7 Hz) between V4 and PF LFP site pairs during the
delay period of the task. Specifically, a significant
proportion of pairs (26.1\%, 62/231 for monkey 1 and 25\%,
40/160 for monkey 2, p<0.001) showed increased coherence
during the delay phase compared to the pre-stimulus
baseline period. In contrast, only a small proportion of
sites showed significant coupling in gamma (42-97 Hz,
5.9\%/13\% for monkeys 1/2, respectively) or beta (16-36Hz,
6.9\%/16\%) frequencies. In addition, we obtained
comparable results using PDC, which also assesses the
directionality of information flow between the brain areas.
Our preliminary results indicate that the interaction
between V4 and PF during short-term memory might be
primarily mediated through neuronal coherence in the theta
band. Furthermore, our analyses using MVAR modeling suggest
that this interaction can be characterized by a
bidirectional information flow between these areas. These
findings support the idea that long-range interactions play
an important role in short-term maintenance of short-term
memory.}
}
@InCollection{Maass:00,
author = {W. Maass},
title = {Spike trains -- Im {R}hythmus neuronaler {Z}ellen},
booktitle = {Katalog der steirischen Landesausstellung gr2000az},
pages = {36--42},
publisher = {Springer Verlag},
year = {2000},
editor = {H. Konrad and R. Kriesche}
}
@InCollection{Maass:00a,
author = {W. Maass},
title = {Lernende {M}aschinen},
booktitle = {Katalog der steirischen Landesausstellung gr2000az},
pages = {50--56},
publisher = {Springer Verlag},
year = 2000,
editor = {H. Konrad and R. Kriesche}
}
@InCollection{Maass:00b,
author = {W. Maass},
title = {Neural computation: a research topic for theoretical
computer science? {S}ome thoughts and pointers},
booktitle = {Current Trends in Theoretical Computer Science, Entering
the 21st Century},
publisher = {World Scientific Publishing},
year = {2001},
pages = {680--690},
editor = {Rozenberg, G. and Salomaa, A. and Paun, G.}
}
@Article{Maass:00c,
author = {W. Maass},
title = {Neural computation: a research topic for theoretical
computer science? {S}ome thoughts and pointers},
journal = {Bulletin of the European Association for Theoretical
Computer Science (EATCS)},
pages = {149--158},
year = {2000},
volume = {72}
}
@InProceedings{Maass:01a,
author = {W. Maass},
title = {wetware ({E}nglish version)},
booktitle = {{TAKEOVER}: {W}ho is {D}oing the {A}rt of {T}omorrow
({A}rs {E}lectronica 2001)},
pages = {148--152},
year = {2001},
publisher = {Springer}
}
@InProceedings{Maass:01b,
author = {W. Maass},
title = {wetware (deutsche {V}ersion)},
booktitle = {{TAKEOVER}: {W}ho is {D}oing the {A}rt of {T}omorrow
({A}rs {E}lectronica 2001)},
year = {2001},
pages = {153--157},
publisher = {Springer}
}
@Article{Maass:02,
author = {W. Maass},
title = {Computing with Spikes},
journal = {Special Issue on Foundations of Information Processing of
{TELEMATIK}},
year = {2002},
pages = {32--36},
volume = {8},
number = {1}
}
@InProceedings{Maass:02a,
author = {W. Maass},
title = {On the Computational Power of Neural Microcircuit Models:
Pointers to the Literature},
booktitle = {Proc. of
the International Conference on Artificial Neural Networks
-- ICANN 2002},
pages = {254--256},
year = {2002},
editor = {Jos\'{e} R. Dorronsoro},
volume = {2415},
series = {Lecture Notes in Computer Science},
publisher = {Springer}
}
@InCollection{Maass:03,
author = {W. Maass},
title = {Computation with Spiking Neurons},
booktitle = {The Handbook
of Brain Theory and Neural Networks},
edition = {Second},
publisher = {MIT Press (Cambridge)},
editor = {M. A. Arbib},
year = {2003},
pages = {1080--1083}
}
@Article{Maass:06,
author = {W. Maass},
title = {Book Review of ``{I}mitation of life: how biology is
inspiring computing'' by {N}ancy {F}orbes},
journal = {Pattern Analysis and Applications},
year = {2006},
volume = {8},
number = {4},
pages = {390--391},
note = {Springer (London)}
}
@InProceedings{Maass:07,
author = {W. Maass},
booktitle = {Proceedings of the Conference CiE'07: {COMPUTABILITY IN
EUROPE} 2007, Siena (Italy)},
title = {Liquid Computing},
publisher = {Springer (Berlin)},
series = {Lecture Notes in Computer Science},
year = {2007},
pages = {507--516},
abstract = {This review addresses structural differences between that
type of computation on which computability theory and
computational complexity theory have focused so far, and
those computations that are usually carried out in
biological organisms (either in the brain, or in the form
of gene regulation within a single cell). These differences
concern the role of time, the way in which the input is
presented, the way in which an algorithm is implemented,
and in the end also the definition of what a computation
is. This article describes liquid computing as a new
framework for analyzing those types of computations that
are usually carried out in biological organisms.}
}
@InCollection{Maass:09,
author = {W. Maass},
title = {Liquid State Machines: Motivation, Theory, and
Applications},
booktitle = {Computability in Context: Computation and Logic in the
Real World},
editor = {B. Cooper and A. Sorbi},
year = {2010},
publisher = {Imperial College Press},
pages = {275--296},
abstract = {The Liquid State Machine (LSM) has emerged as a
computational model that is more adequate than the Turing
machine for describing computations in biological networks
of neurons. Characteristic features of this new model are
(i) that it is a model for adaptive computational systems,
(ii) that it provides a method for employing randomly
connected circuits, or even "found" physical objects for
meaningful computations, (iii) that it provides a
theoretical context where heterogeneous, rather than
stereotypical, local gates or processors increase the
computational power of a circuit, (iv) that it provides a
method for multiplexing different computations (on a common
input) within the same circuit. This chapter reviews the
motivation for this model, its theoretical background, and
current work on implementations of this model in innovative
artificial computing devices.}
}
@InProceedings{Maass:75,
author = {W. Maass},
booktitle = {Proof Theory Symposium Kiel 1974},
editor = {J. Diller and G. H. Mueller},
pages = {257--263},
publisher = {Springer (Berlin)},
series = {Lecture Notes in Mathematics},
title = {Church Rosser Theorem fuer Lambda-Kalkuele mit unendlich
langen Termen},
volume = {500},
year = {1975}
}
@Article{Maass:76,
author = {W. Maass},
journal = {Archive Math. Logik Grundlagen},
pages = {27--46},
title = {Eine {F}unktionalinterpretation der praedikativen
{A}nalysis},
volume = {18},
year = {1976}
}
@Article{Maass:77,
author = {W. Maass},
journal = {Archive Math. Logik Grundlagen},
pages = {169--186},
title = {On minimal pairs and minimal degrees in higher recursion
theory},
volume = {18},
year = {1977}
}
@Article{Maass:78,
author = {W. Maass},
journal = {Annals of Mathematical Logic},
pages = {149--170},
title = {Inadmissibility, tame r.e. sets and the admissible
collapse},
volume = {13},
year = {1978}
}
@Article{Maass:78a,
author = {W. Maass},
journal = {J. Symbolic Logic},
pages = {270--279},
title = {The uniform regular set theorem in alpha-recursion
theory},
volume = {43},
year = {1978}
}
@InProceedings{Maass:78b,
author = {W. Maass},
booktitle = {Higher Set Theory},
editor = {G. H. Mueller and D. Scott},
pages = {339--359},
publisher = {Springer (Berlin)},
series = {Lecture Notes in Mathematics},
title = {Fine structure theory of the constructible universe in
alpha- and beta-recursion theory},
volume = {669},
year = {1978}
}
@MastersThesis{Maass:78c,
author = {W. Maass},
note = {Minerva Publikation (Muenchen)},
school = {Ludwig-Maximilians-Universitaet Muenchen},
title = {Contributions to alpha- and beta-recursion theory},
type = {Habilitationsschrift},
year = {1978}
}
@InCollection{Maass:78d,
author = {W. Maass},
booktitle = {Generalized Recursion Theory II},
editor = {E. Fenstad and R. O. Gandy and G. E. Sacks},
pages = {239--269},
publisher = {North-Holland (Amsterdam)},
title = {High alpha-recursively enumerable degrees},
year = {1978}
}
@Article{Maass:79,
author = {W. Maass},
journal = {Ann. of Math. Logic},
pages = {205--231},
title = {On alpha- and beta-recursively enumerable degrees},
volume = {16},
year = {1979}
}
@InProceedings{GuptaMaass:91,
author = {A. Gupta and W. Maass},
booktitle = {Advances in Neural Information Processing Systems},
editor = {R. P. Lippmann and J. E. Moody and D. S. Touretzky},
pages = {825--831},
publisher = {Morgan Kaufmann (San Mateo)},
title = {A method for the efficient design of {B}oltzmann machines
for classification problems},
volume = {3},
year = {1991}
}
@InProceedings{Maass:81,
author = {W. Maass},
booktitle = {Proceedings of the Conf. on Recursion Theory and
Computational Complexity},
editor = {G. Lolli},
pages = {229--236},
publisher = {Liguori editore (Napoli)},
title = {Recursively invariant beta-recursion theory -- a
preliminary survey},
year = {1981}
}
@Article{Maass:81a,
author = {W. Maass},
journal = {Proceedings Amer. Math. Soc.},
pages = {267--270},
title = {A countable basis for sigma-one-two sets and recursion
theory on aleph-one},
volume = {82},
year = {1981}
}
@Article{Maass:81b,
author = {W. Maass},
journal = {Ann. of Math. Logic},
pages = {27--73},
title = {Recursively invariant beta-recursion theory},
volume = {21},
year = {1981}
}
@Article{Maass:83,
author = {W. Maass},
journal = {The Journal of Symbolic Logic},
pages = {809--823},
title = {Recursively enumerable generic sets},
volume = {47},
year = {1983}
}
@Article{Maass:83a,
author = {W. Maass},
journal = {Trans. Amer. Math. Soc.},
pages = {311--336},
title = {Characterization of recursively enumerable sets with
supersets effectively isomorphic to all recursively
enumerable sets},
volume = {279},
year = {1983}
}
@Article{Maass:84,
author = {W. Maass},
journal = {J. Symbolic Logic},
pages = {51--62},
title = {On the orbits of hyperhypersimple sets},
volume = {49},
year = {1984}
}
@InProceedings{Maass:84a,
author = {W. Maass},
booktitle = {Proceedings of 16th Annual ACM Symp. on Theory of
Computing},
pages = {401--408},
title = {Quadratic lower bounds for deterministic and
nondeterministic one-tape {T}uring machines},
abstract = {We introduce new techniques for proving quadratic lower
bounds for deterministic and nondeterministic 1-tape Turing
machines (all considered Turing machines have an additional
oneway input tape). In particular we produce quadratic
lower bounds for the simulation of 2-tape TM's by l-tape
TM's and thus answer a rather old question (problem No.1
and No.7 in the list of Duris, Galil, Paul, Reischuk
[3]). Further we demonstrate a substantial superiority of
nondeterminism over determinism and of co-nondeterminism
over nondeterminism for l-tape TM's.},
year = {1984}
}
@Article{Maass:85,
author = {W. Maass},
journal = {J. Symbolic Logic},
pages = {138--148},
title = {Variations on promptly simple sets},
volume = {50},
year = {1985}
}
@Article{Maass:85a,
author = {W. Maass},
journal = {Proceedings of Symposia in Pure Mathematics},
pages = {21--32},
title = {Major subsets and automorphisms of recursively enumerable
sets},
volume = {42},
year = {1985}
}
@Article{Maass:85b,
author = {W. Maass},
journal = {Transactions of the American Mathematical Society},
pages = {675--693},
title = {Combinatorial lower bound arguments for deterministic and
nondeterministic {T}uring machines},
abstract = {We introduce new techniques for proving quadratic lower
bounds for deterministic and nondeterministic 1-tape
{T}uring machines (all considered {T}uring machines have an
additional one-way input tape). In particular, we derive
for the simulation of 2-tape {T}uring machines by 1-tape
{T}uring machines an optimal quadratic lower bound in the
deterministic case and a nearly optimal lower bound in the
nondeterministic case. This answers the rather old
question whether the computing power of the considered
types of {T}uring machines is significantly increased when
more than one tape is used (problem Nos. 1 and 7 in the
list of {D}uris, {G}alil, {P}aul, {R}eischuk [3]). Further,
we demonstrate a substantial superiority of nondeterminism
over determinism and of co-nondeterminism over
nondeterminism for 1-tape {T}uring machines.},
volume = {292},
number = {2},
year = {1985},
note = {hard copy}
}
@InProceedings{Maass:86,
author = {W. Maass},
booktitle = {Proceedings of the International Conference on Logic,
Methodology and Philosophy of Science, Salzburg 1983},
pages = {141--158},
publisher = {North-Holland (Amsterdam)},
title = {Are recursion theoretic arguments useful in complexity
theory},
year = {1986}
}
@Article{Maass:86a,
author = {W. Maass},
journal = {SIAM J. Computing},
pages = {453--467},
title = {On the complexity of nonconvex covering},
volume = {15},
year = {1986}
}
@Article{Maass:88,
author = {W. Maass},
journal = {J. Symbolic Logic},
pages = {1098--1109},
title = {On the use of inaccessible numbers and order
indiscernibles in lower bound arguments for random access
machines},
volume = {53},
year = {1988},
abstract = {We prove optimal lower bounds on the computation time for
several well-known test problems on a quite realistic
computational model: the random access machine. These lower
bound arguments may be of special interest for logicians
because they rely on finitary analogues of two important
concepts from mathematical logic: inaccessible numbers and
order indiscernibles}
}
@InProceedings{Maass:91,
author = {W. Maass},
booktitle = {Proceedings of the 4th Annual ACM Workshop on
Computational Learning Theory},
pages = {167--175},
publisher = {Morgan Kaufmann (San Mateo)},
title = {On-line learning with an oblivious environment and the
power of randomization},
year = {1991}
}
@InProceedings{Maass:93,
author = {W. Maass},
booktitle = {Proceedings of the 25th Annual ACM Symposium on Theory
Computing},
pages = {335--344},
title = {Bounds for the computational power and learning complexity
of analog neural nets},
year = {1993}
}
@Article{Maass:93j,
author = {W. Maass},
title = {Bounds for the computational power and learning complexity
of analog neural nets},
journal = {SIAM J. on Computing},
year = 1997,
volume = 26,
number = 3,
pages = {708--732}
}
@InProceedings{Maass:94,
author = {W. Maass},
booktitle = {Advances in Neural Information Processing Systems},
pages = {311--318},
title = {Agnostic {PAC}-learning of functions on analog neural
nets},
editor = {G. Tesauro and D. S. Touretzky and T. K. Leen},
volume = {7},
year = {1995}
}
@InProceedings{Maass:94a,
author = {W. Maass},
booktitle = {Proceedings of the International Conference on Artificial
Neural Networks 1994 (ICANN'94)},
pages = {581--584},
publisher = {Springer (Berlin)},
title = {Neural nets with superlinear {VC}-dimension},
year = {1994}
}
@InCollection{Maass:94b,
author = {W. Maass},
booktitle = {Theoretical Advances in Neural Computation and Learning},
editor = {V. P. Roychowdhury and K. Y. Siu and A. Orlitsky},
pages = {295--336},
publisher = {Kluwer Academic Publishers (Boston)},
title = {Perspectives of current research about the complexity of
learning on neural nets},
year = {1994}
}
@InCollection{Maass:94c,
author = {W. Maass},
booktitle = {Theoretical Advances in Neural Computation and Learning},
editor = {V. P. Roychowdhury and K. Y. Siu and A. Orlitsky},
pages = {153--172},
publisher = {Kluwer Academic Publishers (Boston)},
title = {Computing on analog neural nets with arbitrary real
weights},
year = {1994}
}
@InProceedings{Maass:94d,
author = {W. Maass},
booktitle = {Proc. of the 7th Annual ACM Conference on Computational
Learning Theory},
pages = {67--75},
title = {Efficient agnostic {PAC}-learning with simple hypotheses},
year = {1994}
}
@InCollection{Maass:94e,
author = {W. Maass},
booktitle = {Computational Learning Theory: EuroColt'93},
editor = {J. Shawe-Taylor and M. Anthony},
pages = {1--17},
publisher = {Oxford University Press (Oxford)},
title = {On the complexity of learning on neural nets},
year = {1994}
}
@Article{Maass:94f,
author = {W. Maass},
title = {Agnostic {PAC}-learning of functions on analog neural
nets},
journal = {Neural Computation},
year = 1995,
volume = 7,
pages = {1054--1078}
}
@Article{Maass:94j,
author = {W. Maass},
title = {Neural nets with superlinear {VC}-dimension},
abstract = {It has been known for quite a while that the
Vapnik-Chervonenkis dimension (VC-dimension) of a
feedforward neural net with linear threshold gates is at
most $O(w \cdot \log w)$, where $w$ is the total number of
weights in the neural net. We show in this paper that this
bound is in fact asymptotically optimal. More precisely, we
exhibit for any depth $d \geq 3$ a large class of
feedforward neural nets of depth \emph{d} with $w$ weights
that have VC-dimension $\Omega (w \cdot \log w)$. This
lower bound holds even if the inputs are restricted to
Boolean values. The proof of this result relies on a new
method that allows us to encode more "program-bits" in the
weights of a neural net than previously thought possible.},
journal = {Neural Computation},
year = 1994,
volume = {6},
pages = {877--884}
}
@InProceedings{Maass:95,
author = {W. Maass},
booktitle = {Proc. of the 7th Italian Workshop on Neural Nets 1995},
pages = {99--104},
publisher = {World Scientific (Singapore)},
title = {Analog computations on networks of spiking neurons
(extended abstract)},
year = {1996}
}
@InCollection{Maass:95a,
author = {W. Maass},
booktitle = {The Handbook of Brain Theory and Neural Networks},
editor = {M.~A.~Arbib},
pages = {1000--1003},
publisher = {MIT Press (Cambridge)},
title = {Vapnik-{C}hervonenkis dimension of neural nets},
year = {1995}
}
@InProceedings{Maass:95b,
author = {W. Maass},
booktitle = {Advances in Neural Information Processing Systems},
editor = {G. Tesauro and D. S. Touretzky and T. K. Leen},
pages = {183--190},
publisher = {MIT Press (Cambridge)},
title = {On the computational complexity of networks of spiking
neurons},
volume = {7},
year = {1995}
}
@Article{Maass:95c,
author = {W. Maass},
journal = {Telematik},
pages = {53--60},
volume = {2},
title = {{N}euronale {N}etze und maschinelles {L}ernen am
{I}nstitut fuer {G}rundlagen der {I}nformationsverarbeitung
an der {T}echnischen {U}niversitaet {G}raz},
year = {1995}
}
@Article{Maass:96,
author = {W. Maass},
journal = {Neural Computation},
pages = {1--40},
title = {Lower Bounds for the Computational Power of Networks of
Spiking Neurons},
abstract = {We investigate the computational power of a formal model
for networks of spiking neurons. It is shown that simple
operations on phase differences between spike-trains
provide a very powerful computational tool that can in
principle be used to carry out highly complex computations
on a small network of spiking neurons. We construct
networks of spiking neurons that simulate arbitrary
threshold circuits, Turing machines, and a certain type of
random access machines with real valued inputs. We also
show that relatively weak basic assumptions about the
response and threshold functions of the spiking neurons are
sufficient to employ them for such computations.},
volume = {8},
number = {1},
year = {1996}
}
@InProceedings{Maass:96a,
author = {W. Maass},
booktitle = {Advances in Neural Information Processing Systems},
editor = {D. Touretzky and M. C. Mozer and M. E. Hasselmo},
pages = {211--217},
publisher = {MIT Press (Cambridge)},
title = {On the computational power of noisy spiking neurons},
volume = {8},
year = {1996},
abstract = {It has remained unknown whether one can in principle carry
out reliable digital computations with networks of
biologically realistic models for neurons. This article
presents rigorous constructions for simulating in real-time
arbitrary given boolean circuits and finite automata with
arbitrarily high reliability by networks of noisy spiking
neurons. In addition we show that with the help of
"shunting inhibition" even networks of very unreliable
spiking neurons can simulate in real-time any
{McCulloch-Pitts} neuron (or "threshold gate"), and
therefore any multilayer perceptron (or "threshold
circuit") in a reliable manner. These constructions provide
a possible explanation for the fact that biological neural
systems can carry out quite complex computations within 100
msec. It turns out that the assumptions that these
constructions require about the shape of EPSP's and the
behaviour of the noise are surprisingly weak.}
}
@InProceedings{Maass:96b,
author = {W. Maass},
title = {Networks of spiking neurons: the third generation of
neural network models},
booktitle = {Proc. of the 7th Australian Conference on Neural Networks
1996 in Canberra, Australia},
pages = {1--10},
year = {1996},
abstract = {The computational power of formal models for networks of
spiking neurons is compared with that of other neural
network models based on {McCulloch-Pitts} neurons (i.e.
threshold gates), respectively sigmoidal gates. In
particular it is shown that networks of spiking neurons
are, with regard to the number of the neurons that are
needed, computationally more powerful than these other
neural network models. A concrete biologically relevant
function is exhibited which can be computed by a single
spiking neuron (for biologically reasonable values of its
parameters), but which requires hundreds of hidden units on
a sigmoidal neuronal net. On the other hand it is known
that any function that can be computed by a small sigmoidal
neural net can also be computed by a small network of
spiking neurons. This article does not assume prior
knowledge about spiking neurons, and it contains an
extensive list of references to the currently available
literature on computations in networks of spiking neurons
and relevant results from neurobiology.}
}
@Article{Maass:97a,
author = {W. Maass},
journal = {Neural Computation},
pages = {279--304},
title = {Fast sigmoidal networks via spiking neurons},
volume = {9},
year = {1997},
abstract = {We show that networks of relatively realistic mathematical
models for biological neurons can in principle simulate
arbitrary feedforward sigmoidal neural nets in a way which
has previously not been considered. This new approach is
based on temporal coding by single spikes (respectively by
the timing of synchronous firing in pools of neurons),
rather than on the traditional interpretation of analog
variables in terms of firing rates. The resulting new
simulation is substantially faster and hence more
consistent with experimental results about the maximal
speed of information processing in cortical neural systems.
As a consequence we can show that networks of noisy spiking
neurons are "universal approximators" in the sense that
they can approximate with regard to temporal coding
\emph{any} given continuous function of several variables.
This result holds for a fairly large class of schemes for
coding analog variables by firing times of spiking neurons.
This new proposal for the possible organization of
computations in networks of spiking neurons systems has
some interesting consequences for the type of learning
rules that would be needed to explain the self-organization
of such networks. Finally, the fast and noise-robust
implementation of sigmoidal neural nets via temporal coding
points to possible new ways of implementing feedforward and
recurrent sigmoidal neural nets with pulse stream VLSI.}
}
@Article{Maass:97b,
author = {W. Maass},
journal = {Neural Networks},
pages = {1659--1671},
title = {Networks of spiking neurons: the third generation of
neural network models},
volume = {10},
year = {1997},
abstract = {The computational power of formal models for networks of
spiking neurons is compared with that of other neural
network models based on McCulloch Pitts neurons (i.e.,
threshold gates), respectively, sigmoidal gates. In
particular it is shown that networks of spiking neurons
are, with regard to the number of neurons that are needed,
computationally more powerful than these other neural
network models. A concrete biologically relevant function
is exhibited which can be computed by a single spiking
neuron (for biologically reasonable values of its
parameters), but which requires hundreds of hidden units on
a sigmoidal neural net. On the other hand, it is known that
any function that can be computed by a small sigmoidal
neural net can also be computed by a small network of
spiking neurons. This article does not assume prior
knowledge about spiking neurons, and it contains an
extensive list of references to the currently available
literature on computations in networks of spiking neurons
and relevant results from neurobiology. \copyright 1997
Elsevier Science Ltd. All rights reserved.
Keywords--Spiking neuron, Integrate-and-fire neuron,
Computational complexity, Sigmoidal neural nets, Lower
bounds.}
}
@InProceedings{Maass:97c,
author = {W. Maass},
booktitle = {Computational Neuroscience: Trends in research},
editor = {James Bower},
pages = {123--127},
title = {A model for fast analog computations with noisy spiking
neurons},
year = {1997},
abstract = {We show that networks of spiking neurons can simulate
arbitrary feedforward sigmoidal neural nets in a way which
has previously not been considered. This new approach is
based on temporal coding by single spikes (respectively by
the timing of synchronous firing in pools of neurons),
rather than on the traditional interpretation of analog
variables in terms of firing rates. As a consequence we can
show that networks of noisy spiking neurons are "universal
approximators" in the sense that they can approximate with
regard to temporal coding any given continuous function of
several variables.}
}
@InCollection{Maass:97d,
author = {W. Maass},
booktitle = {Spatiotemporal Models in Biological and Artificial
Systems},
editor = {F. L. Silva},
pages = {97--104},
publisher = {IOS-Press},
title = {Analog computations with temporal coding in networks of
spiking neurons},
year = {1997}
}
@inproceedings{Maass:97e,
  author    = {W. Maass},
  title     = {Noisy spiking neurons with temporal coding have more
               computational power than sigmoidal neurons},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Mozer and M. I. Jordan and T. Petsche},
  volume    = {9},
  pages     = {211--217},
  publisher = {MIT Press (Cambridge)},
  year      = {1997},
  abstract  = {We exhibit a novel way of simulating sigmoidal neural nets
               by networks of noisy spiking neurons in temporal coding.
               Furthermore it is shown that networks of noisy spiking
               neurons with temporal coding have a strictly larger
               computational power than sigmoidal neural nets with the
               same number of units.}
}
@inproceedings{Maass:97f,
  author    = {W. Maass},
  title     = {On the relevance of time in neural computation and
               learning},
  booktitle = {Proc. of the 8th International Conference on Algorithmic
               Learning Theory in Sendai (Japan)},
  editor    = {M. Li and A. Maruoka},
  series    = {Lecture Notes in Computer Science},
  volume    = {1316},
  pages     = {364--384},
  publisher = {Springer (Berlin)},
  year      = {1997},
  abstract  = {We discuss models for computation in biological neural
               systems that are based on the current state of knowledge in
               neurophysiology. Differences and similarities to
               traditional neural network models are highlighted. It turns
               out that many important questions regarding computation and
               learning in biological neural systems cannot be adequately
               addressed in traditional neural network models. In
               particular the role of time is quite different in
               biologically more realistic models, and many fundamental
               questions regarding computation and learning have to be
               rethought for this context. Simultaneously a new generation
               of {VLSI}-chips is emerging ("pulsed {VLSI}") where new
               ideas about computing and learning with temporal coding can
               be tested. Articles with details and further pointers to
               the literature can be found at
               http://www.igi.tugraz.at/maass/.}
}
@Article{Maass:97g,
  author =	 {W. Maass},
  title =	 {On the relevance of time in neural computation and
                  learning},
  journal =	 {Theoretical Computer Science},
  year =	 2001,
  volume =	 {261},
  pages =	 {157--178},
  abstract =	 {We discuss models for computation in biological neural
                  systems that are based on the current state of knowledge in
                  neurophysiology. Differences and similarities to
                  traditional neural network models are highlighted. It turns
                  out that many important questions regarding computation and
                  learning in biological neural systems cannot be adequately
                  addressed in traditional neural network models. In
                  particular, the role of time is quite different in
                  biologically more realistic models, and many fundamental
                  questions regarding computation and learning have to be
                  rethought for this context. Simultaneously, a somewhat
                  related new generation of VLSI-chips is emerging ("pulsed
                  VLSI") where new ideas about computing and learning with
                  temporal coding can be tested in an engineering context.
                  Articles with details to models and results that are
                  sketched in this article can be found at
                  http://www.igi.tugraz.at/maass/. We refer to Maass and
                  Bishop (Eds., Pulsed Neural Network, MIT Press, Cambridge,
                  MA, 1999) for a collection of survey articles that contain
                  further details and references. \copyright {2001} Elsevier
                  Science B.V. All rights reserved. \textbf{Key Words:}
                  Neural computation; Temporal coding; Spiking neurons;
                  Computational complexity}
}
@InProceedings{Maass:98a,
  author =	 {W. Maass},
  booktitle =	 {Proc. of the Federated Conference of CLS'98 and MFCS'98,
                  Mathematical Foundations of Computer Science 1998},
  title =	 {On the role of time and space in neural computation},
  publisher =	 {Springer (Berlin)},
  series =	 {Lecture Notes in Computer Science},
  volume =	 {1450},
  year =	 {1998},
  pages =	 {72--83},
  note =	 {Invited talk},
  abstract =	 {We discuss structural differences between models for
                  computation in biological neural systems and computational
                  models in theoretical computer science.}
}
@Article{Maass:98b,
  author =	 {W. Maass},
  journal =	 {Network: Computation in Neural Systems},
  number =	 {3},
  pages =	 {381--397},
  title =	 {A simple model for neural computation with firing rates
                  and firing correlations},
  volume =	 {9},
  year =	 {1998},
  abstract =	 {A simple extension of standard neural network models is
                  introduced which provides a model for neural computations
                  that involve both firing rates and firing correlations.
                  Such an extension appears to be useful since it has been
                  shown that firing correlations play a significant
                  computational role in many biological neural systems.
                  Standard neural network models are only suitable for
                  describing neural computations in terms of firing rates.
                  The resulting extended neural network models are still
                  relatively simple, so that their computational power can be
                  analysed theoretically. We prove rigorous separation
                  results, which show that the use of firing correlations in
                  addition to firing rates can drastically increase the
                  computational power of a neural network. Furthermore, one
                  of our separation results also throws new light on a
                  question that involves just standard neural network models:
                  we prove that the gap between the computational power of
                  high-order and first-order neural nets is substantially
                  larger than shown previously.}
}
@InProceedings{Maass:98d,
  author =	 {W. Maass},
  title =	 {Models for fast analog computation with spiking neurons},
  booktitle =	 {Proc. of the International Conference on Neural
                  Information Processing 1998 (ICONIP'98) in Kitakyushu,
                  Japan},
  pages =	 {187--188},
  year =	 1998,
  publisher =	 {IOS Press (Amsterdam)},
  note =	 {Invited talk at the special session on ``Dynamic Brain''}
}
@inproceedings{Maass:98e,
  author    = {W. Maass},
  title     = {Spiking neurons},
  booktitle = {Proceedings of the ICSC/IFAC Symposium on Neural
               Computation 1998 (NC'98)},
  pages     = {16--20},
  publisher = {ICSC Academic Press (Alberta)},
  year      = 1998,
  note      = {Invited talk}
}
@incollection{Maass:98f,
  author    = {W. Maass},
  title     = {Computing with spiking neurons},
  booktitle = {Pulsed Neural Networks},
  editor    = {W. Maass and C.~M.~Bishop},
  pages     = {55--85},
  publisher = {MIT Press (Cambridge)},
  year      = {1999}
}
@InProceedings{Maass:99,
  author =	 {W. Maass},
  title =	 {Neural Computation with Winner-Take-All as the only
                  Nonlinear Operation},
  booktitle =	 {Advances in Neural Information Processing Systems},
  editor =	 {Sara A. Solla and Todd K. Leen and Klaus-Robert Mueller},
  volume =	 {12},
  publisher =	 {MIT Press (Cambridge)},
  year =	 {2000},
  pages =	 {293--299}
}
@InCollection{Maass:99a,
  author =	 {W. Maass},
  title =	 {Das menschliche {G}ehirn -- nur ein {R}echner?},
  booktitle =	 {Zur Kunst des Formalen Denkens},
  publisher =	 {Passagen Verlag (Wien)},
  year =	 2000,
  pages =	 {209--233},
  editor =	 {R. E. Burkard and W. Maass and P. Weibel}
}
@incollection{Maass:99c,
  author    = {W. Maass},
  title     = {Paradigms for computing with spiking neurons},
  booktitle = {Models of Neural Networks. Early Vision and Attention},
  editor    = {J. L. van Hemmen and J. D. Cowan and E. Domany},
  volume    = {4},
  chapter   = {9},
  pages     = {373--402},
  publisher = {Springer (New York)},
  year      = {2002}
}
@article{Maass:99e,
  author  = {W. Maass},
  title   = {On the computational power of winner-take-all},
  journal = {Neural Computation},
  volume  = {12},
  number  = {11},
  pages   = {2519--2535},
  year    = {2000}
}
@Article{MaassETAL:00,
  author =	 {W. Maass and A. Pinz and R. Braunstingl and G. Wiesspeiner
                  and T. Natschlaeger and O. Friedl and H. Burgsteiner},
  title =	 {Konstruktion von lernfaehigen mobilen {R}obotern im
                  {S}tudentenwettbewerb ``{R}obotik 2000'' an der
                  {T}echnischen {U}niversitaet {G}raz},
  journal =	 {Telematik},
  year =	 {2000},
  pages =	 {20--24}
}
@Article{MaassETAL:01a,
  author =	 {W. Maass and T. Natschlaeger and H. Markram},
  title =	 {Real-time Computing Without Stable States: A New Framework
                  for Neural Computation Based on Perturbations},
  journal =	 {Neural Computation},
  volume =	 14,
  number =	 11,
  pages =	 {2531--2560},
  year =	 2002,
  abstract =	 {A key challenge for neural modeling is to explain how a
                  continuous stream of multi-modal input from a rapidly
                  changing environment can be processed by stereotypical
                  recurrent circuits of integrate-and-fire neurons in
                  real-time. We propose a new framework for neural
                  computation that provides an alternative to previous
                  approaches based on attractor neural networks. It is shown
                  that the inherent transient dynamics of the
                  high-dimensional dynamical system formed by a neural
                  circuit may serve as a universal source of information
                  about past stimuli, from which readout neurons can extract
                  particular aspects needed for diverse tasks in real-time.
                  Stable internal states are not required for giving a stable
                  output, since transient internal states can be transformed
                  by readout neurons into stable target outputs due to the
                  high dimensionality of the dynamical system. Our approach
                  is based on a rigorous computational model, the liquid
                  state machine, that unlike Turing machines, does not
                  require sequential transitions between discrete internal
                  states. Like the Turing machine paradigm it allows for
                  universal computational power under idealized conditions,
                  but for real-time processing of time-varying input. The
                  resulting new framework for neural computation has novel
                  implications for the interpretation of neural coding, for
                  the design of experiments and data-analysis in
                  neurophysiology, and for neuromorphic engineering.}
}
@InProceedings{MaassETAL:02,
  author =	 {W. Maass and G. Steinbauer and R. Koholka},
  title =	 {Autonomous fast learning in a mobile robot},
  booktitle =	 {Sensor Based Intelligent Robots. International Workshop,
                  Dagstuhl Castle, Germany, October 15--25, 2000, Selected
                  Revised Papers},
  pages =	 {345--356},
  year =	 {2002},
  editor =	 {G. D. Hager and H. I. Christensen and H. Bunke and R.
                  Klein},
  volume =	 {2238},
  series =	 lncs,
  publisher =	 {Springer (Berlin)},
  abstract =	 {We discuss a task for mobile robots that can only be
                  solved by robots that are able to learn fast in a
                  completely autonomous fashion. Furthermore we present
                  technical details of a rather inexpensive robot that solves
                  that task.}
}
@inproceedings{MaassETAL:02a,
  author    = {W. Maass and R. Legenstein and H. Markram},
  title     = {A New Approach towards Vision suggested by Biologically
               Realistic Neural Microcircuit Models},
  booktitle = {Biologically Motivated Computer Vision. Proc. of the
               Second International Workshop, BMCV 2002, Tuebingen,
               Germany, November 22--24, 2002},
  editor    = {H. H. Buelthoff and S. W. Lee and T. A. Poggio and C.
               Wallraven},
  series    = {Lecture Notes in Computer Science},
  volume    = {2525},
  pages     = {282--293},
  publisher = {Springer (Berlin)},
  year      = {2002},
  abstract  = {We propose an alternative paradigm for processing
               time-varying visual inputs, in particular for tasks
               involving temporal and spatial integration, which is
               inspired by hypotheses about the computational role of
               cortical microcircuits. Since detailed knowledge about the
               precise structure of the microcircuit is not needed for
               that, it can in principle also be implemented with
               partially unknown or faulty analog hardware. In addition,
               this approach supports parallel realtime processing of
               time-varying visual inputs for diverse tasks, since
               different readouts can be trained to extract concurrently
               from the same microcircuit completely different information
               components.}
}
@inproceedings{MaassETAL:02b,
  author    = {W. Maass and T. Natschlaeger and H. Markram},
  title     = {A Model for Real-Time Computation in Generic Neural
               Microcircuits},
  booktitle = {Proc. of NIPS 2002, Advances in Neural Information
               Processing Systems},
  editor    = {S. Becker and S. Thrun and K. Obermayer},
  volume    = {15},
  pages     = {229--236},
  publisher = {MIT Press},
  year      = {2003},
  abstract  = {A key challenge for neural modeling is to explain how a
               continuous stream of multi-modal input from a rapidly
               changing environment can be processed by stereotypical
               recurrent circuits of integrate-and-fire neurons in
               real-time. We propose a new computational model that does
               not require a task-dependent construction of neural
               circuits. Instead it is based on principles of high
               dimensional dynamical systems in combination with
               statistical learning theory, and can be implemented on
               generic evolved or found recurrent circuitry.}
}
@article{MaassETAL:02c,
  author   = {W. Maass and T. Natschlaeger and H. Markram},
  title    = {Fading Memory and Kernel Properties of Generic Cortical
              Microcircuit Models},
  journal  = {Journal of Physiology -- Paris},
  volume   = {98},
  number   = {4--6},
  pages    = {315--330},
  year     = {2004},
  abstract = {It is quite difficult to construct circuits of spiking
              neurons that can carry out complex computational tasks. On
              the other hand even randomly connected circuits of spiking
              neurons can in principle be used for complex computational
              tasks such as time-warp invariant speech recognition. This
              is possible because such circuits have an inherent tendency
              to integrate incoming information in such a way that simple
              linear readouts can be trained to transform the current
              circuit activity into the target output for a very large
              number of computational tasks. Consequently we propose to
              analyze circuits of spiking neurons in terms of their roles
              as analog fading memory and nonlinear kernels, rather than
              as implementations of specific computational operations and
              algorithms. This article is a sequel to \cite{LSM}, and
              contains new results about the performance of generic
              neural microcircuit models for the recognition of speech
              that is subject to linear and nonlinear time-warps, as well
              as for computations on time-varying firing rates. These
              computations rely, apart from general properties of generic
              neural microcircuit models, just on capabilities of simple
              linear readouts trained by linear regression. This article
              also provides detailed data on the fading memory property
              of generic neural microcircuit models, and a quick review
              of other new results on the computational power of such
              circuits of spiking neurons.}
}
@incollection{MaassETAL:03,
  author    = {W. Maass and T. Natschlaeger and H. Markram},
  title     = {Computational Models for Generic Cortical Microcircuits},
  booktitle = {Computational Neuroscience: A Comprehensive Approach},
  editor    = {J. Feng},
  chapter   = {18},
  pages     = {575--605},
  publisher = {Chapman \& Hall/CRC},
  address   = {Boca Raton},
  year      = {2004},
  abstract  = {The human nervous system processes a continuous stream of
               multi-modal input from a rapidly changing environment. A
               key challenge for neural modeling is to explain how the
               neural microcircuits (columns, minicolumns, etc.) in the
               cerebral cortex whose anatomical and physiological
               structure is quite similar in many brain areas and species
               achieve this enormous computational task. We propose a
               computational model that could explain the potentially
               universal computational capabilities and does not require a
               task-dependent construction of neural circuits. Instead it
               is based on principles of high dimensional dynamical
               systems in combination with statistical learning theory,
               and can be implemented on generic evolved or found
               recurrent circuitry. This new approach towards
               understanding neural computation on the micro-level also
               suggests new ways of modeling cognitive processing in
               larger neural systems. In particular it questions
               traditional ways of thinking about neural coding.}
}
@inproceedings{MaassETAL:04,
  author    = {W. Maass and R. Legenstein and N. Bertschinger},
  title     = {Methods for Estimating the Computational Power and
               Generalization Capability of Neural Microcircuits},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {L. K. Saul and Y. Weiss and L. Bottou},
  volume    = {17},
  pages     = {865--872},
  publisher = {MIT Press},
  year      = {2005},
  abstract  = {What makes a neural microcircuit computationally powerful?
               Or more precisely, which measurable quantities could
               explain why one microcircuit $C$ is better suited for a
               particular family of computational tasks than another
               microcircuit $C'$? We propose in this article quantitative
               measures for evaluating the computational power and
               generalization capability of a neural microcircuit, and
               apply them to generic neural microcircuit models drawn from
               different distributions. We validate the proposed measures
               by comparing their prediction with direct evaluations of
               the computational performance of these microcircuit models.
               This procedure is applied first to microcircuit models that
               differ with regard to the spatial range of synaptic
               connections and with regard to the scale of synaptic
               efficacies in the circuit, and then to microcircuit models
               that differ with regard to the level of background input
               currents and the level of noise on the membrane potential
               of neurons. In this case the proposed method allows us to
               quantify differences in the computational power and
               generalization capability of circuits in different dynamic
               regimes (UP- and DOWN-states) that have been demonstrated
               through intracellular recordings in vivo.}
}
@inproceedings{MaassETAL:06,
  author    = {W. Maass and P. Joshi and E. D. Sontag},
  title     = {Principles of real-time computing with feedback applied to
               cortical microcircuit models},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {Y. Weiss and B. Schoelkopf and J. Platt},
  volume    = {18},
  pages     = {835--842},
  publisher = {MIT Press},
  year      = {2006},
  abstract  = {The network topology of neurons in the brain exhibits an
               abundance of feedback connections, but the computational
               function of these feedback connections is largely unknown.
               We present a computational theory that characterizes the
               gain in computational power achieved through feedback in
               dynamical systems with fading memory. It implies that many
               such systems acquire through feedback universal
               computational capabilities for analog computing with a
               non-fading memory. In particular, we show that feedback
               enables such systems to process time-varying input streams
               in diverse ways according to rules that are implemented
               through internal states of the dynamical system. In
               contrast to previous attractor-based computational models
               for neural networks, these flexible internal states are
               {\em high-dimensional} attractors of the circuit dynamics,
               that still allow the circuit state to absorb new
               information from online input streams. In this way one
               arrives at novel models for working memory, integration of
               evidence, and reward expectation in cortical circuits. We
               show that they are applicable to circuits of
               conductance-based Hodgkin-Huxley (HH) neurons with high
               levels of noise that reflect experimental data on in-vivo
               conditions. }
}
@article{MaassETAL:06a,
  author   = {W. Maass and P. Joshi and E. D. Sontag},
  title    = {Computational aspects of feedback in neural circuits},
  journal  = {PLoS Computational Biology},
  volume   = {3},
  number   = {1},
  pages    = {e165},
  year     = 2007,
  htmlnote = {(Journal link to PDF)},
  abstract = {It has previously been shown that generic cortical
              microcircuit models can perform complex real-time
              computations on continuous input streams, provided that
              these computations can be carried out with a rapidly fading
              memory. We investigate the computational capability of such
              circuits in the more realistic case where not only readout
              neurons, but in addition a few neurons within the circuit,
              have been trained for specific tasks. This is essentially
              equivalent to the case where the output of trained readout
              neurons is fed back into the circuit. We show that this new
              model overcomes the limitation of a rapidly fading memory.
              In fact, we prove that in the idealized case without noise
              it can carry out any conceivable digital or analog
              computation on time-varying inputs. But even with noise,
              the resulting computational model can perform a large class
              of biologically relevant real-time computations that
              require a nonfading memory. We demonstrate these
              computational implications of feedback both theoretically,
              and through computer simulations of detailed cortical
              microcircuit models that are subject to noise and have
              complex inherent dynamics. We show that the application of
              simple learning procedures (such as linear regression or
              perceptron learning) to a few neurons enables such circuits
              to represent time over behaviorally relevant long time
              spans, to integrate evidence from incoming spike trains
              over longer periods of time, and to process new information
              contained in such spike trains in diverse ways according to
              the current internal state of the circuit. In particular we
              show that such generic cortical microcircuits with feedback
              provide a new model for working memory that is consistent
              with a large set of biological constraints. Although this
              article examines primarily the computational role of
              feedback in circuits of neurons, the mathematical
              principles on which its analysis is based apply to a
              variety of dynamical systems. Hence they may also throw new
              light on the computational role of feedback in other
              complex biological dynamical systems, such as, for example,
              genetic regulatory networks.}
}
@Article{MaassETAL:07,
  author =	 {H. Jaeger and W. Maass and J. Principe},
  title =	 {Special Issue on Echo State Networks and Liquid State
                  Machines},
  journal =	 {Neural Networks},
  year =	 {2007},
  volume =	 {20},
  number =	 {3},
  pages =	 {287--289}
}
@Article{MaassETAL:81,
  author =	 {W. Maass and A. Shore and M. Stob},
  journal =	 {Israel Journal of Mathematics},
  pages =	 {210--224},
  title =	 {Splitting properties and jump classes},
  volume =	 {39},
  year =	 {1981}
}
@inproceedings{MaassETAL:87,
  author    = {W. Maass and G. Schnitger and E. Szemeredi},
  title     = {Two tapes are better than one for off-line Turing
               machines},
  booktitle = {Proceedings of the 19th Annual ACM Symposium on Theory of
               Computing},
  pages     = {94--100},
  year      = {1987}
}
@InProceedings{MaassETAL:91,
  author =	 {W. Maass and G. Schnitger and E. Sontag},
  booktitle =	 {Proc. of the 32nd Annual IEEE Symposium on Foundations of
                  Computer Science 1991},
  pages =	 {767--776},
  title =	 {On the computational power of sigmoid versus boolean
                  threshold circuits},
  abstract =	 {We examine the power of constant depth circuits with
                  sigmoid (i.e. smooth) threshold gates for computing boolean
                  functions. It is shown that, for depth 2, constant size
                  circuits of this type are strictly more powerful than
                  constant size boolean threshold circuits (i.e. circuits
                  with boolean threshold gates). On the other hand it turns
                  out that, for any constant depth d, polynomial size sigmoid
                  threshold circuits with polynomially bounded weights
                  compute exactly the same boolean functions as the
                  corresponding circuits with boolean threshold gates.},
  year =	 {1991}
}
@incollection{MaassETAL:91a,
  author    = {W. Maass and G. Schnitger and E. Sontag},
  title     = {A Comparison of the Computational Power of Sigmoid and
               Boolean Threshold Circuits},
  booktitle = {Theoretical Advances in Neural Computation and Learning},
  editor    = {V.~P.~Roychowdhury and K.~Y.~Siu and A.~Orlitsky},
  pages     = {127--151},
  publisher = {Kluwer Academic Publishers (Boston)},
  year      = {1994}
}
@article{MaassETAL:93,
  author  = {W. Maass and G. Schnitger and E. Szemeredi and G. Turan},
  title   = {Two tapes versus one for off-line {T}uring machines},
  journal = {Computational Complexity},
  volume  = {3},
  pages   = {392--401},
  year    = {1993}
}
@inproceedings{MaassLegenstein:01,
  author    = {R. A. Legenstein and W. Maass},
  title     = {Foundations for a circuit complexity theory of sensory
               processing},
  booktitle = {Proc. of NIPS 2000, Advances in Neural Information
               Processing Systems},
  editor    = {T. K. Leen and T. G. Dietterich and V. Tresp},
  volume    = {13},
  pages     = {259--265},
  publisher = {MIT Press},
  address   = {Cambridge},
  year      = {2001},
  abstract  = {We introduce {\em total wire length} as salient complexity
               measure for an analysis of the circuit complexity of
               sensory processing in biological neural systems and
               neuromorphic engineering. Furthermore we introduce a set of
               basic computational problems that apparently need to be
               solved by circuits for translation- and scale-invariant
               sensory processing. Finally we exhibit a number of circuit
               design strategies for these new benchmark functions that
               can be implemented within realistic complexity bounds, in
               particular with linear or almost linear total wire length.}
}
@Article{MaassLegenstein:01a,
  author =	 {R. A. Legenstein and W. Maass},
  title =	 {Wire Length as a Circuit Complexity Measure},
  journal =	 {Journal of Computer and System Sciences},
  year =	 {2005},
  volume =	 {70},
  pages =	 {53--72},
  abstract =	 {We introduce {\em wire length} as a salient complexity
                  measure for analyzing the circuit complexity of sensory
                  processing in biological neural systems. This new
                  complexity measure is applied in this paper to two basic
                  computational problems that arise in translation- and
                  scale-invariant pattern recognition, and hence appear to be
                  useful as benchmark problems for sensory processing. We
                  present new circuit design strategies for these benchmark
                  problems that can be implemented within realistic
                  complexity bounds, in particular with linear or almost
                  linear wire length. Finally we derive some general bounds
                  which provide information about the relationship between
                  the new complexity measure wire length and traditional
                  circuit complexity measures.}
}
@article{MaassLegenstein:01c,
  author   = {R. A. Legenstein and W. Maass},
  title    = {Neural circuits for pattern recognition with small total
              wire length},
  journal  = {Theoretical Computer Science},
  volume   = {287},
  pages    = {239--249},
  year     = {2002},
  abstract = {One of the most basic pattern recognition problems is
              whether a certain local feature occurs in some linear array
              to the left of some other local feature. We construct in
              this article circuits that solve this problem with an
              asymptotically optimal number of threshold gates.
              Furthermore it is shown that much fewer threshold gates are
              needed if one employs in addition a small number of
              winner-take-all gates. In either case the circuits that are
              constructed have linear or almost linear total wire length,
              and are therefore not unrealistic from the point of view of
              physical implementations.}
}
@Article{MaassLegenstein:01d,
  author =	 {R. A. Legenstein and W. Maass},
  title =	 {Optimizing the Layout of a Balanced Tree},
  journal =	 {Technical Report},
  year =	 {2001},
  abstract =	 {It is shown that the total wire length of layouts of a
                  balanced binary tree on a 2-dimensional grid can be reduced
                  by 33\% if one does not choose the obvious ``symmetric''
                  layout strategy. Furthermore it is shown that the more
                  efficient layout strategy that is presented in this article
                  is optimal, not only for binary trees but for m-ary trees
                  with any $m \geq 2$.}
}
@article{MaassMarkram:00,
  author  = {W. Maass and H. Markram},
  title   = {Synapses as dynamic memory buffers},
  journal = {Neural Networks},
  volume  = {15},
  pages   = {155--161},
  year    = {2002}
}
@Article{MaassMarkram:02,
  author =	 {W. Maass and H. Markram},
  title =	 {On the Computational Power of Circuits of Spiking
                  Neurons},
  abstract =	 {Complex real-time computations on multi-modal time-varying
                  input streams are carried out by generic cortical
                  microcircuits. Obstacles for the development of adequate
                  theoretical models that could explain the seemingly
                  universal power of cortical microcircuits for real-time
                  computing are the complexity and diversity of their
                  computational units (neurons and synapses), as well as the
                  traditional emphasis on offline computing in almost all
                  theoretical approaches towards neural computation. In this
                  article, we initiate a rigorous mathematical analysis of
                  the real-time computing capabilities of a new generation of
                  models for neural computation, liquid state machines, that
                  can be implemented with, in fact benefit from, diverse
                  computational units. Hence, realistic models for cortical
                  microcircuits represent special instances of such liquid
                  state machines, without any need to simplify or homogenize
                  their diverse computational units. We present proofs of two
                  theorems about the potential computational power of such
                  models for real-time computing, both for analog input
                  streams and for spike trains as inputs.},
  journal =	 {Journal of Computer and System Sciences},
  year =	 {2004},
  volume =	 {69},
  number =	 {4},
  pages =	 {593--616}
}
@incollection{MaassMarkram:02a,
  author    = {W. Maass and H. Markram},
  title     = {Temporal Integration in Recurrent Microcircuits},
  booktitle = {The Handbook of Brain Theory and Neural Networks},
  editor    = {M. A. Arbib},
  edition   = {2nd},
  pages     = {1159--1163},
  publisher = {MIT Press (Cambridge)},
  year      = {2003}
}
@inproceedings{MaassMarkram:04,
  author    = {W. Maass and H. Markram},
  title     = {Theory of the Computational Function of Microcircuit
               Dynamics},
  booktitle = {The Interface between Neurons and Global Brain Function},
  editor    = {S. Grillner and A. M. Graybiel},
  series    = {Dahlem Workshop Report 93},
  chapter   = {18},
  pages     = {371--390},
  publisher = {MIT Press},
  year      = {2006}
}
@article{MaassNatschlaeger:97a,
  author    = {W. Maass and T. Natschlaeger},
  title     = {Networks of Spiking Neurons can Emulate Arbitrary
               {H}opfield nets in Temporal Coding},
  journal   = {Network: Computation in Neural Systems},
  volume    = 8,
  number    = 4,
  pages     = {355--371},
  year      = 1997,
  keywords  = {spiking neurons, hopfield net, temporal coding},
  userlabel = {4},
  abstract  = {A theoretical model for analog computation with temporal
               coding is introduced and tested through simulations in
               GENESIS. It turns out that the use of multiple synapses
               yields very noise robust mechanisms for analog computations
               with temporal coding in networks of detailed compartmental
               neuron models. One arrives in this way at a method for
               emulating arbitrary Hopfield nets with spiking neurons in
               temporal coding, yielding new models for associative recall
               of spatio-temporal firing patterns. A corresponding layered
               architecture yields a refinement of the synfire-chain model
               that can assume a fairly large set of different firing
               patterns for different inputs.}
}
@inproceedings{MaassNatschlaeger:98,
  author    = {W. Maass and T. Natschlaeger},
  title     = {Associative Memory with Networks of Spiking Neurons in
               Temporal Coding},
  booktitle = {Neuromorphic Systems: Engineering Silicon from
               Neurobiology},
  editor    = {L. S. Smith and A. Hamilton},
  pages     = {21--32},
  publisher = {World Scientific},
  year      = 1998,
  keywords  = {spiking neurons, hopfield net, temporal coding},
  abstract  = {A theoretical model for analog computation with temporal
               coding is introduced and tested through simulations in
               GENESIS. It turns out that the use of multiple synapses
               yields very noise robust mechanisms for analog computations
               with temporal coding in networks of detailed compartmental
               neuron models. One arrives in this way at a method for
               emulating arbitrary Hopfield nets with spiking neurons in
               temporal coding, yielding new models for associative recall
               of spatio-temporal firing patterns.}
}
@inproceedings{MaassNatschlaeger:98b,
  author    = {W. Maass and T. Natschlaeger},
  title     = {Emulation of {H}opfield Networks with Spiking Neurons in
               Temporal Coding},
  booktitle = {Computational Neuroscience: Trends in Research},
  editor    = {J. M. Bower},
  pages     = {221--226},
  publisher = {Plenum Press},
  year      = 1998,
  keywords  = {spiking neurons, hopfield net, temporal coding},
  abstract  = {A theoretical model for analog computation with temporal
               coding is introduced and tested through simulations in
               GENESIS. It turns out that the use of multiple synapses
               yields very noise robust mechanisms for analog computations
               with temporal coding in networks of detailed compartmental
               neuron models. One arrives in this way at a method for
               emulating arbitrary Hopfield nets with spiking neurons in
               temporal coding, yielding new models for associative recall
               of spatio-temporal firing patterns. A corresponding layered
               architecture yields a refinement of the synfire-chain model
               that can assume a fairly large set of different firing
               patterns for different inputs.}
}
@Article{MaassNatschlaeger:99,
author = {W. Maass and T. Natschlaeger},
title = {A model for Fast Analog Computation Based on Unreliable
Synapses},
journal = {Neural Computation},
year = {2000},
pages = {1679--1704},
volume = {12},
number = {7},
keywords = {unreliable synapses, universal function approximation,
fast analog computation, time series, hebbian learning,
space rate coding, population activity},
abstract = {We investigate through theoretical analysis and computer
simulations the consequences of unreliable synapses for
fast analog computations in networks of spiking neurons,
with analog variables encoded by the current firing
activities of pools of spiking neurons. Our results suggest
that the known unreliability of synaptic transmission may
be viewed as a useful tool for analog computing, rather
than as a ``bug'' in neuronal hardware. We also investigate
computations on time series and Hebbian learning in this
context of space-rate coding.},
natschl_label = {6a}
}
@InProceedings{MaassOrponen:97,
author = {W. Maass and P. Orponen},
booktitle = {Advances in Neural Information Processing Systems},
editor = {M. Mozer and M. I. Jordan and T. Petsche},
pages = {218--224},
publisher = {MIT Press (Cambridge)},
title = {On the effect of analog noise in discrete-time analog
computations},
volume = {9},
year = {1997},
abstract = {We introduce a model for noise-robust analog computations
with discrete time that is flexible enough to cover the
most important concrete cases, such as computations in
noisy analog neural nets and networks of noisy spiking
neurons. We show that the presence of arbitrarily small
amounts of analog noise reduces the power of analog
computational models to that of finite automata, and we
also prove a new type of upper bound for the {VC-dimension}
of computational models with analog noise.}
}
@Article{MaassOrponen:97j,
author = {W. Maass and P. Orponen},
title = {On the effect of analog noise in discrete-time analog
computations},
journal = {Neural Computation},
year = 1998,
volume = 10,
pages = {1071--1095},
abstract = {We introduce a model for analog computation with discrete
time in the presence of analog noise that is flexible
enough to cover the most important concrete cases, such as
noisy analog neural nets and networks of spiking neurons.
This model subsumes the classical model for digital
computation in the presence of noise. We show that the
presence of arbitrarily small amounts of analog noise
reduces the power of analog computational models to that of
finite automata, and we also prove a new type of upper
bound for the {VC-dimension} of computational models with
analog noise.}
}
@InProceedings{MaassRuf:95,
author = {W. Maass and B. Ruf},
address = {Paris},
booktitle = {Proc. of the International Conference on Artificial Neural
Networks ICANN},
pages = {515--520},
publisher = {EC2\&Cie},
title = {On the Relevance of the Shape of Postsynaptic Potentials
for the Computational Power of Networks of Spiking
Neurons},
year = {1995},
keywords = {spiking neuron, postsynaptic potential, computational
complexity},
abstract = {The firing of a neuron in a biological neural system
causes in certain other neurons excitatory postsynaptic
potential changes (EPSP's) that are not ``rectangular'', but
have the form of a smooth hill. We prove in this article
for a formal model of a network of spiking neurons, that
the rising respectively declining segments of these EPSP's
are in fact essential for the computational power of the
model.}
}
@Article{MaassRuf:97,
author = {W. Maass and B. Ruf},
journal = {Information and Computation},
title = {On computation with pulses},
year = {1999},
volume = {148},
pages = {202--218},
keywords = {spiking neuron, computational complexity, postsynaptic
potential}
}
@InProceedings{MaassSchmitt:97,
author = {W. Maass and M. Schmitt},
booktitle = {Proc. of the 10th Conference on Computational Learning
Theory 1997},
note = {See also Electronic Proc. of the Fifth International
Symposium on Artificial Intelligence and Mathematics
(http://rutcor.rutgers.edu/\~{}amai)},
pages = {54--61},
publisher = {ACM-Press (New York)},
title = {On the complexity of learning for a spiking neuron},
year = {1997}
}
@Article{MaassSchmitt:98,
author = {W. Maass and M. Schmitt},
journal = {Information and Computation},
title = {On the complexity of learning for spiking neurons with
temporal coding},
year = {1999},
volume = {153},
pages = {26--46},
abstract = {Spiking neurons are models for the computational units in
biological neural systems where information is considered
to be encoded mainly in the temporal patterns of their
activity. In a network of spiking neurons a new set of
parameters becomes relevant which has no counterpart in
traditional neural network models: the time that a pulse
needs to travel through a connection between two neurons
(also known as delay of a connection). It is known that
these delays are tuned in biological neural systems through
a variety of mechanisms. In this article we consider the
arguably most simple model for a spiking neuron, which can
also easily be implemented in pulsed {VLSI}. We investigate
the {V}apnik-{C}hervonenkis ({VC}) dimension of networks of
spiking neurons, where the delays are viewed as
programmable parameters and we prove tight bounds for this
{VC} dimension. Thus, we get quantitative estimates for the
diversity of functions that a network with fixed
architecture can compute with different settings of its
delays. In particular, it turns out that a network of
spiking neurons with k adjustable delays is able to compute
a much richer class of functions than a threshold circuit
with k adjustable weights. The results also yield bounds
for the number of training examples that an algorithm needs
for tuning the delays of a network of spiking neurons.
Results about the computational complexity of such
algorithms are also given.}
}
@InProceedings{MaassSchnitger:86,
author = {W. Maass and G. Schnitger},
booktitle = {Proceedings of the Structure in Complexity Theory
Conference, Berkeley 1986},
pages = {249--264},
publisher = {Springer (Berlin)},
series = {Lecture Notes in Computer Science},
title = {An optimal lower bound for {T}uring machines with one Work
Tape and Two-Way Input Tape},
year = {1986},
volume = {223}
}
@Article{MaassSchorr:87,
author = {W. Maass and A. Schorr},
journal = {SIAM J. Comput.},
pages = {195--202},
title = {Speed-up of {T}uring machines with one work tape and a
two-way input tape},
volume = {16},
year = {1987}
}
@InProceedings{MaassSlaman:89,
author = {W. Maass and T. A. Slaman},
booktitle = {Proceedings of the Logic Colloquium '88, Padova, Italy},
editor = {Ferro and Bonotto and Valentini and Zanardo},
pages = {79--89},
publisher = {Elsevier Science Publishers (North-Holland)},
title = {Some problems and results in the theory of actually
computable functions},
year = {1989}
}
@InProceedings{MaassSlaman:89a,
author = {W. Maass and T. A. Slaman},
booktitle = {Proceedings of the 4th Annual Conference on Structure in
Complexity Theory},
pages = {231--239},
publisher = {IEEE Computer Society Press (Washington)},
title = {The complexity types of computable sets (extended
abstract)},
year = {1989}
}
@InProceedings{MaassSlaman:89b,
author = {W. Maass and T. A. Slaman},
booktitle = {Proceedings of the 7th International Conference on
Fundamentals of Computation Theory},
pages = {318--326},
publisher = {Springer (Berlin)},
series = {Lecture Notes in Computer Science},
title = {Extensional properties of sets of time bounded complexity
(extended abstract)},
volume = {380},
year = {1989}
}
@InProceedings{MaassSlaman:90,
author = {W. Maass and T. A. Slaman},
booktitle = {Proceedings of the 1989 Recursion Theory Week
Oberwolfach},
pages = {297--322},
publisher = {Springer (Berlin)},
title = {On the relationship between the complexity, the degree,
and the extension of a computable set},
year = {1990}
}
@InProceedings{MaassSlaman:91,
author = {W. Maass and T. A. Slaman},
booktitle = {Proceedings of a Workshop on Logic from Computer Science},
editor = {Y. N. Moschovakis},
pages = {359--372},
publisher = {Springer (Berlin)},
title = {Splitting and density for the recursive sets of a fixed
time complexity},
year = {1991}
}
@Article{MaassSlaman:92,
author = {W. Maass and T. A. Slaman},
journal = {Journal of Computer and System Sciences},
note = {Invited paper for a special issue of the J. Comput. Syst.
Sci.},
pages = {168--192},
title = {The complexity types of computable sets},
volume = {44},
year = {1992}
}
@Article{MaassSontag:97,
author = {W. Maass and E. Sontag},
journal = {Neural Computation},
title = {Analog neural nets with {G}aussian or other common noise
distributions cannot recognize arbitrary regular
languages},
year = {1999},
volume = {11},
pages = {771--782},
abstract = {We consider recurrent analog neural nets where the output
of each gate is subject to Gaussian noise or any other
common noise distribution that is nonzero on a sufficiently
large part of the state-space. We show that many regular
languages cannot be recognized by networks of this type,
and we give a precise characterization of languages that can
be recognized. This result implies severe constraints on
possibilities for constructing recurrent analog neural nets
that are robust against realistic types of analog noise. On
the other hand, we present a method for constructing
feedforward analog neural nets that are robust with regard
to analog noise of this type.}
}
@Article{MaassSontag:99a,
author = {W. Maass and E. D. Sontag},
title = {Neural systems as nonlinear filters},
journal = {Neural Computation},
volume = {12},
number = {8},
year = {2000},
pages = {1743--1772}
}
@InProceedings{MaassSontag:99b,
author = {W. Maass and E. D. Sontag},
title = {A precise characterization of the class of languages
recognized by neural nets under {G}aussian and other common
noise distributions},
booktitle = {Advances in Neural Information Processing Systems},
year = 1999,
volume = {11},
pages = {281--287},
editor = {M.~S.~Kearns and S.~S.~Solla and D.~A.~Cohn},
publisher = {MIT Press (Cambridge)}
}
@Article{MaassStob:83,
author = {W. Maass and M. Stob},
journal = {Ann. of Pure and Applied Logic},
pages = {189--212},
title = {The Intervals of the lattice of recursively enumerable
sets determined by major subsets},
volume = {24},
year = {1983}
}
@Article{MaassSutner:88,
author = {W. Maass and K. Sutner},
journal = {Acta Informatica},
pages = {93--122},
title = {Motion planning among time dependent obstacles},
volume = {26},
year = {1988}
}
@InProceedings{MaassTuran:89,
author = {W. Maass and G. Tur{\'a}n},
booktitle = {Proceedings of the 30th Annual IEEE Symposium on
Foundations of Computer Science},
pages = {262--267},
title = {On the complexity of learning from counterexamples
(extended abstract)},
year = {1989}
}
@InProceedings{MaassTuran:90,
author = {W. Maass and G. Turan},
booktitle = {Proceedings of the 31st Annual IEEE Symposium on
Foundations of Computer Science},
pages = {203--210},
title = {On the complexity of learning from counterexamples and
membership queries},
year = {1990}
}
@Article{MaassTuran:92,
author = {W. Maass and G. Turan},
journal = {Machine Learning},
note = {Invited paper for a special issue of Machine Learning},
pages = {107--145},
title = {Lower bound methods and separation results for on-line
learning models},
volume = {9},
year = {1992},
abstract = {We consider the complexity of concept learning in various
common models for on-line learning, focusing on methods for
proving lower bounds to the learning complexity of a
concept class. Among others, we consider the model for
learning with equivalence and membership queries. For this
model we give lower bounds on the number of queries that
are needed to learn a concept class $C$ in terms of the
Vapnik-Chervonenkis dimension of $C$, and in terms of the
complexity of learning $C$ with arbitrary equivalence
queries. Furthermore, we survey other known lower bound
methods and we exhibit all known relationships between
learning complexities in the models considered and some
relevant combinatorial parameters. As it turns out, the
picture is almost complete. This paper has been written so
that it can be read without previous knowledge of
Computational Learning Theory. \textbf{Keywords.} Formal
models for learning, learning algorithms, lower bound
arguments, VC-dimension, machine learning}
}
@InCollection{MaassTuran:94,
author = {W. Maass and G. Turan},
booktitle = {Computational Learning Theory and Natural Learning System:
Constraints and Prospects},
editor = {S. J. Hanson and G. A. Drastal and R. L. Rivest},
pages = {381--414},
publisher = {MIT Press (Cambridge)},
title = {How fast can a threshold gate learn},
year = {1994}
}
@Article{MaassTuran:94a,
author = {W. Maass and G. Turan},
journal = {Machine Learning},
pages = {251--269},
title = {Algorithms and lower bounds for on-line learning of
geometrical concepts},
volume = {14},
year = {1994}
}
@InProceedings{MaassTuran:95,
author = {W. Maass and G. Turan},
address = {Jerusalem},
booktitle = {Proc. of the 4th Bar-Ilan Symposium on Foundations of
Artificial Intelligence (BISFAI'95)},
title = {On learnability and predicate logic (extended abstract)},
year = {1995},
pages = {126--136}
}
@InProceedings{MaassWarmuth:95,
author = {W. Maass and M. Warmuth},
booktitle = {Proc. of the 12th International Machine Learning
Conference, Tahoe City, USA},
publisher = {Morgan Kaufmann (San Francisco)},
pages = {378--386},
title = {Efficient learning with virtual threshold gates},
year = {1995}
}
@Article{MaassWarmuth:95j,
author = {W. Maass and M. Warmuth},
title = {Efficient learning with virtual threshold gates},
journal = {Information and Computation},
year = 1998,
volume = 141,
number = 1,
pages = {66--83}
}
@InCollection{MaassWeibel:97,
author = {W. Maass and P. Weibel},
booktitle = {{Jenseits von Kunst}},
editor = {P. Weibel},
pages = {745--747},
publisher = {Passagen Verlag},
title = {{I}st die {V}ertreibung der {V}ernunft reversibel?
{U}eberlegungen zu einem {W}issenschafts- und
{M}edienzentrum},
year = {1997}
}
@InProceedings{MaassZador:98,
author = {W. Maass and A. M. Zador},
booktitle = {Advances in Neural Information Processing Systems},
publisher = {MIT Press (Cambridge)},
title = {Dynamic stochastic synapses as computational units},
volume = {10},
pages = {194--200},
year = {1998},
abstract = {In most neural network models, synapses are treated as
static weights that change only on the slow time scales of
learning. In fact, however, synapses are highly dynamic,
and show use-dependent plasticity over a wide range of time
scales. Moreover, synaptic transmission is an inherently
stochastic process: a spike arriving at a presynaptic
terminal triggers release of a vesicle of neurotransmitter
from a release site with a probability that can be much
less than one. Changes in release probability represent one
of the main mechanisms by which synaptic efficacy is
modulated in neural circuits. We propose and investigate a
simple model for dynamic stochastic synapses that can
easily be integrated into common models for neural
computation. We show through computer simulations and
rigorous theoretical analysis that this model for a
dynamic stochastic synapse increases computational power in
a nontrivial way. Our results may have implications for the
processing of time-varying signals by both biological and
artificial neural networks.}
}
@InCollection{MaassZador:98a,
author = {W. Maass and A. Zador},
booktitle = {Pulsed Neural Networks},
editor = {W. Maass and C. Bishop},
publisher = {MIT-Press (Cambridge)},
title = {Computing and learning with dynamic synapses},
year = {1998},
pages = {321--336}
}
@Article{MaassZador:98j,
author = {W. Maass and A. M. Zador},
title = {Dynamic stochastic synapses as computational units},
journal = {Neural Computation},
year = 1999,
volume = 11,
number = 4,
pages = {903--917},
abstract = {In most neural network models, synapses are treated as
static weights that change only with the slow time scales
of learning. It is well known, however, that synapses are
highly dynamic and show use-dependent plasticity over a
wide range of time scales. Moreover, synaptic transmission
is an inherently stochastic process: a spike arriving at a
presynaptic terminal triggers the release of a vesicle of
neurotransmitter from a release site with a probability
that can be much less than one. We consider a simple model
for dynamic stochastic synapses that can easily be
integrated into common models for networks of
integrate-and-fire neurons (spiking neurons). The
parameters of this model have direct interpretations in
terms of synaptic physiology. We investigate the
consequences of the model for computing with individual
spikes and demonstrate through rigorous theoretical results
that the computational power of the network is increased
through the use of dynamic synapses.}
}
@Article{MelamedETAL:03,
author = {O. Melamed and W. Gerstner and W. Maass and M. Tsodyks and
H. Markram},
title = {Coding and Learning of Behavioral Sequences},
abstract = {A major challenge to understanding behavior is how the
nervous system allows the learning of behavioral sequences
that can occur over arbitrary timescales, ranging from
milliseconds up to seconds, using a fixed millisecond
learning rule. This article describes some potential
solutions, and then focuses on a study by Mehta et al. that
could contribute towards solving this puzzle. They have
discovered that an experience-dependent asymmetric shape of
hippocampal receptive fields combined with oscillatory
inhibition can serve to map behavioral sequences on a fixed
timescale.},
journal = {Trends in Neurosciences},
volume = 27,
number = 1,
year = {2004},
pages = {11--14}
}
@InProceedings{NatschlaegerETAL:01,
author = {T. Natschlaeger and W. Maass and E. D. Sontag and A.
Zador},
title = {Processing of Time Series by Neural Circuits with
Biologically Realistic Synaptic Dynamics},
booktitle = {Advances in Neural Information Processing Systems 2000
({NIPS '2000})},
editor = {Todd K. Leen and Thomas G. Dietterich and Volker Tresp},
year = 2001,
volume = 13,
pages = {145--151},
address = {Cambridge},
publisher = {MIT Press},
abstract = {Experimental data show that biological synapses behave
quite differently from the symbolic synapses in common
artificial neural network models. Biological synapses are
dynamic, i.e., their weight changes on a short time scale
by several hundred percent in dependence of the past input
to the synapse. In this article we explore the consequences
that this synaptic dynamics entails for the computational
power of feedforward neural networks. It turns out that
even with just a single hidden layer such networks can
approximate a surprisingly large class of nonlinear
filters: all filters that can be characterized by Volterra
series. This result is robust with regard to various
changes in the model for synaptic dynamics. Furthermore we
show that simple gradient descent suffices to approximate a
given quadratic filter by a rather small neural system with
dynamic synapses. We demonstrate that with this approach
the nonlinear filter considered in (Back and Tsoi 93) can
be approximated even better than by their model.},
htmlnote = {The poster presented at NIPS is available as PDF
file.}
}
@Article{NatschlaegerETAL:01a,
author = {T. Natschlaeger and W. Maass and A. Zador},
title = {Efficient Temporal Processing with Biologically Realistic
Dynamic Synapses},
journal = {Network: Computation in Neural Systems},
year = 2001,
pages = {75--87},
volume = 12,
abstract = {Experimental data show that biological synapses behave
quite differently from the symbolic synapses in common
artificial neural network models. Biological synapses are
dynamic, i.e., their ``weight'' changes on a short time
scale by several hundred percent in dependence of the past
input to the synapse. Here we describe a general model of
computation that exploits dynamic synapses, and use a
backpropagation-like algorithm to adjust the synaptic
parameters. We show that such gradient descent suffices to
approximate a given quadratic filter by a rather small
neural system with dynamic synapses. We demonstrate that
with this approach the nonlinear filter considered in (Back
and Tsoi, 1993) can be approximated slightly better than by
their model. Our numerical results are complemented by
theoretical analysis which show that even with just a
single hidden layer such networks can approximate a
surprisingly large class of nonlinear filters: all filters
that can be characterized by Volterra series. This result
is robust with regard to various changes in the model for
synaptic dynamics. }
}
@Article{NatschlaegerETAL:02,
author = {T. Natschlaeger and W. Maass and H. Markram},
title = {The {``Liquid Computer''}: A Novel Strategy for Real-Time
Computing on Time Series},
journal = {Special Issue on Foundations of Information Processing of
{TELEMATIK}},
year = {2002},
pages = {39--43},
volume = {8},
number = {1},
abstract = {We will discuss in this survey article a new framework for
analysing computations on time series and in particular on
spike trains, introduced in (Maass et. al. 2002). In
contrast to common computational models this new framework
does not require that information can be stored in some
stable states of a computational system. It has recently
been shown that such models where all events are transient
can be successfully applied to analyse computations in
neural systems and (independently) that the basic ideas can
also be used to solve engineering tasks such as the design
of nonlinear controllers. Using an illustrative example we
will develop the main ideas of the proposed model. This
illustrative example is generalized and cast into a
rigorous mathematical model: the Liquid State Machine. A
mathematical analysis shows that there are in principle no
computational limitations of liquid state machines in the
domain of time series computing. Finally we discuss several
successful applications of the framework in the area of
computational neuroscience and in the field of artificial
neural networks.}
}
@InCollection{NatschlaegerETAL:03,
author = {T. Natschlaeger and H. Markram and W. Maass},
title = {Computer Models and Analysis Tools for Neural
Microcircuits},
booktitle = {Neuroscience Databases. A Practical Guide},
publisher = {Kluwer Academic Publishers (Boston)},
year = {2003},
editor = {R. Koetter},
chapter = {9},
pages = {121--136},
abstract = {This chapter surveys web resources regarding computer
models and analysis tools for neural microcircuits. In
particular it describes the features of a new website
(www.lsm.tugraz.at) that facilitates the creation of
computer models for cortical neural microcircuits of
various sizes and levels of detail, as well as tools for
evaluating the computational power of these models in a
Matlab environment.}
}
@Article{NatschlaegerMaass:01a,
author = {T. Natschlaeger and W. Maass},
title = {Computing the Optimally Fitted Spike Train for a Synapse},
journal = {Neural Computation},
year = 2001,
volume = 13,
number = 11,
pages = {2477--2494},
abstract = {Experimental data have shown that synapses are
heterogeneous: different synapses respond with different
sequences of amplitudes of postsynaptic responses to the
same spike train. Neither the role of synaptic dynamics
itself nor the role of the heterogeneity of synaptic
dynamics for computations in neural circuits is well
understood. We present in this article two computational
methods that make it feasible to compute for a given
synapse with known synaptic parameters the spike train that
is optimally fitted to the synapse in a certain sense. With
the help of these methods one can compute for example the
temporal pattern of a spike train (with a given number of
spikes) that produces the largest sum of postsynaptic
responses for a specific synapse. Several other
applications are also discussed. To our surprise we find
that most of these optimally fitted spike trains match
common firing patterns of specific types of neurons that
are discussed in the literature. Hence our analysis
provides a possible functional explanation for the
experimentally observed regularity in the combination of
specific types of synapses with specific types of neurons
in neural circuits. }
}
@InProceedings{NatschlaegerMaass:01b,
author = {T. Natschlaeger and W. Maass},
title = {Finding the Key to a Synapse},
booktitle = {Advances in Neural Information Processing Systems ({NIPS
'2000})},
editor = {Todd K. Leen and Thomas G. Dietterich and Volker Tresp},
year = 2001,
pages = {138--144},
volume = 13,
address = {Cambridge},
publisher = {MIT Press},
abstract = {Experimental data have shown that synapses are
heterogeneous: different synapses respond with different
sequences of amplitudes of postsynaptic responses to the
same presynaptic spike train. Neither the role of synaptic
dynamics itself nor the role of the heterogeneity of
synaptic dynamics for computations in neural circuits is
well understood. We present in this article methods that
make it feasible to compute for a given synapse with known
synaptic parameters the spike train that is optimally
fitted to the synapse, in the sense that it produces the
largest sum of postsynaptic responses. To our surprise we
find that most of these optimally fitted spike trains match
common firing patterns of specific types of neurons that
are discussed in the literature. Hence our analysis
provides a possible functional explanation for the
experimentally observed regularity in the combination of
specific types of synapses with specific types of neurons
in neural circuits.},
htmlnote = {The poster presented at NIPS is available as PDF
file.}
}
@InProceedings{NatschlaegerMaass:03,
author = {T. Natschlaeger and W. Maass},
title = {Information Dynamics and Emergent Computation in Recurrent
Circuits of Spiking Neurons},
booktitle = {Proc. of NIPS 2003, Advances in Neural Information
Processing Systems},
year = {2004},
volume = {16},
pages = {1255--1262},
editor = {S. Thrun and L. Saul and B. Schoelkopf},
publisher = {MIT Press},
address = {Cambridge},
abstract = {An efficient method using Bayesian and linear classifiers
is presented for analyzing the dynamics of information in
high dimensional circuit states, and applied to investigate
emergent computation in generic cortical microcircuit
models. It is shown that such recurrent circuits of spiking
neurons have an inherent capability to carry out rapid
computations on complex spike patterns, merging information
contained in the order of spike arrival with previously
acquired context information.}
}
@Article{NatschlaegerMaass:04,
author = {T. Natschlaeger and W. Maass},
title = {Dynamics of Information and Emergent Computation in
Generic Neural Microcircuit Models},
journal = {Neural Networks},
volume = {18},
number = {10},
pages = {1301--1308},
year = {2005},
abstract = {Numerous methods have already been developed to estimate
the information contained in single spike trains. In this
article we explore efficient methods for estimating the
information contained in the simultaneous firing activity
of hundreds of neurons. Obviously such methods are needed
to analyze data from multi-unit recordings. We test these
methods on generic neural microcircuit models consisting of
800 neurons, and analyze the temporal dynamics of
information about preceding spike inputs in such circuits.
It turns out that information spreads with high speed in
such generic neural microcircuit models, thereby supporting
-- without the postulation of any additional neural or
synaptic mechanisms -- the possibility of ultra-rapid
computations on the first input spikes.}
}
@Article{NatschlaegerMaass:2001,
author = {T. Natschlaeger and W. Maass},
title = {Spiking Neurons and the Induction of Finite State
Machines},
journal = {Theoretical Computer Science: Special Issue on Natural
Computing},
volume = 287,
year = 2002,
pages = {251--265},
abstract = {We discuss in this short survey article some current
mathematical models from neurophysiology for the
computational units of biological neural systems: neurons
and synapses. These models are contrasted with the
computational units of common artificial neural network
models, which reflect the state of knowledge in
neurophysiology 50 years ago. We discuss the problem of
carrying out computations in circuits consisting of
biologically realistic computational units, focusing on the
biologically particularly relevant case of computations on
time series. Finite state machines are frequently used in
computer science as models for computations on time series.
One may argue that these models provide a reasonable common
conceptual basis for analyzing computations in computers
and biological neural systems, although the emphasis in
biological neural systems is shifted more towards
asynchronous computation on analog time series. In the
second half of this article some new computer experiments
and theoretical results are discussed, which address the
question whether a biological neural system can in
principle learn to behave like a given simple finite state
machine. }
}
@InProceedings{NatschlaegerMaass:99,
author = {T. Natschlaeger and W. Maass},
title = {Fast Analog Computation in Networks of Spiking Neurons
Using Unreliable Synapses},
booktitle = {{ESANN'99} Proceedings of the European Symposium on
Artificial Neural Networks},
year = 1999,
address = {Bruges, Belgium},
pages = {417--422},
keywords = {unreliable synapses, universal function approximation,
fast analog computation, time series, hebbian learning,
space rate coding, population activity},
abstract = {We investigate through theoretical analysis and computer
simulations the consequences of unreliable synapses for
fast analog computations in networks of spiking neurons,
with analog variables encoded by the firing activities of
pools of spiking neurons. Our results suggest that the
known unreliability of synaptic transmission may be viewed
as a useful tool for analog computing, rather than as a
``bug'' in neuronal hardware. We also investigate
computations on analog time series encoded by the firing
activities of pools of spiking neurons.}
}
@InProceedings{NesslerETAL:08,
author = {B. Nessler and M. Pfeiffer and W. Maass},
title = {Hebbian learning of {B}ayes optimal decisions},
booktitle = {Proc. of NIPS 2008: Advances in Neural Information
Processing Systems},
year = {2009},
volume = {21},
publisher = {MIT Press},
abstract = {When we perceive our environment, make a decision, or take
an action, our brain has to deal with multiple sources of
uncertainty. The Bayesian framework of statistical
estimation provides computational methods for dealing
optimally with uncertainty. Bayesian inference however is
algorithmically quite complex, and learning of Bayesian
inference involves the storage and updating of probability
tables or other data structures that are hard to implement
in neural networks. Hence it is unclear how our nervous
system could acquire the capability to approximate optimal
Bayesian inference and action selection. This article shows
that the simplest and experimentally best supported type of
synaptic plasticity, Hebbian learning, can in principle
achieve this. Even inference in complex Bayesian networks
can be approximated by Hebbian learning in combination with
population coding and lateral inhibition
(``Winner-Take-All'') in cortical microcircuits that
produce a sparse encoding of complex sensory stimuli. We
also show that a corresponding reward-modulated Hebbian
plasticity rule provides a principled framework for
understanding how Bayesian inference could support fast
reinforcement learning in the brain. In particular we show
that recent experimental results by Yang and Shadlen on
reinforcement learning of probabilistic inference in
primates can be modeled in this way.}
}
@InProceedings{NesslerETAL:10,
  author    = {B. Nessler and M. Pfeiffer and W. Maass},
  title     = {{STDP} enables spiking neurons to detect hidden causes of
               their inputs},
  booktitle = {Proc. of NIPS 2009: Advances in Neural Information
               Processing Systems},
  publisher = {MIT Press},
  year      = {2010},
  volume    = {22},
  pages     = {1357--1365},
  abstract  = {The principles by which spiking neurons contribute to the
               astounding computational power of generic cortical
               microcircuits, and how spike-timing-dependent plasticity
               (STDP) of synaptic weights could generate and maintain this
               computational function, are unknown. We show here that
               STDP, in conjunction with a stochastic soft winner-take-all
               (WTA) circuit, induces spiking neurons to generate through
               their synaptic weights implicit internal models for
               subclasses (or causes) of the high-dimensional spike
               patterns of hundreds of pre-synaptic neurons. Hence these
               neurons will fire after learning whenever the current input
               best matches their internal model. The resulting
               computational function of soft WTA circuits, a common
               network motif of cortical microcircuits, could therefore be
               a drastic dimensionality reduction of information streams,
               together with the autonomous creation of internal models
               for the probability distributions of their input patterns.
               We show that the autonomous generation and maintenance of
               this computational function can be explained on the basis
               of rigorous mathematical principles. In particular, we show
               that STDP is able to approximate a stochastic online
               Expectation-Maximization (EM) algorithm for modeling the
               input data. A corresponding result is shown for Hebbian
               learning in artificial neural networks.}
}
@InProceedings{NeumannETAL:07,
  author    = {G. Neumann and M. Pfeiffer and W. Maass},
  title     = {Efficient Continuous-Time Reinforcement Learning with
               Adaptive State Graphs},
  booktitle = {Proceedings of the 18th European Conference on Machine
               Learning (ECML) and the 11th European Conference on
               Principles and Practice of Knowledge Discovery in Databases
               (PKDD) 2007, Warsaw (Poland)},
  publisher = {Springer},
  address   = {Berlin},
  year      = {2007},
  note      = {in press},
  abstract  = {We present a new reinforcement learning approach for
               deterministic continuous control problems in environments
               with unknown, arbitrary reward functions. The difficulty of
               finding solution trajectories for such problems can be
               reduced by incorporating limited prior knowledge of the
               approximative local system dynamics. The presented
               algorithm builds an adaptive state graph of sample points
               within the continuous state space. The nodes of the graph
               are generated by an efficient principled exploration scheme
               that directs the agent towards promising regions, while
               maintaining good online performance. Global solution
               trajectories are formed as combinations of local
               controllers that connect nodes of the graph, thereby
               naturally allowing continuous actions and continuous time
               steps. We demonstrate our approach on various movement
               planning tasks in continuous domains.}
}
@InProceedings{NeumannETAL:09,
  author    = {G. Neumann and W. Maass and J. Peters},
  title     = {Learning Complex Motions by Sequencing Simpler Motion
               Templates},
  booktitle = {Proc. of the 26th Int. Conf. on Machine Learning (ICML
               2009), Montreal},
  year      = {2009},
  abstract  = {Abstraction of complex, longer motor tasks into simpler
               elemental movements enables humans and animals to exhibit
               motor skills which have not yet been matched by robots.
               Humans intuitively decompose complex motions into smaller,
               simpler segments. For example when describing simple
               movements like drawing a triangle with a pen, we can easily
               name the basic steps of this movement. Surprisingly, such
               abstractions have rarely been used in artificial motor
               skill learning algorithms. These algorithms typically
               choose a new action (such as a torque or a force) at a very
               fast time-scale. As a result, both policy and temporal
               credit assignment problem become unnecessarily complex -
               often beyond the reach of current machine learning methods.
               We introduce a new framework for temporal abstractions in
               reinforcement learning (RL), i.e. RL with motion templates.
               We present a new algorithm for this framework which can
               learn high-quality policies by making only few abstract
               decisions.}
}
@Unpublished{NikolicETAL:06,
  author = {D. Nikolic and S. Haeusler and W. Singer and W. Maass},
  title  = {Temporal dynamics of informational contents carried by
            neurons in the primary visual cortex},
  note   = {submitted for publication},
  year   = {2006}
}
@Unpublished{NikolicETAL:07,
  author = {D. Nikolic and S. Haeusler and W. Singer and W. Maass},
  title  = {Distributed fading memory for stimulus properties in the
            primary visual cortex},
  note   = {submitted for publication},
  year   = {2009}
}
@InProceedings{NikolicETAL:07a,
  author    = {D. Nikoli\'{c} and S. Haeusler and W. Singer and W. Maass},
  title     = {Temporal dynamics of information content carried by
               neurons in the primary visual cortex},
  booktitle = {Proc. of NIPS 2006, Advances in Neural Information
               Processing Systems},
  publisher = {MIT Press},
  year      = {2007},
  volume    = {19},
  pages     = {1041--1048},
  abstract  = {We use multi-electrode recordings from cat primary visual
               cortex and investigate whether a simple linear classifier
               can extract information about the presented stimuli. We
               find that information is extractable and that it even lasts
               for several hundred milliseconds after the stimulus has
               been removed. In a fast sequence of stimulus presentation,
               information about both new and old stimuli is present
               simultaneously and nonlinear relations between these
               stimuli can be extracted. These results suggest nonlinear
               properties of cortical representations. The implications
               of these properties for the nonlinear brain theory are
               discussed.}
}
@Article{NikolicETAL:09,
  author   = {D. Nikolic and S. Haeusler and W. Singer and W. Maass},
  title    = {Distributed fading memory for stimulus properties in the
              primary visual cortex},
  journal  = {PLoS Biology},
  year     = {2009},
  volume   = {7},
  number   = {12},
  pages    = {1--19},
  abstract = {It is currently not known how distributed neuronal
              responses in early visual areas carry stimulus-related
              information. We made multi-electrode recordings from cat
              primary visual cortex and applied methods from machine
              learning in order to analyze the temporal evolution of
              stimulus-related information in the spiking activity of
              large ensembles of around 100 neurons. We used sequences of
              up to three different visual stimuli (letters) presented
              for 100 ms and with intervals of 100 ms or larger. Most of
              the information about visual stimuli extractable by
              sophisticated methods of machine learning, i.e. support
              vector machines with non-linear kernel functions, was also
              extractable by simple linear classification such as can be
              achieved by individual neurons. New stimuli did not erase
              information about previous stimuli. The responses to the
              most recent stimulus contained about equal amounts of
              information about both this and the preceding stimulus.
              This information was encoded both in the discharge rates
              (response amplitudes) of the ensemble of neurons and, when
              using short time-constants for integration (e.g., 20 ms),
              in the precise timing of individual spikes ($\leq 20$ ms),
              and persisted for several 100 ms beyond the offset of
              stimuli. The results indicate that the network from which
              we recorded is endowed with fading memory and is capable of
              performing online computations utilizing information about
              temporally sequential stimuli. This result challenges
              models assuming frame-by-frame analyses of sequential
              inputs.},
  htmlnote = {(Journal link to PDF)}
}
@techreport{PecevskiETAL:11,
  author      = {D. Pecevski and L. B\"using and W. Maass},
  title       = {Networks of spiking neurons are able to carry out
                 probabilistic inference in general graphical models through
                 their inherent stochastic dynamics},
  institution = {Graz University of Technology},
  year        = {2011}
}
@article{PecevskiETAL:11a,
  author   = {D. Pecevski and L. B\"using and W. Maass},
  title    = {Probabilistic Inference in General Graphical Models
              through Sampling in Stochastic Networks of Spiking
              Neurons},
  journal  = {PLoS Computational Biology},
  volume   = {7},
  number   = {12},
  pages    = {e1002294},
  year     = {2011},
  abstract = {An important open problem of computational neuroscience is
              the generic organization of computations in networks of
              neurons in the brain. We show here through rigorous
              theoretical analysis that inherent stochastic features of
              spiking neurons, in combination with simple nonlinear
              computational operations in specific network motifs and
              dendritic arbors, enable networks of spiking neurons to
              carry out probabilistic inference through sampling in
              general graphical models. In particular, it enables them to
              carry out probabilistic inference in Bayesian networks with
              converging arrows (explaining away) and with undirected
              loops, that occur in many real-world tasks. Ubiquitous
              stochastic features of networks of spiking neurons, such as
              trial-to-trial variability and spontaneous activity, are
              necessary ingredients of the underlying computational
              organization. We demonstrate through computer simulations
              that this approach can be scaled up to neural emulations of
              probabilistic inference in fairly large graphical models,
              yielding some of the most complex computations that have
              been carried out so far in networks of spiking neurons.},
  htmlnote = {(Journal link to PDF)}
}
@Article{PfeifferETAL:09,
  author  = {M. Pfeiffer and B. Nessler and W. Maass and R. J.
             Douglas},
  title   = {Reward-modulated {H}ebbian Learning of Optimal Decision
             Making},
  journal = {Neural Computation},
  year    = {2009},
  internal-note = {apparent duplicate of PfeifferETAL:09c (published
                   2010 version with volume and pages); consider merging}
}
@Unpublished{PfeifferETAL:09b,
  author = {M. Pfeiffer and B. Nessler and W. Maass},
  title  = {{STDP} approximates Expectation Maximization in networks
            of spiking neurons with lateral inhibition},
  note   = {in preparation},
  year   = {2009}
}
@Article{PfeifferETAL:09c,
  author   = {M. Pfeiffer and B. Nessler and R. Douglas and W. Maass},
  title    = {Reward-modulated {H}ebbian {L}earning of {D}ecision
              {M}aking},
  journal  = {Neural Computation},
  year     = {2010},
  volume   = {22},
  pages    = {1399--1444},
  abstract = {We introduce a framework for decision making in which the
              learning of decision making is reduced to its simplest
              and biologically most plausible form: Hebbian learning on a
              linear neuron. We cast our Bayesian-Hebb learning rule as
              reinforcement learning in which certain decisions are
              rewarded and prove that each synaptic weight will on
              average converge exponentially fast to the log-odd of
              receiving a reward when its pre- and postsynaptic neurons
              are active. In our simple architecture, a particular action
              is selected from the set of candidate actions by a
              winner-take-all operation. The global reward assigned to
              this action then modulates the update of each synapse.
              Apart from this global reward signal, our reward-modulated
              Bayesian Hebb rule is a pure Hebb update that depends only
              on the coactivation of the pre- and postsynaptic neurons,
              not on the weighted sum of all presynaptic inputs to the
              postsynaptic neuron as in the perceptron learning rule or
              the Rescorla-Wagner rule. This simple approach to
              action-selection learning requires that information about
              sensory inputs be presented to the Bayesian decision stage
              in a suitably preprocessed form resulting from other
              adaptive processes (acting on a larger timescale) that
              detect salient dependencies among input features. Hence our
              proposed framework for fast learning of decisions also
              provides interesting new hypotheses regarding neural nodes
              and computational goals of cortical areas that provide
              input to the final decision stage.}
}
@Article{PfeifferETAL:12,
  author   = {M. Pfeiffer and M. Hartbauer and A. B. Lang and W. Maass
              and H. R\"omer},
  title    = {Probing Real Sensory Worlds of Receivers with Unsupervised
              Clustering},
  journal  = {PLoS ONE},
  year     = {2012},
  volume   = {7},
  number   = {6},
  pages    = {e37354},
  doi      = {10.1371/journal.pone.0037354},
  abstract = {The task of an organism to extract information about the
              external environment from sensory signals is based entirely
              on the analysis of ongoing afferent spike activity provided
              by the sense organs. We investigate the processing of
              auditory stimuli by an acoustic interneuron of insects. In
              contrast to most previous work we do this by using stimuli
              and neurophysiological recordings directly in the nocturnal
              tropical rainforest, where the insect communicates.
              Different from typical recordings in sound proof
              laboratories, strong environmental noise from multiple
              sound sources interferes with the perception of acoustic
              signals in these realistic scenarios. We apply a recently
              developed unsupervised machine learning algorithm based on
              probabilistic inference to find frequently occurring firing
              patterns in the response of the acoustic interneuron. We
              can thus ask how much information the central nervous
              system of the receiver can extract from bursts without ever
              being told which type and which variants of bursts are
              characteristic for particular stimuli. Our results show
              that the reliability of burst coding in the time domain is
              so high that identical stimuli lead to extremely similar
              spike pattern responses, even for different preparations on
              different dates, and even if one of the preparations is
              recorded outdoors and the other one in the sound proof lab.
              Simultaneous recordings in two preparations exposed to the
              same acoustic environment reveal that characteristics of
              burst patterns are largely preserved among individuals of
              the same species. Our study shows that burst coding can
              provide a reliable mechanism for acoustic insects to
              classify and discriminate signals under very noisy
              real-world conditions. This gives new insights into the
              neural mechanisms potentially used by bushcrickets to
              discriminate conspecific songs from sounds of predators in
              similar carrier frequency bands.},
  htmlnote = {(Journal link to PDF)}
}
@techreport{RaschETAL:06,
  author      = {M. Rasch and S. Haeusler and Z. Kisvarday and
                 W. Maass and N. Logothetis},
  title       = {Comparison of a detailed model for area {V}1 with
                 simultaneous recordings from {LGN} and {V}1},
  institution = {Technische Universitaet Graz and MPI Tuebingen},
  year        = {2006}
}
@techreport{RaschETAL:06b,
  author      = {M. Rasch and A. Gretton and Y. Murayama and
                 W. Maass and N. Logothetis},
  title       = {Interaction of local field potential and spiking activity
                 in area {V}1},
  institution = {Technische Universitaet Graz and MPI Tuebingen},
  year        = {2006}
}
@Article{RaschETAL:07,
  author   = {M. J. Rasch and A. Gretton and Y. Murayama and W. Maass
              and N. K. Logothetis},
  title    = {Inferring spike trains from local field potentials},
  journal  = {Journal of Neurophysiology},
  year     = {2008},
  volume   = {99},
  pages    = {1461--1476},
  abstract = {We investigated whether it is possible to infer spike
              trains solely on the basis of the underlying local field
              potentials ({LFP}s). Employing support vector machines and
              linear regression models, we found that in the primary
              visual cortex ({V}1) of monkeys, spikes can indeed be
              inferred from {LFP}s, at least with moderate success.
              Although there is a considerable degree of variation across
              electrodes, the low-frequency structure in spike trains (in
              the 100 ms range) can be inferred with reasonable accuracy,
              whereas exact spike positions are not reliably predicted.
              Two kinds of features of the {LFP} are exploited for
              prediction: the frequency power of bands in the high
              $\gamma$-range (40-90 {H}z), and information contained in
              low-frequency oscillations (<10 {H}z), where both phase and
              power modulations are informative. Information analysis
              revealed that both features code (mainly) independent
              aspects of the spike-to-{LFP} relationship, with the
              low-frequency {LFP} phase coding for temporally clustered
              spiking activity. Although both features and prediction
              quality are similar during semi-natural movie stimuli and
              spontaneous activity, prediction performance during
              spontaneous activity degrades much more slowly with
              increasing electrode distance. The general trend of data
              obtained with anesthetized animals is qualitatively
              mirrored in that of a more limited data set recorded in
              {V}1 of awake monkeys. In contrast to the cortical field
              potentials, thalamic {LFP}s (e.g. {LFP}s derived from
              recordings in d{LGN}) hold no useful information for
              predicting spiking activity.}
}
@Unpublished{RaschETAL:09,
  author = {M. J. Rasch and K. Schuch and N. K. Logothetis and W.
            Maass},
  title  = {Statistical characterization of the spike response to
            natural stimuli in monkey area {V}1 and in a detailed model
            for a patch of {V}1},
  note   = {submitted for publication},
  year   = {2009}
}
@Article{RaschETAL:10,
  author   = {M. J. Rasch and K. Schuch and N. K. Logothetis and W.
              Maass},
  title    = {Statistical Comparison of Spike Responses to Natural
              Stimuli in Monkey Area {V}1 With Simulated Responses of a
              Detailed Laminar Network Model for a Patch of {V}1},
  journal  = {Journal of Neurophysiology},
  year     = {2011},
  volume   = {105},
  pages    = {757--778},
  abstract = {A major goal of computational neuroscience is the creation
              of computer models for cortical areas whose response to
              sensory stimuli resembles that of cortical areas in vivo in
              important aspects. It is seldom considered whether the
              simulated spiking activity is realistic (in a statistical
              sense) in response to natural stimuli. Because certain
              statistical properties of spike responses were suggested to
              facilitate computations in the cortex, acquiring a
              realistic firing regimen in cortical network models might
              be a prerequisite for analyzing their computational
              functions. We present a characterization and comparison of
              the statistical response properties of the primary visual
              cortex (V1) in vivo and in silico in response to natural
              stimuli. We recorded from multiple electrodes in area V1 of
              4 macaque monkeys and developed a large state-of-the-art
              network model for a 5 x 5-mm patch of V1 composed of 35,000
              neurons and 3.9 million synapses that integrates previously
              published anatomical and physiological details. By
              quantitative comparison of the model response to the
              ``statistical fingerprint'' of responses in vivo, we find
              that our model for a patch of V1 responds to the same movie
              in a way which matches the statistical structure of the
              recorded data surprisingly well. The deviation between the
              firing regimen of the model and the in vivo data are on the
              same level as deviations among monkeys and sessions. This
              suggests that, despite strong simplifications and
              abstractions of cortical network models, they are
              nevertheless capable of generating realistic spiking
              activity. To reach a realistic firing state, it was not
              only necessary to include both N-methyl-D-aspartate and
              GABAB synaptic conductances in our model, but also to
              markedly increase the strength of excitatory synapses onto
              inhibitory neurons (more than 2-fold) in comparison to
              literature values, hinting at the importance to carefully
              adjust the effect of inhibition for achieving realistic
              dynamics in current network models.},
  htmlnote = {(Commentary by W.S. Anderson and B. Kreiman in Current
              Biology 2011 PDF)}
}
@Article{SteimerETAL:09,
  author   = {A. Steimer and W. Maass and R. Douglas},
  title    = {Belief-propagation in networks of spiking neurons},
  journal  = {Neural Computation},
  year     = {2009},
  volume   = {21},
  pages    = {2502--2523},
  abstract = {From a theoretical point of view, statistical inference is
              an attractive model of brain operation. However, it is
              unclear how to implement these inferential processes in
              neuronal networks. We offer a solution to this problem by
              showing in detailed simulations how the Belief-Propagation
              algorithm on a factor graph can be embedded in a network of
              spiking neurons. We use pools of spiking neurons as the
              function nodes of the factor graph. Each pool gathers
              'messages' in the form of population activities from its
              input nodes and combines them through its network dynamics.
              The various output messages to be transmitted over the
              edges of the graph are each computed by a group of readout
              neurons that feed in their respective destination pools. We
              use this approach to implement two examples of factor
              graphs. The first example is drawn from coding theory. It
              models the transmission of signals through an unreliable
              channel and demonstrates the principles and generality of
              our network approach. The second, more applied example, is
              of a psychophysical mechanism in which visual cues are used
              to resolve hypotheses about the interpretation of an
              object's shape and illumination. These two examples, and
              also a statistical analysis, all demonstrate good agreement
              between the performance of our networks and the direct
              numerical evaluation of belief-propagation.}
}
@Article{SteinbauerETAL:02,
  author   = {G. Steinbauer and R. Koholka and W. Maass},
  title    = {A very short story about autonomous robots},
  journal  = {Special Issue on Foundations of Information Processing of
              {TELEMATIK}},
  year     = {2002},
  volume   = {8},
  number   = {1},
  pages    = {26--29},
  abstract = {Machine learning for autonomous mobile robots is a very
              interesting but also very difficult problem. In this domain
              you need robust, fast and efficient learning algorithms.
              The combination of machine learning and mobile robots
              allows us to create machines whose performance surpasses
              both explicitly programmed robots and humans. Although
              humans can learn, their sensors and actuators are in
              several aspects inferior to those of robots. On the other
              hand a non-learning robot can only perform tasks where all
              the individual difficulties and complexities can be
              anticipated by the programmer of the robot. In this short
              article we discuss both a completed project, and a new one
              that has just started. The first project produced a robot
              that applied learning to improve his minigolf skills. The
              new project is the creation of the first Austrian team of
              autonomous robot soccer players that will compete in the
              robot soccer world championship RoboCup.}
}
@Article{SussilloETAL:07,
  author   = {D. Sussillo and T. Toyoizumi and W. Maass},
  title    = {Self-tuning of neural circuits through short-term synaptic
              plasticity},
  journal  = {Journal of Neurophysiology},
  year     = {2007},
  volume   = {97},
  pages    = {4079--4095},
  abstract = {Circuits of neurons in the cortex have a remarkable
              capability to maintain functional and dynamic stability in
              spite of changes in the level of external inputs, synaptic
              plasticity and changes in the circuit structure that occur
              during development and adult learning. The source of this
              characteristic stability of cortical circuits has remained
              a mystery, especially since even strongly simplified models
              of such circuits do not exhibit similar stability
              properties. One simplification that is usually made in such
              models is that the empirically found nonlinear and diverse
              inherent short term dynamics (paired-pulse facilitation and
              depression) of biological synapses is replaced by static
              and uniform linear synapse models. We show in this article
              that this is a mistake, since the complex and diverse
              nonlinear dynamics of biological synapses supports the
              implementation of powerful control principles that endow
              circuits of spiking neurons with almost in-vivo like
              stability properties.},
  htmlnote = {(Supplementary material PDF)}
}
@article{UchizawaETAL:06,
  author   = {K. Uchizawa and R. Douglas and W. Maass},
  title    = {On the Computational Power of Threshold Circuits with
              Sparse Activity},
  journal  = {Neural Computation},
  volume   = {18},
  number   = {12},
  pages    = {2994--3008},
  year     = {2006},
  abstract = {Circuits composed of threshold gates (McCulloch-Pitts
              neurons, or perceptrons) are simplified models of neural
              circuits with the advantage that they are theoretically
              more tractable than their biological counterparts. However,
              when such threshold circuits are designed to perform a
              specific computational task they usually differ in one
              important respect from computations in the brain: they
              require very high activity. On average every second
              threshold gate fires (sets a ``1'' as output) during a
              computation. By contrast, the activity of neurons in the
              brain is much more sparse, with only about 1\% of neurons
              firing. This mismatch between threshold and neuronal
              circuits is due to the particular complexity measures
              (circuit size and circuit depth) that have been minimized
              in previous threshold circuit constructions. In this
              article we investigate a new complexity measure for
              threshold circuits, {\em energy complexity}, whose
              minimization yields computations with sparse activity. We
              prove that all computations by threshold circuits of
              polynomial size with entropy $O(\log n)$ can be
              restructured so that their energy complexity is reduced to
              a level near the {\em entropy of circuit states}. This
              entropy of circuit states is a novel circuit complexity
              measure, which is of interest not only in the context of
              threshold circuits, but for circuit complexity in general.
              As an example of how this measure can be applied we show
              that any polynomial size threshold circuit with entropy
              $O(\log n)$ can be simulated by a polynomial size threshold
              circuit of depth 3.
              Our results demonstrate that the structure of circuits
              which result from a minimization of their energy complexity
              is quite different from the structure which results from a
              minimization of previously considered complexity measures,
              and potentially closer to the structure of neural circuits
              in the nervous system. In particular, different pathways
              are activated in these circuits for different classes of
              inputs. This article shows that such circuits with sparse
              activity have a surprisingly large computational power.}
}
@inproceedings{UchizawaETAL:06a,
  author    = {K. Uchizawa and R. Douglas and W. Maass},
  title     = {Energy Complexity and Entropy of Threshold Circuits},
  booktitle = {Proceedings of the 33rd International Colloquium on
               Automata, Languages and Programming, ICALP (1) 2006,
               Venice, Italy, July 10-14, 2006, Part I},
  editor    = {M. Bugliesi and B. Preneel and V. Sassone and I. Wegener},
  series    = {Lecture Notes in Computer Science},
  volume    = {4051},
  pages     = {631--642},
  publisher = {Springer},
  year      = {2006},
  abstract  = {Circuits composed of threshold gates (McCulloch-Pitts
               neurons, or perceptrons) are simplified models of neural
               circuits with the advantage that they are theoretically
               more tractable than their biological counterparts. However,
               when such threshold circuits are designed to perform a
               specific computational task they usually differ in one
               important respect from computations in the brain: they
               require very high activity. On average every second
               threshold gate fires (sets a ``1'' as output) during a
               computation. By contrast, the activity of neurons in the
               brain is much more sparse, with only about 1\% of neurons
               firing. This mismatch between threshold and neuronal
               circuits is due to the particular complexity measures
               (circuit size and circuit depth) that have been minimized
               in previous threshold circuit constructions. In this
               article we investigate a new complexity measure for
               threshold circuits, {\em energy complexity}, whose
               minimization yields computations with sparse activity. We
               prove that all computations by threshold circuits of
               polynomial size with entropy $O(\log n)$ can be
               restructured so that their energy complexity is reduced to
               a level near the {\em entropy of circuit states}. This
               entropy of circuit states is a novel circuit complexity
               measure, which is of interest not only in the context of
               threshold circuits, but for circuit complexity in general.
               As an example of how this measure can be applied we show
               that any polynomial size threshold circuit with entropy
               $O(\log n)$ can be simulated by a polynomial size threshold
               circuit of depth 3.}
}
@techreport{WinterETAL:06,
  author      = {M. Winter and H. Bischof and M. Rasch and W. Maass},
  title       = {Results and open problems regarding the role of local
                 feature descriptors for image classification in computer
                 vision, and image representations in primary visual
                 cortex},
  institution = {Technische Universitaet Graz},
  year        = {2006}
}