@article{eprints1751,
  author    = {Giorgio Gnecco and V{\v e}ra K{\r u}rkov{\'a} and Marcello Sanguineti},
  title     = {Can Dictionary-Based Computational Models Outperform the Best Linear Ones?},
  journal   = {Neural Networks},
  publisher = {Elsevier},
  note      = {Special Issue ``Artificial Neural Networks: Selected Papers from ICANN 2010''},
  year      = {2011},
  volume    = {24},
  number    = {8},
  pages     = {881--887},
  keywords  = {Dictionary-based approximation; Linear approximation; Rates of approximation; Worst-case error; Kolmogorov width; Perceptron networks},
  url       = {http://eprints.imtlucca.it/1751/},
  abstract  = {Approximation capabilities of two types of computational models are explored: dictionary-based models (i.e., linear combinations of $n$-tuples of basis functions computable by units belonging to a set called ``dictionary'') and linear ones (i.e., linear combinations of $n$ fixed basis functions). The two models are compared in terms of approximation rates, i.e., speeds of decrease of approximation errors for a growing number $n$ of basis functions. Proofs of upper bounds on approximation rates by dictionary-based models are inspected to show that, for individual functions, they do not imply estimates for dictionary-based models that do not also hold for some linear models. Instead, the possibility of achieving faster approximation rates with dictionary-based models is demonstrated for worst-case errors in the approximation of suitable sets of functions. For such sets, even geometric upper bounds hold.}
}