2013
Luengo, David; Via, Javier; Monzon, Sandra; Trigano, Tom; Artés-Rodríguez, Antonio
Cross-Products LASSO Proceedings Article
En: 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 6118–6122, IEEE, Vancouver, 2013, ISSN: 1520-6149.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, approximation theory, concave programming, convex programming, Cost function, cross-product LASSO cost function, Dictionaries, dictionary, Encoding, LASSO, learning (artificial intelligence), negative co-occurrence, negative cooccurrence phenomenon, nonconvex optimization problem, Signal processing, signal processing application, signal reconstruction, sparse coding, sparse learning approach, Sparse matrices, sparsity-aware learning, successive convex approximation, Vectors
@inproceedings{Luengo2013,
  title     = {Cross-Products {LASSO}},
  author    = {Luengo, David and Via, Javier and Monzon, Sandra and Trigano, Tom and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6638840},
  issn      = {1520-6149},
  year      = {2013},
  date      = {2013-01-01},
  booktitle = {2013 {IEEE} International Conference on Acoustics, Speech and Signal Processing},
  pages     = {6118--6122},
  publisher = {IEEE},
  address   = {Vancouver},
  abstract  = {Negative co-occurrence is a common phenomenon in many signal processing applications. In some cases the signals involved are sparse, and this information can be exploited to recover them. In this paper, we present a sparse learning approach that explicitly takes into account negative co-occurrence. This is achieved by adding a novel penalty term to the LASSO cost function based on the cross-products between the reconstruction coefficients. Although the resulting optimization problem is non-convex, we develop a new and efficient method for solving it based on successive convex approximations. Results on synthetic data, for both complete and overcomplete dictionaries, are provided to validate the proposed approach.},
  keywords  = {Approximation methods, approximation theory, concave programming, convex programming, Cost function, cross-product LASSO cost function, Dictionaries, dictionary, Encoding, LASSO, learning (artificial intelligence), negative co-occurrence, negative cooccurrence phenomenon, nonconvex optimization problem, Signal processing, signal processing application, signal reconstruction, sparse coding, sparse learning approach, Sparse matrices, sparsity-aware learning, successive convex approximation, Vectors},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2012
Garcia-Moreno, Pablo; Artés-Rodríguez, Antonio; Hansen, Lars Kai
A Hold-out Method to Correct PCA Variance Inflation Proceedings Article
En: 2012 3rd International Workshop on Cognitive Information Processing (CIP), pp. 1–6, IEEE, Baiona, 2012, ISBN: 978-1-4673-1878-5.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, classification scenario, computational complexity, computational cost, Computational efficiency, correction method, hold-out method, hold-out procedure, leave-one-out procedure, LOO method, LOO procedure, Mathematical model, PCA algorithm, PCA variance inflation, Principal component analysis, singular value decomposition, Standards, SVD, Training
@inproceedings{Garcia-Moreno2012,
  title     = {A Hold-out Method to Correct {PCA} Variance Inflation},
  author    = {Garcia-Moreno, Pablo and Art{\'e}s-Rodr{\'\i}guez, Antonio and Hansen, Lars Kai},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6232926},
  isbn      = {978-1-4673-1878-5},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {2012 3rd International Workshop on Cognitive Information Processing ({CIP})},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Baiona},
  abstract  = {In this paper we analyze the problem of variance inflation experienced by the PCA algorithm when working in an ill-posed scenario where the dimensionality of the training set is larger than its sample size. In an earlier article a correction method based on a Leave-One-Out (LOO) procedure was introduced. We propose a Hold-out procedure whose computational cost is lower and, unlike the LOO method, the number of SVD's does not scale with the sample size. We analyze its properties from a theoretical and empirical point of view. Finally we apply it to a real classification scenario.},
  keywords  = {Approximation methods, classification scenario, computational complexity, computational cost, Computational efficiency, correction method, hold-out method, hold-out procedure, leave-one-out procedure, LOO method, LOO procedure, Mathematical model, PCA algorithm, PCA variance inflation, Principal component analysis, singular value decomposition, Standards, SVD, Training},
  pubstate  = {published},
  tppubtype = {inproceedings}
}