## 2013

## Inproceedings

@inproceedings{Luengo2013,
  title     = {Cross-Products {LASSO}},
  author    = {Luengo, David and Via, Javier and Monzon, Sandra and Trigano, Tom and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  booktitle = {2013 {IEEE} International Conference on Acoustics, Speech and Signal Processing},
  pages     = {6118--6122},
  publisher = {IEEE},
  address   = {Vancouver},
  year      = {2013},
  issn      = {1520-6149},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6638840},
  abstract  = {Negative co-occurrence is a common phenomenon in many signal processing applications. In some cases the signals involved are sparse, and this information can be exploited to recover them. In this paper, we present a sparse learning approach that explicitly takes into account negative co-occurrence. This is achieved by adding a novel penalty term to the LASSO cost function based on the cross-products between the reconstruction coefficients. Although the resulting optimization problem is non-convex, we develop a new and efficient method for solving it based on successive convex approximations. Results on synthetic data, for both complete and overcomplete dictionaries, are provided to validate the proposed approach.},
  keywords  = {Approximation methods, approximation theory, concave programming, convex programming, Cost function, cross-product LASSO cost function, Dictionaries, dictionary, Encoding, LASSO, learning (artificial intelligence), negative co-occurrence, negative cooccurrence phenomenon, nonconvex optimization problem, Signal processing, signal processing application, signal reconstruction, sparse coding, sparse learning approach, Sparse matrices, sparsity-aware learning, successive convex approximation, Vectors},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

## 2009

## Inproceedings

@inproceedings{Miguez2009,
  title     = {Sequential {Monte Carlo} Optimization Using Artificial State-Space Models},
  author    = {Miguez, Joaquin and Maiz, Cristina S. and Djuric, Petar M. and Crisan, Dan},
  booktitle = {2009 {IEEE} 13th Digital Signal Processing Workshop and 5th {IEEE} Signal Processing Education Workshop},
  pages     = {268--273},
  publisher = {IEEE},
  address   = {Marco Island, FL},
  year      = {2009},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785933},
  abstract  = {We introduce a method for sequential minimization of a certain class of (possibly non-convex) cost functions with respect to a high dimensional signal of interest. The proposed approach involves the transformation of the optimization problem into one of estimation in a discrete-time dynamical system. In particular, we describe a methodology for constructing an artificial state-space model which has the signal of interest as its unobserved dynamic state. The model is ``adapted'' to the cost function in the sense that the maximum a posteriori (MAP) estimate of the system state is also a global minimizer of the cost function. The advantage of the estimation framework is that we can draw from a pool of sequential Monte Carlo methods, for particle approximation of probability measures in dynamic systems, that enable the numerical computation of MAP estimates. We provide examples of how to apply the proposed methodology, including some illustrative simulation results.},
  keywords  = {Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

@inproceedings{Goez2009,
  title     = {Training of Neural Classifiers by Separating Distributions at the Hidden Layer},
  author    = {Goez, Roger and Lazaro, Marcelino},
  booktitle = {2009 {IEEE} International Workshop on Machine Learning for Signal Processing},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Grenoble},
  year      = {2009},
  isbn      = {978-1-4244-4947-7},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5306240},
  abstract  = {A new cost function for training of binary classifiers based on neural networks is proposed. This cost function aims at separating the distributions for patterns of each class at the output of the hidden layer of the network. It has been implemented in a Generalized Radial Basis Function (GRBF) network and its performance has been evaluated under three different databases, showing advantages with respect to the conventional Mean Squared Error (MSE) cost function. With respect to the Support Vector Machine (SVM) classifier, the proposed method has also advantages both in terms of performance and complexity.},
  keywords  = {Artificial neural networks, Bayesian methods, Cost function, Curve fitting, Databases, Function approximation, Neural networks, Speech recognition, Support vector machine classification, Support vector machines},
  pubstate  = {published},
  tppubtype = {inproceedings},
}