2009
Vinuelas-Peris, Pablo; Artés-Rodríguez, Antonio
Sensing Matrix Optimization in Distributed Compressed Sensing Proceedings Article
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 638–641, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas: Compressed sensing, Computer Simulation, computer simulations, correlated signal, Correlated signals, correlation theory, Dictionaries, distributed coding strategy, distributed compressed sensing, Distributed control, efficient projection method, Encoding, joint recovery method, Matching pursuit algorithms, Optimization methods, orthogonal matching pursuit, Projection Matrix Optimization, sensing matrix optimization, Sensor Network, Sensor phenomena and characterization, Sensor systems, Signal processing, Sparse matrices, Technological innovation
@inproceedings{Vinuelas-Peris2009,
  title     = {Sensing Matrix Optimization in Distributed Compressed Sensing},
  author    = {Vinuelas-Peris, Pablo and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278496},
  isbn      = {978-1-4244-2709-3},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {2009 {IEEE/SP} 15th Workshop on Statistical Signal Processing},
  pages     = {638--641},
  publisher = {IEEE},
  address   = {Cardiff},
  abstract  = {Distributed compressed sensing (DCS) seeks to simultaneously measure signals that are each individually sparse in some domain(s) and also mutually correlated. In this paper we consider the scenario in which the (overcomplete) bases for common component and innovations are different. We propose and analyze a distributed coding strategy for the common component, and also the use of efficient projection (EP) method for optimizing the sensing matrices in this setting. We show the effectiveness of our approach by computer simulations using the orthogonal matching pursuit (OMP) as joint recovery method, and we discuss the configuration of the distribution strategy.},
  keywords  = {Compressed sensing, Computer Simulation, computer simulations, correlated signal, Correlated signals, correlation theory, Dictionaries, distributed coding strategy, distributed compressed sensing, Distributed control, efficient projection method, Encoding, joint recovery method, Matching pursuit algorithms, Optimization methods, orthogonal matching pursuit, Projection Matrix Optimization, sensing matrix optimization, Sensor Network, Sensor phenomena and characterization, Sensor systems, Signal processing, Sparse matrices, Technological innovation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Miguez, Joaquin; Maiz, Cristina S; Djuric, Petar M; Crisan, Dan
Sequential Monte Carlo Optimization Using Artificial State-Space Models Proceedings Article
En: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 268–273, IEEE, Marco Island, FL, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization
@inproceedings{Miguez2009,
  title     = {Sequential {Monte Carlo} Optimization Using Artificial State-Space Models},
  author    = {Miguez, Joaquin and Maiz, Cristina S and Djuric, Petar M and Crisan, Dan},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785933},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {2009 {IEEE} 13th Digital Signal Processing Workshop and 5th {IEEE} Signal Processing Education Workshop},
  pages     = {268--273},
  publisher = {IEEE},
  address   = {Marco Island, FL},
  abstract  = {We introduce a method for sequential minimization of a certain class of (possibly non-convex) cost functions with respect to a high dimensional signal of interest. The proposed approach involves the transformation of the optimization problem into one of estimation in a discrete-time dynamical system. In particular, we describe a methodology for constructing an artificial state-space model which has the signal of interest as its unobserved dynamic state. The model is ``adapted'' to the cost function in the sense that the maximum a posteriori (MAP) estimate of the system state is also a global minimizer of the cost function. The advantage of the estimation framework is that we can draw from a pool of sequential Monte Carlo methods, for particle approximation of probability measures in dynamic systems, that enable the numerical computation of MAP estimates. We provide examples of how to apply the proposed methodology, including some illustrative simulation results.},
  keywords  = {Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2007
Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio
Maximization of Mutual Information for Supervised Linear Feature Extraction Artículo de revista
En: IEEE Transactions on Neural Networks, vol. 18, no 5, pp. 1433–1441, 2007, ISSN: 1045-9227.
Resumen | Enlaces | BibTeX | Etiquetas: Algorithms, Artificial Intelligence, Automated, component-by-component gradient-ascent method, Computer Simulation, Data Mining, Entropy, Feature extraction, gradient methods, gradient-based entropy, Independent component analysis, Information Storage and Retrieval, information theory, Iron, learning (artificial intelligence), Linear discriminant analysis, Linear Models, Mutual information, Optimization methods, Pattern recognition, Reproducibility of Results, Sensitivity and Specificity, supervised linear feature extraction, Vectors
@article{Leiva-Murillo2007,
  title     = {Maximization of Mutual Information for Supervised Linear Feature Extraction},
  author    = {Leiva-Murillo, Jose M and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4298118},
  issn      = {1045-9227},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {IEEE Transactions on Neural Networks},
  volume    = {18},
  number    = {5},
  pages     = {1433--1441},
  publisher = {IEEE},
  abstract  = {In this paper, we present a novel scheme for linear feature extraction in classification. The method is based on the maximization of the mutual information (MI) between the features extracted and the classes. The sum of the MI corresponding to each of the features is taken as an heuristic that approximates the MI of the whole output vector. Then, a component-by-component gradient-ascent method is proposed for the maximization of the MI, similar to the gradient-based entropy optimization used in independent component analysis (ICA). The simulation results show that not only is the method competitive when compared to existing supervised feature extraction methods in all cases studied, but it also remarkably outperform them when the data are characterized by strongly nonlinear boundaries between classes.},
  keywords  = {Algorithms, Artificial Intelligence, Automated, component-by-component gradient-ascent method, Computer Simulation, Data Mining, Entropy, Feature extraction, gradient methods, gradient-based entropy, Independent component analysis, Information Storage and Retrieval, information theory, Iron, learning (artificial intelligence), Linear discriminant analysis, Linear Models, Mutual information, Optimization methods, Pattern recognition, Reproducibility of Results, Sensitivity and Specificity, supervised linear feature extraction, Vectors},
  pubstate  = {published},
  tppubtype = {article}
}