2020
Akyildiz, O. D.; Crisan, Dan; Miguez, Joaquín
Parallel sequential Monte Carlo for stochastic gradient-free nonconvex optimization Artículo de revista
En: Statistics and Computing, 2020.
Enlaces | BibTeX | Etiquetas: Gradient-free optimization, Nonconvex optimization, Sampling, Sequential Monte Carlo, Stochastic optimization
@article{JMiguez20c,
title = {Parallel sequential {Monte Carlo} for stochastic gradient-free nonconvex optimization},
author = {Akyildiz, O. D. and Crisan, Dan and Miguez, Joaqu{\'\i}n},
doi = {10.1007/s11222-020-09964-4},
year = {2020},
date = {2020-07-29},
journal = {Statistics and Computing},
keywords = {Gradient-free optimization, Nonconvex optimization, Sampling, Sequential Monte Carlo, Stochastic optimization},
pubstate = {published},
tppubtype = {article}
}
2018
Akyildiz, O. D.; Elvira, Víctor; Míguez, Joaquín
The Incremental Proximal Method: A Probabilistic Perspective Proceedings Article
En: 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, ISSN: 2379-190X.
Enlaces | BibTeX | Etiquetas: Incremental proximal methods, Kalman filtering, Stochastic optimization
@inproceedings{JMiguez18f,
title = {The Incremental Proximal Method: A Probabilistic Perspective},
author = {Akyildiz, O. D. and Elvira, V{\'\i}ctor and M{\'\i}guez, Joaqu{\'\i}n},
doi = {10.1109/ICASSP.2018.8462131},
issn = {2379-190X},
year = {2018},
date = {2018-04-15},
booktitle = {2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
keywords = {Incremental proximal methods, Kalman filtering, Stochastic optimization},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Martino, Luca; Elvira, Victor; Luengo, David; Artés-Rodríguez, Antonio; Corander, Jukka
Smelly Parallel MCMC Chains Proceedings Article
En: 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4070–4074, IEEE, Brisbane, 2015, ISBN: 978-1-4673-6997-8.
Resumen | Enlaces | BibTeX | Etiquetas: Bayesian inference, learning (artificial intelligence), Machine learning, Markov chain Monte Carlo, Markov chain Monte Carlo algorithms, Markov processes, MC methods, MCMC algorithms, MCMC scheme, mean square error, mean square error methods, Monte Carlo methods, optimisation, parallel and interacting chains, Probability density function, Proposals, robustness, Sampling methods, Signal processing, Signal processing algorithms, signal sampling, smelly parallel chains, smelly parallel MCMC chains, Stochastic optimization
@inproceedings{Martino2015a,
title = {Smelly Parallel {MCMC} Chains},
author = {Martino, Luca and Elvira, Victor and Luengo, David and Art{\'e}s-Rodr{\'\i}guez, Antonio and Corander, Jukka},
url = {http://www.tsc.uc3m.es/~velvira/papers/ICASSP2015_martino.pdf},
doi = {10.1109/ICASSP.2015.7178736},
isbn = {978-1-4673-6997-8},
year = {2015},
date = {2015-04-01},
booktitle = {2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {4070--4074},
publisher = {IEEE},
address = {Brisbane},
abstract = {Monte Carlo (MC) methods are useful tools for Bayesian inference and stochastic optimization that have been widely applied in signal processing and machine learning. A well-known class of MC methods are Markov Chain Monte Carlo (MCMC) algorithms. In this work, we introduce a novel parallel interacting MCMC scheme, where the parallel chains share information, thus yielding a faster exploration of the state space. The interaction is carried out generating a dynamic repulsion among the ``smelly'' parallel chains that takes into account the entire population of current states. The ergodicity of the scheme and its relationship with other sampling methods are discussed. Numerical results show the advantages of the proposed approach in terms of mean square error, robustness w.r.t. to initial values and parameter choice.},
keywords = {Bayesian inference, learning (artificial intelligence), Machine learning, Markov chain Monte Carlo, Markov chain Monte Carlo algorithms, Markov processes, MC methods, MCMC algorithms, MCMC scheme, mean square error, mean square error methods, Monte Carlo methods, optimisation, parallel and interacting chains, Probability density function, Proposals, robustness, Sampling methods, Signal processing, Signal processing algorithms, signal sampling, smelly parallel chains, smelly parallel MCMC chains, Stochastic optimization},
pubstate = {published},
tppubtype = {inproceedings}
}
2010
Zoubir, A; Viberg, M; Yang, B; Miguez, Joaquin
Analysis of a Sequential Monte Carlo Method for Optimization in Dynamical Systems Artículo de revista
En: Signal Processing, vol. 90, no 5, pp. 1609–1622, 2010.
Resumen | Enlaces | BibTeX | Etiquetas: Dynamic optimization, Nonlinear dynamics, Nonlinear tracking, Sequential Monte Carlo, Stochastic optimization
@article{Zoubir2010,
title = {Analysis of a Sequential {Monte Carlo} Method for Optimization in Dynamical Systems},
author = {Zoubir, A. and Viberg, M. and Yang, B. and Miguez, Joaquin},
url = {http://www.sciencedirect.com/science/article/pii/S0165168409004708},
year = {2010},
date = {2010-01-01},
journal = {Signal Processing},
volume = {90},
number = {5},
pages = {1609--1622},
abstract = {We investigate a recently proposed sequential Monte Carlo methodology for recursively tracking the minima of a cost function that evolves with time. These methods, subsequently referred to as sequential Monte Carlo minimization (SMCM) procedures, have an algorithmic structure similar to particle filters: they involve the generation of random paths in the space of the signal of interest (SoI), the stochastic selection of the fittest paths and the ranking of the survivors according to their cost. In this paper, we propose an extension of the original SMCM methodology (that makes it applicable to a broader class of cost functions) and introduce an asymptotic-convergence analysis. Our analytical results are based on simple induction arguments and show how the SoI-estimates computed by a SMCM algorithm converge, in probability, to a sequence of minimizers of the cost function. We illustrate these results by means of two computer simulation examples.},
keywords = {Dynamic optimization, Nonlinear dynamics, Nonlinear tracking, Sequential Monte Carlo, Stochastic optimization},
pubstate = {published},
tppubtype = {article}
}
2009
Miguez, Joaquin; Maiz, Cristina S; Djuric, Petar M; Crisan, Dan
Sequential Monte Carlo Optimization Using Artificial State-Space Models Proceedings Article
En: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 268–273, IEEE, Marco Island, FL, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization
@inproceedings{Miguez2009,
title = {Sequential {Monte Carlo} Optimization Using Artificial State-Space Models},
author = {Miguez, Joaquin and Maiz, Cristina S. and Djuric, Petar M. and Crisan, Dan},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785933},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
pages = {268--273},
publisher = {IEEE},
address = {Marco Island, FL},
abstract = {We introduce a method for sequential minimization of a certain class of (possibly non-convex) cost functions with respect to a high dimensional signal of interest. The proposed approach involves the transformation of the optimization problem into one of estimation in a discrete-time dynamical system. In particular, we describe a methodology for constructing an artificial state-space model which has the signal of interest as its unobserved dynamic state. The model is ``adapted'' to the cost function in the sense that the maximum a posteriori (MAP) estimate of the system state is also a global minimizer of the cost function. The advantage of the estimation framework is that we can draw from a pool of sequential Monte Carlo methods, for particle approximation of probability measures in dynamic systems, that enable the numerical computation of MAP estimates. We provide examples of how to apply the proposed methodology, including some illustrative simulation results.},
keywords = {Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization},
pubstate = {published},
tppubtype = {inproceedings}
}