2014
Crisan, Dan; Miguez, Joaquin
Particle-Kernel Estimation of the Filter Density in State-Space Models Journal Article
In: Bernoulli, vol. (to appear), 2014.
Abstract | Links | BibTeX | Tags: density estimation, Markov systems, Models, Sequential Monte Carlo, state-space, stochastic filtering
@article{Crisan2014bb,
title = {Particle-Kernel Estimation of the Filter Density in State-Space Models},
author = {Dan Crisan and Joaquin Miguez},
url = {http://www.tsc.uc3m.es/~jmiguez/papers/P43_2014_Particle-Kernel Estimation of the Filter Density in State-Space Models.pdf
http://www.bernoulli-society.org/index.php/publications/bernoulli-journal/bernoulli-journal-papers},
year = {2014},
date = {2014-01-01},
journal = {Bernoulli},
volume = {(to appear)},
abstract = {Sequential Monte Carlo (SMC) methods, also known as particle filters, are simulation-based recursive algorithms for the approximation of the a posteriori probability measures generated by state-space dynamical models. At any given time t, an SMC method produces a set of samples over the state space of the system of interest (often termed “particles”) that is used to build a discrete and random approximation of the posterior probability distribution of the state variables, conditional on a sequence of available observations. One potential application of the methodology is the estimation of the densities associated with the sequence of a posteriori distributions. While practitioners have rather freely applied such density approximations in the past, the issue has received less attention from a theoretical perspective. In this paper, we address the problem of constructing kernel-based estimates of the posterior probability density function and its derivatives, and obtain asymptotic convergence results for the estimation errors. In particular, we find convergence rates for the approximation errors that hold uniformly on the state space and guarantee that the error vanishes almost surely as the number of particles in the filter grows. Based on this uniform convergence result, we first show how to build continuous measures that converge almost surely (with known rate) toward the posterior measure and then address a few applications. The latter include maximum a posteriori estimation of the system state using the approximate derivatives of the posterior density and the approximation of functionals of it, e.g., Shannon’s entropy.},
keywords = {density estimation, Markov systems, Models, Sequential Monte Carlo, state-space, stochastic filtering},
pubstate = {published},
tppubtype = {article}
}
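The abstract above describes building a kernel density estimate on top of the particle approximation produced by an SMC filter. The sketch below is only a minimal illustration of that idea under assumed choices, not the paper's construction: a bootstrap particle filter for a hypothetical 1-D linear-Gaussian state-space model, followed by a Gaussian kernel estimate of the filter density with a Silverman-type bandwidth, from which a crude maximum a posteriori estimate is read off a grid.

import numpy as np

rng = np.random.default_rng(0)

# Hypothetical model (an assumption, not the paper's example):
# x_t = a * x_{t-1} + u_t,   y_t = x_t + v_t,   with Gaussian u_t, v_t
a, sigma_u, sigma_v = 0.9, 1.0, 0.5
T, N = 50, 2000                            # time steps, number of particles

# Simulate a synthetic state trajectory and its observations
x = np.zeros(T)
for t in range(1, T):
    x[t] = a * x[t - 1] + sigma_u * rng.standard_normal()
y = x + sigma_v * rng.standard_normal(T)

# Bootstrap particle filter
particles = rng.standard_normal(N)
for t in range(T):
    # propagate the particles through the state equation
    particles = a * particles + sigma_u * rng.standard_normal(N)
    # weight by the likelihood of the new observation and resample
    logw = -0.5 * ((y[t] - particles) / sigma_v) ** 2
    w = np.exp(logw - logw.max())
    w /= w.sum()
    particles = particles[rng.choice(N, size=N, p=w)]

# Gaussian kernel estimate of the filter density p(x_T | y_{1:T})
h = 1.06 * particles.std() * N ** (-0.2)   # Silverman-type bandwidth (assumption)

def filter_density(z):
    """Kernel estimate of the posterior density evaluated at the points z."""
    z = np.atleast_1d(z)
    k = np.exp(-0.5 * ((z[:, None] - particles[None, :]) / h) ** 2)
    return k.sum(axis=1) / (N * h * np.sqrt(2.0 * np.pi))

grid = np.linspace(particles.min() - 1.0, particles.max() + 1.0, 400)
density = filter_density(grid)
map_estimate = grid[np.argmax(density)]    # crude MAP estimate from the density
print(map_estimate, x[-1])

The continuous estimate filter_density plays the role of the kernel approximation whose uniform convergence the paper analyses; here it is simply evaluated on a grid to locate a mode.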
2008
Perez-Cruz, Fernando
Kullback-Leibler Divergence Estimation of Continuous Distributions Proceedings Article
In: 2008 IEEE International Symposium on Information Theory, pp. 1666–1670, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.
Abstract | Links | BibTeX | Tags: Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions
@inproceedings{Perez-Cruz2008,
title = {Kullback-Leibler Divergence Estimation of Continuous Distributions},
author = {Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4595271},
isbn = {978-1-4244-2256-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Symposium on Information Theory},
pages = {1666--1670},
publisher = {IEEE},
address = {Toronto},
abstract = {We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved by estimating the densities first. Our main result shows that this intermediate step is unnecessary and that the divergence can be estimated using either the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, such as the exponential or Erlang. We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem.},
keywords = {Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions},
pubstate = {published},
tppubtype = {inproceedings}
}
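The abstract above is about estimating the KL divergence directly from two sets of samples, without fitting the densities explicitly. The sketch below shows a k-nearest-neighbour divergence estimator of that general kind; the function name, the default k=1 and the Gaussian sanity check are assumptions made for illustration, not the paper's exact estimator or experiments.

import numpy as np
from scipy.spatial import cKDTree

def knn_kl_divergence(x, y, k=1):
    """Estimate D(p || q) from samples x ~ p (shape (n, d)) and y ~ q (shape (m, d))."""
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    n, d = x.shape
    m = y.shape[0]
    # distance from each x_i to its k-th nearest neighbour in x \ {x_i}
    rho = cKDTree(x).query(x, k=k + 1)[0][:, -1]
    # distance from each x_i to its k-th nearest neighbour in y
    nu = cKDTree(y).query(x, k=k)[0]
    if k > 1:
        nu = nu[:, -1]
    # average log ratio of neighbour distances plus a bias-correcting constant
    return (d / n) * np.sum(np.log(nu / rho)) + np.log(m / (n - 1))

# Sanity check on two 1-D Gaussians with unit variance and shifted means;
# the true divergence in this case is 0.5 * (difference of means)^2 = 0.5.
rng = np.random.default_rng(1)
p_samples = rng.normal(0.0, 1.0, size=(5000, 1))
q_samples = rng.normal(1.0, 1.0, size=(5000, 1))
print(knn_kl_divergence(p_samples, q_samples))

As in the abstract, the nearest-neighbour distances stand in for local density estimates, so no explicit density fit is ever formed before the divergence is computed.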