### 2014

Miguez, Joaquin

On the uniform asymptotic convergence of a distributed particle filter Artículo en actas

En: 2014 IEEE 8th Sensor Array and Multichannel Signal Processing Workshop (SAM), pp. 241–244, IEEE, A Coruña, 2014, ISBN: 978-1-4799-1481-4.

Resumen | Enlaces | BibTeX | Etiquetas: ad hoc networks, Approximation algorithms, approximation errors, Approximation methods, classical convergence theorems, Convergence, convergence of numerical methods, distributed particle filter scheme, distributed signal processing algorithms, Monte Carlo methods, parallel computing systems, particle filtering (numerical methods), Signal processing, Signal processing algorithms, stability assumptions, uniform asymptotic convergence, Wireless Sensor Networks, WSNs

@inproceedings{Miguez2014,
  title     = {On the uniform asymptotic convergence of a distributed particle filter},
  author    = {Joaquin Miguez},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6882385},
  doi       = {10.1109/SAM.2014.6882385},
  isbn      = {978-1-4799-1481-4},
  year      = {2014},
  date      = {2014-06-01},
  booktitle = {2014 IEEE 8th Sensor Array and Multichannel Signal Processing Workshop (SAM)},
  pages     = {241--244},
  publisher = {IEEE},
  address   = {A Coru{\~n}a},
  abstract  = {Distributed signal processing algorithms suitable for their implementation over wireless sensor networks (WSNs) and ad hoc networks with communications and computing capabilities have become a hot topic during the past years. One class of algorithms that have received special attention are particles filters. However, most distributed versions of this type of methods involve various heuristic or simplifying approximations and, as a consequence, classical convergence theorems for standard particle filters do not hold for their distributed counterparts. In this paper, we look into a distributed particle filter scheme that has been proposed for implementation in both parallel computing systems and WSNs, and prove that, under certain stability assumptions regarding the physical system of interest, its asymptotic convergence is guaranteed. Moreover, we show that convergence is attained uniformly over time. This means that approximation errors can be kept bounded for an arbitrarily long period of time without having to progressively increase the computational effort.},
  keywords  = {ad hoc networks, Approximation algorithms, approximation errors, Approximation methods, classical convergence theorems, Convergence, convergence of numerical methods, distributed particle filter scheme, distributed signal processing algorithms, Monte Carlo methods, parallel computing systems, particle filtering (numerical methods), Signal processing, Signal processing algorithms, stability assumptions, uniform asymptotic convergence, Wireless Sensor Networks, WSNs},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

### 2008

Perez-Cruz, Fernando

Kullback-Leibler Divergence Estimation of Continuous Distributions Artículo en actas

En: 2008 IEEE International Symposium on Information Theory, pp. 1666–1670, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.

Resumen | Enlaces | BibTeX | Etiquetas: Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions

@inproceedings{Perez-Cruz2008,
  title     = {{Kullback-Leibler} Divergence Estimation of Continuous Distributions},
  author    = {Fernando Perez-Cruz},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4595271},
  doi       = {10.1109/ISIT.2008.4595271},
  isbn      = {978-1-4244-2256-2},
  year      = {2008},
  date      = {2008-01-01},
  booktitle = {2008 IEEE International Symposium on Information Theory},
  pages     = {1666--1670},
  publisher = {IEEE},
  address   = {Toronto},
  abstract  = {We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved estimating the densities first. Our main result shows this intermediate step is unnecessary and that the divergence can be either estimated using the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, as the exponential or Erlang. We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem.},
  keywords  = {Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions},
  pubstate  = {published},
  tppubtype = {inproceedings}
}