2014
Crisan, Dan; Miguez, Joaquin
Particle-Kernel Estimation of the Filter Density in State-Space Models Journal Article
In: Bernoulli, (to appear), 2014.
Abstract | Links | BibTeX | Tags: density estimation, Markov systems, Models, Sequential Monte Carlo, state-space, stochastic filtering
@article{Crisan2014bb,
title = {Particle-Kernel Estimation of the Filter Density in State-Space Models},
author = {Dan Crisan and Joaquin Miguez},
url = {http://www.tsc.uc3m.es/~jmiguez/papers/P43_2014_Particle-Kernel Estimation of the Filter Density in State-Space Models.pdf
http://www.bernoulli-society.org/index.php/publications/bernoulli-journal/bernoulli-journal-papers},
year = {2014},
date = {2014-01-01},
journal = {Bernoulli},
volume = {(to appear)},
abstract = {Sequential Monte Carlo (SMC) methods, also known as particle filters, are simulation-based recursive algorithms for the approximation of the a posteriori probability measures generated by state-space dynamical models. At any given time t, an SMC method produces a set of samples over the state space of the system of interest (often termed “particles”) that is used to build a discrete and random approximation of the posterior probability distribution of the state variables, conditional on a sequence of available observations. One potential application of the methodology is the estimation of the densities associated with the sequence of a posteriori distributions. While practitioners have rather freely applied such density approximations in the past, the issue has received less attention from a theoretical perspective. In this paper, we address the problem of constructing kernel-based estimates of the posterior probability density function and its derivatives, and obtain asymptotic convergence results for the estimation errors. In particular, we find convergence rates for the approximation errors that hold uniformly on the state space and guarantee that the error vanishes almost surely as the number of particles in the filter grows. Based on this uniform convergence result, we first show how to build continuous measures that converge almost surely (with known rate) toward the posterior measure and then address a few applications. The latter include maximum a posteriori estimation of the system state using the approximate derivatives of the posterior density and the approximation of functionals of it, e.g., Shannon’s entropy.},
keywords = {density estimation, Markov systems, Models, Sequential Monte Carlo, state-space, stochastic filtering},
pubstate = {published},
tppubtype = {article}
}
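The abstract above describes how the weighted particles produced by an SMC filter can be smoothed with a kernel to obtain an estimate of the filter density. The following is a minimal illustrative sketch of that idea, not the construction analysed in the paper: a bootstrap particle filter for an assumed 1-D linear-Gaussian state-space model, followed by a Gaussian kernel density estimate built from the final particle set. The model parameters, the Silverman bandwidth rule, and the multinomial resampling step are assumptions made for the example.

```python
import numpy as np

rng = np.random.default_rng(0)

def simulate(T=50, a=0.9, sx=1.0, sy=0.5):
    """Simulate x_t = a*x_{t-1} + u_t, y_t = x_t + v_t (illustrative model, an assumption)."""
    x = np.zeros(T)
    for t in range(1, T):
        x[t] = a * x[t - 1] + sx * rng.standard_normal()
    y = x + sy * rng.standard_normal(T)
    return x, y

def bootstrap_filter(y, N=2000, a=0.9, sx=1.0, sy=0.5):
    """Return N resampled (equally weighted) particles approximating p(x_T | y_1:T)."""
    particles = rng.standard_normal(N)                             # initial particle cloud
    for obs in y:
        particles = a * particles + sx * rng.standard_normal(N)    # propagate through the dynamics
        logw = -0.5 * ((obs - particles) / sy) ** 2                # Gaussian likelihood log-weights
        w = np.exp(logw - logw.max())
        w /= w.sum()
        particles = rng.choice(particles, size=N, p=w)             # multinomial resampling
    return particles

def kernel_density(particles, grid, h=None):
    """Gaussian-kernel estimate of the filter density evaluated on `grid`."""
    if h is None:
        h = 1.06 * particles.std() * len(particles) ** (-0.2)      # Silverman's rule (an assumption)
    z = (grid[:, None] - particles[None, :]) / h
    return np.exp(-0.5 * z ** 2).sum(axis=1) / (len(particles) * h * np.sqrt(2 * np.pi))

x, y = simulate()
particles = bootstrap_filter(y)
grid = np.linspace(particles.min() - 1.0, particles.max() + 1.0, 200)
density = kernel_density(particles, grid)
print("approximate MAP estimate of the final state:", grid[np.argmax(density)])
```

The final line illustrates the maximum a posteriori application mentioned in the abstract, here obtained simply by maximizing the kernel estimate over a grid.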
2012
Oquendo, Maria A; Baca-García, Enrique; Artés-Rodríguez, Antonio; Perez-Cruz, Fernando; Galfalvy, H C; Blasco-Fontecilla, Hilario; Madigan, D; Duan, N
Machine Learning and Data Mining: Strategies for Hypothesis Generation Journal Article
In: Molecular psychiatry, vol. 17, no. 10, pp. 956–959, 2012, ISSN: 1476-5578.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Biological, Data Mining, Humans, Mental Disorders, Mental Disorders: diagnosis, Mental Disorders: therapy, Models
@article{Oquendo2012,
title = {Machine Learning and Data Mining: Strategies for Hypothesis Generation},
author = {Maria A Oquendo and Enrique Baca-Garc\'{i}a and Antonio Art\'{e}s-Rodr\'{i}guez and Fernando Perez-Cruz and H C Galfalvy and Hilario Blasco-Fontecilla and D Madigan and N Duan},
url = {http://www.ncbi.nlm.nih.gov/pubmed/22230882},
issn = {1476-5578},
year = {2012},
date = {2012-01-01},
journal = {Molecular psychiatry},
volume = {17},
number = {10},
pages = {956--959},
abstract = {Strategies for generating knowledge in medicine have included observation of associations in clinical or research settings and more recently, development of pathophysiological models based on molecular biology. Although critically important, they limit hypothesis generation to an incremental pace. Machine learning and data mining are alternative approaches to identifying new vistas to pursue, as is already evident in the literature. In concert with these analytic strategies, novel approaches to data collection can enhance the hypothesis pipeline as well. In data farming, data are obtained in an 'organic' way, in the sense that it is entered by patients themselves and available for harvesting. In contrast, in evidence farming (EF), it is the provider who enters medical data about individual patients. EF differs from regular electronic medical record systems because frontline providers can use it to learn from their own past experience. In addition to the possibility of generating large databases with farming approaches, it is likely that we can further harness the power of large data sets collected using either farming or more standard techniques through implementation of data-mining and machine-learning strategies. Exploiting large databases to develop new hypotheses regarding neurobiological and genetic underpinnings of psychiatric illness is useful in itself, but also affords the opportunity to identify novel mechanisms to be targeted in drug discovery and development.},
keywords = {Artificial Intelligence, Biological, Data Mining, Humans, Mental Disorders, Mental Disorders: diagnosis, Mental Disorders: therapy, Models},
pubstate = {published},
tppubtype = {article}
}