2012
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Olmos, Pablo M; Perez-Cruz, Fernando
Tree-Structured Expectation Propagation for LDPC Decoding over the AWGN Channel Proceedings Article
En: 2012 IEEE International Workshop on Machine Learning for Signal Processing, pp. 1–6, IEEE, Santander, 2012, ISSN: 1551-2541.
Resumen | Enlaces | BibTeX | Etiquetas: additive white Gaussian noise channel, Approximation algorithms, Approximation methods, approximation theory, AWGN channel, AWGN channels, belief propagation solution, Bit error rate, Decoding, error floor reduction, finite-length regime, Gain, Joints, LDPC decoding, low-density parity-check decoding, pairwise marginal constraint, parity check codes, TEP decoder, tree-like approximation, tree-structured expectation propagation, trees (mathematics)
@inproceedings{Salamanca2012,
title = {Tree-Structured Expectation Propagation for {LDPC} Decoding over the {AWGN} Channel},
author = {Luis Salamanca and Juan Jose Murillo-Fuentes and Pablo M Olmos and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6349716},
issn = {1551-2541},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {1--6},
publisher = {IEEE},
address = {Santander},
abstract = {In this paper, we propose the tree-structured expectation propagation (TEP) algorithm for low-density parity-check (LDPC) decoding over the additive white Gaussian noise (AWGN) channel. By imposing a tree-like approximation over the graphical model of the code, this algorithm introduces pairwise marginal constraints over pairs of variables, which provide joint information of the variables related. Thanks to this, the proposed TEP decoder improves the performance of the standard belief propagation (BP) solution. An efficient way of constructing the tree-like structure is also described. The simulation results illustrate the TEP decoder gain in the finite-length regime, compared to the standard BP solution. For code lengths shorter than n = 512, the gain in the waterfall region achieves up to 0.25 dB. We also notice a remarkable reduction of the error floor.},
keywords = {additive white Gaussian noise channel, Approximation algorithms, Approximation methods, approximation theory, AWGN channel, AWGN channels, belief propagation solution, Bit error rate, Decoding, error floor reduction, finite-length regime, Gain, Joints, LDPC decoding, low-density parity-check decoding, pairwise marginal constraint, parity check codes, TEP decoder, tree-like approximation, tree-structured expectation propagation, trees (mathematics)},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhong, Jingshan; Dauwels, Justin; Vazquez, Manuel A; Waller, Laura
Low-Complexity Noise-Resilient Recovery of Phase and Amplitude from Defocused Intensity Images Proceedings Article
En: Imaging and Applied Optics Technical Papers, pp. CTu4B.1, OSA, Washington, D.C., 2012, ISBN: 1-55752-947-7.
Resumen | Enlaces | BibTeX | Etiquetas: Image reconstruction techniques, Phase retrieval, Wave propagation
@inproceedings{Zhong2012,
  title     = {Low-Complexity Noise-Resilient Recovery of Phase and Amplitude from Defocused Intensity Images},
  author    = {Jingshan Zhong and Justin Dauwels and Manuel A Vazquez and Laura Waller},
  url       = {http://www.opticsinfobase.org/abstract.cfm?URI=COSI-2012-CTu4B.1},
  isbn      = {1-55752-947-7},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Imaging and Applied Optics Technical Papers},
  pages     = {CTu4B.1},
  publisher = {OSA},
  address   = {Washington, D.C.},
  abstract  = {A low-complexity augmented Kalman filter is proposed to efficiently recover the phase from a series of noisy intensity images. The proposed method is robust to noise, has low complexity, and may enable real-time phase recovery.},
  keywords  = {Image reconstruction techniques, Phase retrieval, Wave propagation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Koch, Tobias; Martinez, Alfonso; i Fabregas, Albert Guillen
The Capacity Loss of Dense Constellations Proceedings Article
En: 2012 IEEE International Symposium on Information Theory Proceedings, pp. 572–576, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095.
Resumen | Enlaces | BibTeX | Etiquetas: capacity loss, channel capacity, Constellation diagram, dense constellations, Entropy, general complex-valued additive-noise channels, high signal-to-noise ratio, loss 1.53 dB, power loss, Quadrature amplitude modulation, Random variables, signal constellations, Signal processing, Signal to noise ratio, square signal constellations, Upper bound
@inproceedings{Koch2012,
title = {The Capacity Loss of Dense Constellations},
author = {Koch, Tobias and Martinez, Alfonso and Guillen i Fabregas, Albert},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6283482},
issn = {2157-8095},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Symposium on Information Theory Proceedings},
pages = {572--576},
publisher = {IEEE},
address = {Cambridge, MA},
abstract = {We determine the loss in capacity incurred by using signal constellations with a bounded support over general complex-valued additive-noise channels for suitably high signal-to-noise ratio. Our expression for the capacity loss recovers the power loss of 1.53 dB for square signal constellations.},
keywords = {capacity loss, channel capacity, Constellation diagram, dense constellations, Entropy, general complex-valued additive-noise channels, high signal-to-noise ratio, loss 1.53 dB, power loss, Quadrature amplitude modulation, Random variables, signal constellations, Signal processing, Signal to noise ratio, square signal constellations, Upper bound},
pubstate = {published},
tppubtype = {inproceedings}
}
O'Mahony, Niamh; Perez-Cruz, Fernando
A novel Sequential Bayesian Approach to GPS Acquisition Proceedings Article
En: 2012 3rd International Workshop on Cognitive Information Processing (CIP), pp. 1–6, IEEE, Baiona, 2012, ISBN: 978-1-4673-1878-5.
Resumen | Enlaces | BibTeX | Etiquetas: Bayes methods, coarse synchronization, Correlators, data acquisition, Delay, Doppler effect, Global Positioning System, GPS acquisition, GPS signal parameters, learning (artificial intelligence), online learning algorithm, Receivers, Satellites, sequential Bayesian approach, signal acquisition, signal detection, Synchronization
@inproceedings{O'Mahony2012,
title = {A Novel Sequential {Bayesian} Approach to {GPS} Acquisition},
author = {Niamh O'Mahony and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6232921},
isbn = {978-1-4673-1878-5},
year = {2012},
date = {2012-01-01},
booktitle = {2012 3rd International Workshop on Cognitive Information Processing (CIP)},
pages = {1--6},
publisher = {IEEE},
address = {Baiona},
abstract = {In this work, a novel online learning algorithm is presented for the synchronization of Global Positioning System (GPS) signal parameters at the acquisition, or coarse synchronization, stage. The algorithm is based on a Bayesian approach, which has, to date, not been exploited for the acquisition problem. Simulated results are presented to illustrate the algorithm performance, in terms of accuracy and acquisition time, along with results from the acquisition of signals from live GPS satellites using both the new algorithm and a state-of-the-art approach for comparison.},
keywords = {Bayes methods, coarse synchronization, Correlators, data acquisition, Delay, Doppler effect, Global Positioning System, GPS acquisition, GPS signal parameters, learning (artificial intelligence), online learning algorithm, Receivers, Satellites, sequential Bayesian approach, signal acquisition, signal detection, Synchronization},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando
New Tools to Generate Predictive Models for Attempts Suicide Proceedings Article
En: National Conference on Psychiatry, Bilbao, 2012.
BibTeX | Etiquetas:
@inproceedings{Perez-Cruz2012,
  title     = {New Tools to Generate Predictive Models for Attempts Suicide},
  author    = {Fernando Perez-Cruz},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {National Conference on Psychiatry},
  address   = {Bilbao},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olmos, Pablo M; Perez-Cruz, Fernando; Salamanca, Luis; Murillo-Fuentes, Juan Jose
Finite-Length Performance of Spatially-Coupled LDPC Codes under TEP Decoding Proceedings Article
En: 2012 IEEE Information Theory Workshop, pp. 1–6, IEEE, Lausanne, 2012, ISBN: 978-1-4673-0223-4.
Enlaces | BibTeX | Etiquetas: asymptotic limit, belief propagation decoding, Complexity theory, convolutional codes, convolutional LDPC codes, Decoding, decoding latency, decoding threshold, erasure channel, Error analysis, error rates, finite-length analysis, finite-length performance, maximum a posteriori threshold, maximum likelihood estimation, parity check codes, regular sparse codes, spatially-coupled LDPC codes, TEP decoding, tree-structured expectation propagation, underlying regular code, very large code length, window-sliding scheme
@inproceedings{Olmos2012,
title = {Finite-Length Performance of Spatially-Coupled {LDPC} Codes under {TEP} Decoding},
author = {Pablo M Olmos and Fernando Perez-Cruz and Luis Salamanca and Juan Jose Murillo-Fuentes},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6404722},
isbn = {978-1-4673-0223-4},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE Information Theory Workshop},
pages = {1--6},
publisher = {IEEE},
address = {Lausanne},
keywords = {asymptotic limit, belief propagation decoding, Complexity theory, convolutional codes, convolutional LDPC codes, Decoding, decoding latency, decoding threshold, erasure channel, Error analysis, error rates, finite-length analysis, finite-length performance, maximum a posteriori threshold, maximum likelihood estimation, parity check codes, regular sparse codes, spatially-coupled LDPC codes, TEP decoding, tree-structured expectation propagation, underlying regular code, very large code length, window-sliding scheme},
pubstate = {published},
tppubtype = {inproceedings}
}
Read, Jesse; Bifet, Albert; Pfahringer, Bernhard; Holmes, Geoff
Advances in Intelligent Data Analysis XI Proceedings Article
En: Hollmén, Jaakko; Klawonn, Frank; Tucker, Allan (Ed.): Proc. of The Eleventh International Symposium on Intelligent Data Analysis (IDA 2012), Springer Berlin Heidelberg, Helsinki, 2012, ISBN: 978-3-642-34155-7.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Read2012,
title = {Advances in Intelligent Data Analysis {XI}},
author = {Jesse Read and Albert Bifet and Bernhard Pfahringer and Geoff Holmes},
editor = {Jaakko Hollm{\'e}n and Frank Klawonn and Allan Tucker},
url = {http://www.springerlink.com/index/10.1007/978-3-642-34156-4},
isbn = {978-3-642-34155-7},
year = {2012},
date = {2012-01-01},
booktitle = {Proc. of The Eleventh International Symposium on Intelligent Data Analysis (IDA 2012)},
publisher = {Springer Berlin Heidelberg},
address = {Helsinki},
series = {Lecture Notes in Computer Science},
internal-note = {NOTE(review): title is the proceedings volume title, but the abstract describes a specific paper on batch- vs instance-incremental stream learning -- verify the intended paper title},
abstract = {Many real world problems involve the challenging context of data streams, where classifiers must be incremental: able to learn from a theoretically-infinite stream of examples using limited time and memory, while being able to predict at any point. Two approaches dominate the literature: batch-incremental methods that gather examples in batches to train models; and instance-incremental methods that learn from each example as it arrives. Typically, papers in the literature choose one of these approaches, but provide insufficient evidence or references to justify their choice. We provide a first in-depth analysis comparing both approaches, including how they adapt to concept drift, and an extensive empirical study to compare several different versions of each approach. Our results reveal the respective advantages and disadvantages of the methods, which we discuss in detail.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Florentino-Liaño, Blanca; O'Mahony, Niamh; Artés-Rodríguez, Antonio
Hierarchical Dynamic Model for Human Daily Activity Recognition Proceedings Article
En: BIOSIGNALS 2012 (BIOSTEC), Vilamoura, 2012.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Florentino-Liano2012c,
title = {Hierarchical Dynamic Model for Human Daily Activity Recognition},
author = {Blanca Florentino-Lia{\~n}o and Niamh O'Mahony and Antonio Art{\'e}s-Rodr{\'\i}guez},
url = {http://www.biosignals.biostec.org/Abstracts/2012/BIOSIGNALS_2012_Abstracts.htm},
year = {2012},
date = {2012-01-01},
booktitle = {BIOSIGNALS 2012 (BIOSTEC)},
volume = {85},
address = {Vilamoura},
abstract = {This work deals with the task of human daily activity recognition using miniature inertial sensors. The proposed method is based on the development of a hierarchical dynamic model, incorporating both inter-activity and intra-activity dynamics, thereby exploiting the inherently dynamic nature of the problem to aid the classification task. The method uses raw acceleration and angular velocity signals, directly recorded by inertial sensors, bypassing commonly used feature extraction and selection techniques and, thus, keeping all information regarding the dynamics of the signals. Classification results show a competitive performance compared to state-of-the-art methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Taborda, Camilo G; Perez-Cruz, Fernando
Derivative of the Relative Entropy over the Poisson and Binomial Channel Proceedings Article
En: 2012 IEEE Information Theory Workshop, pp. 386–390, IEEE, Lausanne, 2012, ISBN: 978-1-4673-0223-4.
Resumen | Enlaces | BibTeX | Etiquetas: binomial channel, binomial distribution, Channel estimation, conditional distribution, Entropy, Estimation, function expectation, Mutual information, mutual information concept, Poisson channel, Poisson distribution, Random variables, relative entropy derivative, similar expression
@inproceedings{Taborda2012,
title = {Derivative of the Relative Entropy over the {Poisson} and Binomial Channel},
author = {Camilo G Taborda and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6404699},
isbn = {978-1-4673-0223-4},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE Information Theory Workshop},
pages = {386--390},
publisher = {IEEE},
address = {Lausanne},
abstract = {In this paper it is found that, regardless of the statistics of the input, the derivative of the relative entropy over the Binomial channel can be seen as the expectation of a function that has as argument the mean of the conditional distribution that models the channel. Based on this relationship we formulate a similar expression for the mutual information concept. In addition to this, using the connection between the Binomial and Poisson distribution we develop similar results for the Poisson channel. Novelty of the results presented here lies on the fact that, expressions obtained can be applied to a wide range of scenarios.},
keywords = {binomial channel, binomial distribution, Channel estimation, conditional distribution, Entropy, Estimation, function expectation, Mutual information, mutual information concept, Poisson channel, Poisson distribution, Random variables, relative entropy derivative, similar expression},
pubstate = {published},
tppubtype = {inproceedings}
}
Florentino-Liaño, Blanca; O'Mahony, Niamh; Artés-Rodríguez, Antonio
Long Term Human Activity Recognition with Automatic Orientation Estimation Proceedings Article
En: 2012 IEEE International Workshop on Machine Learning for Signal Processing, pp. 1–6, IEEE, Santander, 2012, ISSN: 1551-2541.
Resumen | Enlaces | BibTeX | Etiquetas: Acceleration, Activity recognition, automatic orientation estimation, biomedical equipment, Estimation, Gravity, Hidden Markov models, human daily activity recognition, Humans, Legged locomotion, long term human activity recognition, medical signal processing, object recognition, orientation estimation, sensors, single miniature inertial sensor, time intervals, Vectors, virtual sensor orientation, wearable sensors
@inproceedings{Florentino-Liano2012b,
title = {Long Term Human Activity Recognition with Automatic Orientation Estimation},
author = {Blanca Florentino-Lia{\~n}o and Niamh O'Mahony and Antonio Art{\'e}s-Rodr{\'\i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6349789},
issn = {1551-2541},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {1--6},
publisher = {IEEE},
address = {Santander},
abstract = {This work deals with the elimination of sensitivity to sensor orientation in the task of human daily activity recognition using a single miniature inertial sensor. The proposed method detects time intervals of walking, automatically estimating the orientation in these intervals and transforming the observed signals to a “virtual” sensor orientation. Classification results show that excellent performance, in terms of both precision and recall (up to 100%), is achieved, for long-term recordings in real-life settings.},
keywords = {Acceleration, Activity recognition, automatic orientation estimation, biomedical equipment, Estimation, Gravity, Hidden Markov models, human daily activity recognition, Humans, Legged locomotion, long term human activity recognition, medical signal processing, object recognition, orientation estimation, sensors, single miniature inertial sensor, time intervals, Vectors, virtual sensor orientation, wearable sensors},
pubstate = {published},
tppubtype = {inproceedings}
}
Durisi, Giuseppe; Koch, Tobias; Polyanskiy, Yury
Diversity Versus Channel Knowledge at Finite Block-Length Proceedings Article
En: 2012 IEEE Information Theory Workshop, pp. 572–576, IEEE, Lausanne, 2012, ISBN: 978-1-4673-0223-4.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, block error probability, channel coherence time, Channel estimation, channel knowledge, Coherence, diversity, diversity reception, error statistics, Fading, finite block-length, maximal achievable rate, noncoherent setting, Rayleigh block-fading channels, Rayleigh channels, Receivers, Signal to noise ratio, Upper bound
@inproceedings{Durisi2012,
title = {Diversity Versus Channel Knowledge at Finite Block-Length},
author = {Giuseppe Durisi and Tobias Koch and Yury Polyanskiy},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6404740},
isbn = {978-1-4673-0223-4},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE Information Theory Workshop},
pages = {572--576},
publisher = {IEEE},
address = {Lausanne},
abstract = {We study the maximal achievable rate R*(n, ∈) for a given block-length n and block error probability ∈ over Rayleigh block-fading channels in the noncoherent setting and in the finite block-length regime. Our results show that for a given block-length and error probability, R*(n, ∈) is not monotonic in the channel's coherence time, but there exists a rate maximizing coherence time that optimally trades between diversity and cost of estimating the channel.},
keywords = {Approximation methods, block error probability, channel coherence time, Channel estimation, channel knowledge, Coherence, diversity, diversity reception, error statistics, Fading, finite block-length, maximal achievable rate, noncoherent setting, Rayleigh block-fading channels, Rayleigh channels, Receivers, Signal to noise ratio, Upper bound},
pubstate = {published},
tppubtype = {inproceedings}
}
Florentino-Liaño, Blanca; O'Mahony, Niamh; Artés-Rodríguez, Antonio
Human Activity Recognition Using Inertial Sensors with Invariance to Sensor Orientation Proceedings Article
En: 2012 3rd International Workshop on Cognitive Information Processing (CIP), pp. 1–6, IEEE, Baiona, 2012, ISBN: 978-1-4673-1878-5.
Resumen | Enlaces | BibTeX | Etiquetas: Acceleration, Accelerometers, biomechanics, classification algorithm, Gyroscopes, Hidden Markov models, human daily activity recognition, inertial measurement unit, Legged locomotion, miniature inertial sensors, raw sensor signal classification, sensor orientation invariance, sensor orientation sensitivity, sensor placement, sensor position sensitivity, sensors, signal classification, signal transformation, Training, triaxial accelerometer, triaxial gyroscope, virtual sensor orientation
@inproceedings{Florentino-Liano2012a,
title = {Human Activity Recognition Using Inertial Sensors with Invariance to Sensor Orientation},
author = {Blanca Florentino-Lia{\~n}o and Niamh O'Mahony and Antonio Art{\'e}s-Rodr{\'\i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6232914},
isbn = {978-1-4673-1878-5},
year = {2012},
date = {2012-01-01},
booktitle = {2012 3rd International Workshop on Cognitive Information Processing (CIP)},
pages = {1--6},
publisher = {IEEE},
address = {Baiona},
abstract = {This work deals with the task of human daily activity recognition using miniature inertial sensors. The proposed method reduces sensitivity to the position and orientation of the sensor on the body, which is inherent in traditional methods, by transforming the observed signals to a “virtual” sensor orientation. By means of this computationally low-cost transform, the inputs to the classification algorithm are made invariant to sensor orientation, despite the signals being recorded from arbitrary sensor placements. Classification results show that improved performance, in terms of both precision and recall, is achieved with the transformed signals, relative to classification using raw sensor signals, and the algorithm performs competitively compared to the state-of-the-art. Activity recognition using data from a sensor with completely unknown orientation is shown to perform very well over a long term recording in a real-life setting.},
keywords = {Acceleration, Accelerometers, biomechanics, classification algorithm, Gyroscopes, Hidden Markov models, human daily activity recognition, inertial measurement unit, Legged locomotion, miniature inertial sensors, raw sensor signal classification, sensor orientation invariance, sensor orientation sensitivity, sensor placement, sensor position sensitivity, sensors, signal classification, signal transformation, Training, triaxial accelerometer, triaxial gyroscope, virtual sensor orientation},
pubstate = {published},
tppubtype = {inproceedings}
}
Garcia-Moreno, Pablo; Artés-Rodríguez, Antonio; Hansen, Lars Kai
A Hold-out Method to Correct PCA Variance Inflation Proceedings Article
En: 2012 3rd International Workshop on Cognitive Information Processing (CIP), pp. 1–6, IEEE, Baiona, 2012, ISBN: 978-1-4673-1878-5.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, classification scenario, computational complexity, computational cost, Computational efficiency, correction method, hold-out method, hold-out procedure, leave-one-out procedure, LOO method, LOO procedure, Mathematical model, PCA algorithm, PCA variance inflation, Principal component analysis, singular value decomposition, Standards, SVD, Training
@inproceedings{Garcia-Moreno2012,
title = {A Hold-out Method to Correct {PCA} Variance Inflation},
author = {Pablo Garcia-Moreno and Antonio Art{\'e}s-Rodr{\'\i}guez and Lars Kai Hansen},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6232926},
isbn = {978-1-4673-1878-5},
year = {2012},
date = {2012-01-01},
booktitle = {2012 3rd International Workshop on Cognitive Information Processing (CIP)},
pages = {1--6},
publisher = {IEEE},
address = {Baiona},
abstract = {In this paper we analyze the problem of variance inflation experienced by the PCA algorithm when working in an ill-posed scenario where the dimensionality of the training set is larger than its sample size. In an earlier article a correction method based on a Leave-One-Out (LOO) procedure was introduced. We propose a Hold-out procedure whose computational cost is lower and, unlike the LOO method, the number of SVD's does not scale with the sample size. We analyze its properties from a theoretical and empirical point of view. Finally we apply it to a real classification scenario.},
keywords = {Approximation methods, classification scenario, computational complexity, computational cost, Computational efficiency, correction method, hold-out method, hold-out procedure, leave-one-out procedure, LOO method, LOO procedure, Mathematical model, PCA algorithm, PCA variance inflation, Principal component analysis, singular value decomposition, Standards, SVD, Training},
pubstate = {published},
tppubtype = {inproceedings}
}
Montoya-Martinez, Jair; Artés-Rodríguez, Antonio; Hansen, Lars Kai; Pontil, Massimiliano
Structured Sparsity Regularization Approach to the EEG Inverse Problem Proceedings Article
En: 2012 3rd International Workshop on Cognitive Information Processing (CIP), pp. 1–6, IEEE, Baiona, 2012, ISBN: 978-1-4673-1878-5.
Resumen | Enlaces | BibTeX | Etiquetas: BES, brain electrical sources matrix, Brain modeling, EEG inverse problem, Electrodes, Electroencephalography, good convergence, Inverse problems, large nonsmooth convex problems, medical signal processing, optimisation, Optimization, proximal splitting optimization methods, Sparse matrices, spatio-temporal source space, structured sparsity regularization approach, undetermined ill-posed problem
@inproceedings{Montoya-Martinez2012,
title = {Structured Sparsity Regularization Approach to the {EEG} Inverse Problem},
author = {Jair Montoya-Martinez and Antonio Art{\'e}s-Rodr{\'\i}guez and Lars Kai Hansen and Massimiliano Pontil},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6232898},
isbn = {978-1-4673-1878-5},
year = {2012},
date = {2012-01-01},
booktitle = {2012 3rd International Workshop on Cognitive Information Processing (CIP)},
pages = {1--6},
publisher = {IEEE},
address = {Baiona},
abstract = {Localization of brain activity involves solving the EEG inverse problem, which is an undetermined ill-posed problem. We propose a novel approach consisting in estimating, using structured sparsity regularization techniques, the Brain Electrical Sources (BES) matrix directly in the spatio-temporal source space. We use proximal splitting optimization methods, which are efficient optimization techniques, with good convergence rates and with the ability to handle large nonsmooth convex problems, which is the typical scenario in the EEG inverse problem. We have evaluated our approach under a simulated scenario, consisting in estimating a synthetic BES matrix with 5124 sources. We report results using ℓ1 (LASSO), ℓ1/ℓ2 (Group LASSO) and ℓ1 + ℓ1/ℓ2 (Sparse Group LASSO) regularizers.},
keywords = {BES, brain electrical sources matrix, Brain modeling, EEG inverse problem, Electrodes, Electroencephalography, good convergence, Inverse problems, large nonsmooth convex problems, medical signal processing, optimisation, Optimization, proximal splitting optimization methods, Sparse matrices, spatio-temporal source space, structured sparsity regularization approach, undetermined ill-posed problem},
pubstate = {published},
tppubtype = {inproceedings}
}
Monzon, Sandra; Trigano, Tom; Luengo, David; Artés-Rodríguez, Antonio
Sparse Spectral Analysis of Atrial Fibrillation Electrograms. Proceedings Article
En: 2012 IEEE International Workshop on Machine Learning for Signal Processing, pp. 1–6, IEEE, Santander, 2012, ISSN: 1551-2541.
Resumen | Enlaces | BibTeX | Etiquetas: Algorithm design and analysis, atrial fibrillation, atrial fibrillation electrogram, biomedical signal processing, dominant frequency, Doped fiber amplifiers, electrocardiography, Harmonic analysis, Heart, heart disorder, Indexes, Mathematical model, medical signal processing, multiple foci, multiple uncoordinated activation foci, signal processing technique, sparse spectral analysis, sparsity-aware learning, sparsity-aware learning technique, spectral analysis, spike train
@inproceedings{Monzon2012,
title = {Sparse Spectral Analysis of Atrial Fibrillation Electrograms},
author = {Sandra Monzon and Tom Trigano and David Luengo and Antonio Art{\'e}s-Rodr{\'\i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6349721},
issn = {1551-2541},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {1--6},
publisher = {IEEE},
address = {Santander},
abstract = {Atrial fibrillation (AF) is a common heart disorder. One of the most prominent hypothesis about its initiation and maintenance considers multiple uncoordinated activation foci inside the atrium. However, the implicit assumption behind all the signal processing techniques used for AF, such as dominant frequency and organization analysis, is the existence of a single regular component in the observed signals. In this paper we take into account the existence of multiple foci, performing a spectral analysis to detect their number and frequencies. In order to obtain a cleaner signal on which the spectral analysis can be performed, we introduce sparsity-aware learning techniques to infer the spike trains corresponding to the activations. The good performance of the proposed algorithm is demonstrated both on synthetic and real data.},
keywords = {Algorithm design and analysis, atrial fibrillation, atrial fibrillation electrogram, biomedical signal processing, dominant frequency, Doped fiber amplifiers, electrocardiography, Harmonic analysis, Heart, heart disorder, Indexes, Mathematical model, medical signal processing, multiple foci, multiple uncoordinated activation foci, signal processing technique, sparse spectral analysis, sparsity-aware learning, sparsity-aware learning technique, spectral analysis, spike train},
pubstate = {published},
tppubtype = {inproceedings}
}
Koblents, Eugenia; Miguez, Joaquin
Importance Sampling with Transformed Weights Proceedings Article
En: Data Assimilation Workshop, Oxford–Man Institute, Oxford, 2012.
@inproceedings{Koblents2012,
  title     = {Importance Sampling with Transformed Weights},
  author    = {Eugenia Koblents and Joaquin Miguez},
  url       = {http://www.oxford-man.ox.ac.uk/sites/default/files/events/Mon_24_JoaquinMiguez_06FINAL.pdf},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Data Assimilation Workshop, Oxford\textendash Man Institute},
  address   = {Oxford},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olmos, Pablo M; Perez-Cruz, Fernando; Salamanca, Luis; Murillo-Fuentes, Juan Jose
Finite-Length Analysis of the TEP Decoder for LDPC Ensembles over the BEC Proceedings Article
En: 2012 IEEE International Symposium on Information Theory Proceedings, pp. 2346–2350, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, BEC, binary codes, binary erasure channel, Decoding, Error analysis, error probability, finite-length analysis, LDPC ensembles, low-density parity check ensembles, parity check codes, TEP decoder, Trajectory, tree-expectation propagation algorithm, waterfall region
@inproceedings{Olmos2012a,
title = {Finite-Length Analysis of the {TEP} Decoder for {LDPC} Ensembles over the {BEC}},
author = {Pablo M Olmos and Fernando Perez-Cruz and Luis Salamanca and Juan Jose Murillo-Fuentes},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6283932},
issn = {2157-8095},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Symposium on Information Theory Proceedings},
pages = {2346--2350},
publisher = {IEEE},
address = {Cambridge, MA},
abstract = {In this work, we analyze the finite-length performance of low-density parity check (LDPC) ensembles decoded over the binary erasure channel (BEC) using the tree-expectation propagation (TEP) algorithm. In a previous paper, we showed that the TEP improves the BP performance for decoding regular and irregular short LDPC codes, but the perspective was mainly empirical. In this work, given the degree-distribution of an LDPC ensemble, we explain and predict the range of code lengths for which the TEP improves the BP solution. In addition, for LDPC ensembles that present a single critical point, we propose a scaling law to accurately predict the performance in the waterfall region. These results are of critical importance to design practical LDPC codes for the TEP decoder.},
keywords = {Approximation methods, BEC, binary codes, binary erasure channel, Decoding, Error analysis, error probability, finite-length analysis, LDPC ensembles, low-density parity check ensembles, parity check codes, TEP decoder, Trajectory, tree-expectation propagation algorithm, waterfall region},
pubstate = {published},
tppubtype = {inproceedings}
}
Pastore, Adriano; Koch, Tobias; Fonollosa, Javier Rodriguez
Improved Capacity Lower Bounds for Fading Channels with Imperfect CSI Using Rate Splitting Proceedings Article
En: 2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel, pp. 1–5, IEEE, Eilat, 2012, ISBN: 978-1-4673-4681-8.
Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, channel capacity lower bounds, conditional entropy, Decoding, Entropy, Fading, fading channels, Gaussian channel, Gaussian channels, Gaussian random variable, imperfect channel-state information, imperfect CSI, independent Gaussian variables, linear minimum mean-square error, mean square error methods, Medard lower bound, Mutual information, Random variables, rate splitting approach, Resource management, Upper bound, wireless communications
@inproceedings{Pastore2012,
title = {Improved Capacity Lower Bounds for Fading Channels with Imperfect {CSI} Using Rate Splitting},
author = {Adriano Pastore and Tobias Koch and Javier Rodriguez Fonollosa},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6377031},
isbn = {978-1-4673-4681-8},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel},
pages = {1--5},
publisher = {IEEE},
address = {Eilat},
abstract = {As shown by Medard (“The effect upon channel capacity in wireless communications of perfect and imperfect knowledge of the channel,” IEEE Trans. Inform. Theory, May 2000), the capacity of fading channels with imperfect channel-state information (CSI) can be lower-bounded by assuming a Gaussian channel input X, and by upper-bounding the conditional entropy h(X|Y, Ĥ), conditioned on the channel output Y and the CSI Ĥ, by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating X from (Y, Ĥ). We demonstrate that, by using a rate-splitting approach, this lower bound can be sharpened: we show that by expressing the Gaussian input X as the sum of two independent Gaussian variables X(1) and X(2), and by applying Medard's lower bound first to analyze the mutual information between X(1) and Y conditioned on Ĥ while treating X(2) as noise, and by applying the lower bound then to analyze the mutual information between X(2) and Y conditioned on (X(1), Ĥ), we obtain a lower bound on the capacity that is larger than Medard's lower bound.},
keywords = {channel capacity, channel capacity lower bounds, conditional entropy, Decoding, Entropy, Fading, fading channels, Gaussian channel, Gaussian channels, Gaussian random variable, imperfect channel-state information, imperfect CSI, independent Gaussian variables, linear minimum mean-square error, mean square error methods, Medard lower bound, Mutual information, Random variables, rate splitting approach, Resource management, Upper bound, wireless communications},
pubstate = {published},
tppubtype = {inproceedings}
}
Ruiz, Francisco J R; Valera, Isabel; Blanco, Carlos; Perez-Cruz, Fernando
Bayesian Nonparametric Modeling of Suicide Attempts Proceedings Article
En: Advances in Neural Information Processing Systems 25, Lake Tahoe, 2012.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Ruiz2012,
title = {{Bayesian} Nonparametric Modeling of Suicide Attempts},
author = {Francisco J R Ruiz and Isabel Valera and Carlos Blanco and Fernando Perez-Cruz},
url = {http://nips.cc/Conferences/2012/Program/event.php?ID=3582},
year = {2012},
date = {2012-01-01},
booktitle = {Advances in Neural Information Processing Systems 25},
address = {Lake Tahoe},
abstract = {The National Epidemiologic Survey on Alcohol and Related Conditions (NESARC) database contains a large amount of information, regarding the way of life, medical conditions, depression, etc., of a representative sample of the U.S. population. In the present paper, we are interested in seeking the hidden causes behind the suicide attempts, for which we propose to model the subjects using a nonparametric latent model based on the Indian Buffet Process (IBP). Due to the nature of the data, we need to adapt the observation model for discrete random variables. We propose a generative model in which the observations are drawn from a multinomial-logit distribution given the IBP matrix. The implementation of an efficient Gibbs sampler is accomplished using the Laplace approximation, which allows us to integrate out the weighting factors of the multinomial-logit likelihood model. Finally, the experiments over the NESARC database show that our model properly captures some of the hidden causes that model suicide attempts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhong, Jingshan; Dauwels, Justin; Vazquez, Manuel A; Waller, Laura
Efficient Gaussian Inference Algorithms for Phase Imaging Proceedings Article
En: 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 617–620, IEEE, Kyoto, 2012, ISSN: 1520-6149.
Resumen | Enlaces | BibTeX | Etiquetas: biomedical optical imaging, complex optical field, computational complexity, defocus distances, Fourier domain, Gaussian inference algorithms, image sequences, inference mechanisms, intensity image sequence, iterative Kalman smoothing, iterative methods, Kalman filter, Kalman filters, Kalman recursions, linear model, Manganese, Mathematical model, medical image processing, Noise, noisy intensity image, nonlinear observation model, Optical imaging, Optical sensors, Phase imaging, phase inference algorithms, smoothing methods
@inproceedings{Zhong2012a,
title = {Efficient {Gaussian} Inference Algorithms for Phase Imaging},
author = {Jingshan Zhong and Justin Dauwels and Manuel A Vazquez and Laura Waller},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6287959},
issn = {1520-6149},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {617--620},
publisher = {IEEE},
address = {Kyoto},
abstract = {Novel efficient algorithms are developed to infer the phase of a complex optical field from a sequence of intensity images taken at different defocus distances. The non-linear observation model is approximated by a linear model. The complex optical field is inferred by iterative Kalman smoothing in the Fourier domain: forward and backward sweeps of Kalman recursions are alternated, and in each such sweep, the approximate linear model is refined. By limiting the number of iterations, one can trade off accuracy vs. complexity. The complexity of each iteration in the proposed algorithm is in the order of $N \log N$, where N is the number of pixels per image. The storage required scales linearly with N. In contrast, the complexity of existing phase inference algorithms scales with $N^3$ and the required storage with $N^2$. The proposed algorithms may enable real-time estimation of optical fields from noisy intensity images.},
keywords = {biomedical optical imaging, complex optical field, computational complexity, defocus distances, Fourier domain, Gaussian inference algorithms, image sequences, inference mechanisms, intensity image sequence, iterative Kalman smoothing, iterative methods, Kalman filter, Kalman filters, Kalman recursions, linear model, Manganese, Mathematical model, medical image processing, Noise, noisy intensity image, nonlinear observation model, Optical imaging, Optical sensors, Phase imaging, phase inference algorithms, smoothing methods},
pubstate = {published},
tppubtype = {inproceedings}
}
Campo, Adria Tauste; Vazquez-Vilar, Gonzalo; i Fàbregas, Albert Guillen; Koch, Tobias; Martinez, Alfonso
Random Coding Bounds that Attain the Joint Source-Channel Exponent Proceedings Article
En: 2012 46th Annual Conference on Information Sciences and Systems (CISS), pp. 1–5, IEEE, Princeton, 2012, ISBN: 978-1-4673-3140-1.
Resumen | Enlaces | BibTeX | Etiquetas: code construction, combined source-channel coding, Csiszár error exponent, Ducts, error probability, error statistics, Gallager exponent, joint source-channel coding, joint source-channel exponent, random codes, random-coding upper bound, Yttrium
@inproceedings{Campo2012,
title = {Random Coding Bounds that Attain the Joint Source-Channel Exponent},
author = {Adria Tauste Campo and Gonzalo Vazquez-Vilar and {Guillen i F\`{a}bregas}, Albert and Tobias Koch and Alfonso Martinez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6310910},
isbn = {978-1-4673-3140-1},
year = {2012},
date = {2012-01-01},
booktitle = {2012 46th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--5},
publisher = {IEEE},
address = {Princeton},
abstract = {This paper presents a random-coding upper bound on the average error probability of joint source-channel coding that attains Csisz\'{a}r's error exponent. The bound is based on a code construction for which source messages are assigned to disjoint subsets (classes), and codewords are generated according to a distribution that depends on the class of the source message. For a single class, the bound recovers Gallager's exponent; identifying the classes with source type classes, it recovers Csisz\'{a}r's exponent. Moreover, it is shown that two appropriately designed classes are sufficient to attain Csisz\'{a}r's exponent.},
keywords = {code construction, combined source-channel coding, Csisz\'{a}r error exponent, Ducts, error probability, error statistics, Gallager exponent, joint source-channel coding, joint source-channel exponent, random codes, random-coding upper bound, Yttrium},
pubstate = {published},
tppubtype = {inproceedings}
}
Campo, Adria Tauste; Vazquez-Vilar, Gonzalo; i Fabregas, Albert Guillen; Koch, Tobias; Martinez, Alfonso
Achieving Csiszár's Source-Channel Coding Exponent with Product Distributions Proceedings Article
En: 2012 IEEE International Symposium on Information Theory Proceedings, pp. 1548–1552, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095.
Resumen | Enlaces | BibTeX | Etiquetas: average probability of error, Channel Coding, code construction, codewords, Csiszár's source-channel coding, Decoding, Encoding, error probability, error statistics, Joints, Manganese, product distributions, random codes, random-coding upper bound, source coding, source messages, Upper bound
@inproceedings{Campo2012a,
title = {Achieving {Csisz\'{a}r's} Source-Channel Coding Exponent with Product Distributions},
author = {Adria Tauste Campo and Gonzalo Vazquez-Vilar and {Guillen i Fabregas}, Albert and Tobias Koch and Alfonso Martinez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6283524},
issn = {2157-8095},
year = {2012},
date = {2012-01-01},
booktitle = {2012 IEEE International Symposium on Information Theory Proceedings},
pages = {1548--1552},
publisher = {IEEE},
address = {Cambridge, MA},
abstract = {We derive a random-coding upper bound on the average probability of error of joint source-channel coding that recovers Csisz\'{a}r's error exponent when used with product distributions over the channel inputs. Our proof technique for the error probability analysis employs a code construction for which source messages are assigned to subsets and codewords are generated with a distribution that depends on the subset.},
keywords = {average probability of error, Channel Coding, code construction, codewords, Csisz\'{a}r's source-channel coding, Decoding, Encoding, error probability, error statistics, Joints, Manganese, product distributions, random codes, random-coding upper bound, source coding, source messages, Upper bound},
pubstate = {published},
tppubtype = {inproceedings}
}
Taborda, Camilo G; Perez-Cruz, Fernando
Mutual Information and Relative Entropy over the Binomial and Negative Binomial Channels Proceedings Article
En: 2012 IEEE International Symposium on Information Theory Proceedings, pp. 696–700, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095.
Resumen | Enlaces | BibTeX | Etiquetas: Channel estimation, conditional mean estimation, Entropy, Estimation, estimation theoretical quantity, estimation theory, Gaussian channel, Gaussian channels, information theory concept, loss function, mean square error methods, Mutual information, negative binomial channel, Poisson channel, Random variables, relative entropy
@inproceedings{Taborda2012a,
  author    = {Camilo G Taborda and Fernando Perez-Cruz},
  title     = {Mutual Information and Relative Entropy over the Binomial and Negative Binomial Channels},
  booktitle = {2012 IEEE International Symposium on Information Theory Proceedings},
  pages     = {696--700},
  publisher = {IEEE},
  address   = {Cambridge, MA},
  year      = {2012},
  date      = {2012-01-01},
  issn      = {2157-8095},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6284304},
  abstract  = {We study the relation of the mutual information and relative entropy over the Binomial and Negative Binomial channels with estimation theoretical quantities, in which we extend already known results for Gaussian and Poisson channels. We establish general expressions for these information theory concepts with a direct connection with estimation theory through the conditional mean estimation and a particular loss function.},
  keywords  = {Channel estimation, conditional mean estimation, Entropy, Estimation, estimation theoretical quantity, estimation theory, Gaussian channel, Gaussian channels, information theory concept, loss function, mean square error methods, Mutual information, negative binomial channel, Poisson channel, Random variables, relative entropy},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Salamanca, Luis; Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Tree-Structured Expectation Propagation for LDPC Decoding in AWGN Channels Proceedings Article
En: Proceedings of the Information Theory and Applications Workshop (ITA), San Diego, 2012.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Salamanca2012a,
title = {Tree-Structured Expectation Propagation for {LDPC} Decoding in {AWGN} Channels},
author = {Luis Salamanca and Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://www.researchgate.net/publication/236006591_Tree-structured_expectation_propagation_for_LDPC_decoding_in_AWGN_channels},
year = {2012},
date = {2012-01-01},
booktitle = {Proceedings of the Information Theory and Applications Workshop (ITA)},
address = {San Diego},
abstract = {In this paper, we propose the tree-structured expectation propagation (TEP) algorithm for low-density parity-check (LDPC) decoding over the additive white Gaussian noise (AWGN) channel. By imposing a tree-like approximation over the graphical model of the code, this algorithm introduces pairwise marginal constraints over pairs of variables, which provide joint information of the variables related. Thanks to this, the proposed TEP decoder improves the performance of the standard belief propagation (BP) solution. An efficient way of constructing the tree-like structure is also described. The simulation results illustrate the TEP decoder gain in the finite-length regime, compared to the standard BP solution. For code lengths shorter than n = 512, the gain in the waterfall region achieves up to 0.25 dB. We also notice a remarkable reduction of the error floor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Henao-Mazo, W; Bravo-Santos, Ángel M
Finding Diverse Shortest Paths for the Routing Task in Wireless Sensor Networks Proceedings Article
En: ICSNC 2012. The Seventh International Conference on Systems and Networks Communications, Lisboa, 2012.
Resumen | Enlaces | BibTeX | Etiquetas: Diverse Paths, K Shortest Paths, Wireless Sensor Networks
@inproceedings{Henao-Mazo2012,
title = {Finding Diverse Shortest Paths for the Routing Task in Wireless Sensor Networks},
author = {W Henao-Mazo and \'{A}ngel M Bravo-Santos},
url = {http://www.iaria.org/conferences2012/ProgramICSNC12.html},
year = {2012},
date = {2012-01-01},
booktitle = {ICSNC 2012. The Seventh International Conference on Systems and Networks Communications},
address = {Lisboa},
abstract = {Wireless Sensor Networks are deployed with the idea of collecting field information of different variables like temperature, position, humidity, etc., from several resource-constrained sensor nodes, and then relay those data to a sink node or base station. Therefore, the path finding for routing must be carried out with strategies that make it possible to manage efficiently the network limited resources, whilst at the same time the network throughput is kept within appreciable levels. Many routing schemes search for one path, with low power dissipation that may not be convenient to increase the network lifetime and long-term connectivity. In an attempt to overcome such eventualities, we proposed a scenario for relaying that uses multiple diverse paths obtained considering the links among network nodes, that could provide reliable data transmission. When data is transmitted across various diverse paths in the network that offer low retransmission rates, the battery demand can be decreased and network lifetime is extended. We show, by using simulations, that the reliability in packets reception and the power dissipation that our scheme offers compare favourably with similar literature implementations.},
keywords = {Diverse Paths, K Shortest Paths, Wireless Sensor Networks},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando
Coding and Approximate Inference Proceedings Article
En: Machine Learning Summer School (MLSS), La Palma, 2012.
@inproceedings{Perez-Cruz2012a,
  author    = {Fernando Perez-Cruz},
  title     = {Coding and Approximate Inference},
  booktitle = {Machine Learning Summer School (MLSS)},
  address   = {La Palma},
  year      = {2012},
  date      = {2012-01-01},
  url       = {http://mlss2012.tsc.uc3m.es/},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Achutegui, Katrin; Miguez, Joaquin; Rodas, Javier; Escudero, Carlos J
A Multi-Model Sequential Monte Carlo Methodology for Indoor Tracking: Algorithms and Experimental Results Artículo de revista
En: Signal Processing, vol. 92, no 11, pp. 2594–2613, 2012.
Resumen | Enlaces | BibTeX | Etiquetas: Data fusion, Indoor positioning, Indoor tracking, Interacting multiple models, Sequential Monte Carlo, Switching observation models
@article{Achutegui2012,
title = {A Multi-Model Sequential {Monte Carlo} Methodology for Indoor Tracking: Algorithms and Experimental Results},
author = {Katrin Achutegui and Joaquin Miguez and Javier Rodas and Carlos J Escudero},
url = {http://www.tsc.uc3m.es/~jmiguez/papers/P32_2012_%20Multi-Model%20Sequential%20Monte%20Carlo%20Methodology%20for%20Indoor%20Tracking-%20Algorithms%20and%20Experimental%20Results.pdf
http://www.sciencedirect.com/science/article/pii/S0165168412001077},
year = {2012},
date = {2012-01-01},
journal = {Signal Processing},
volume = {92},
number = {11},
pages = {2594--2613},
abstract = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as a position-dependent data measurement. Since RSS is highly influenced by multipath propagation, it turns out very hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. Although various models have been proposed in the literature, they often require the use of very large collections of data in order to fit them and display great sensitivity to changes in the radio propagation environment. In this work we advocate the use of switching multiple models that account for different classes of target dynamics and propagation environments and propose a flexible probabilistic switching scheme. The resulting state-space structure is termed a generalized switching multiple model (GSMM) system. Within this framework, we investigate two types of models for the RSS data: polynomial models and classical logarithmic path-loss representation. The first model is more accurate however it demands an offline model fitting step. The second one is less precise but it can be fitted in an online procedure. We have designed two tracking algorithms built around a Rao-Blackwellized particle filter, tailored to the GSMM structure and assessed its performances both with synthetic and experimental measurements.},
keywords = {Data fusion, Indoor positioning, Indoor tracking, Interacting multiple models, Sequential Monte Carlo, Switching observation models},
pubstate = {published},
tppubtype = {article}
}
Martino, Luca; Olmo, Victor Pascual Del; Read, Jesse
A Multi-Point Metropolis Scheme with Generic Weight Functions Artículo de revista
En: Statistics & Probability Letters, vol. 82, no 7, pp. 1445–1453, 2012.
Resumen | Enlaces | BibTeX | Etiquetas: MCMC methods, Multi-point Metropolis algorithm, Multiple Try Metropolis algorithm
@article{Martino2012,
title = {A Multi-Point {Metropolis} Scheme with Generic Weight Functions},
author = {Luca Martino and Victor Pascual Del Olmo and Jesse Read},
url = {http://www.sciencedirect.com/science/article/pii/S0167715212001514},
year = {2012},
date = {2012-01-01},
journal = {Statistics \& Probability Letters},
volume = {82},
number = {7},
pages = {1445--1453},
abstract = {The multi-point Metropolis algorithm is an advanced MCMC technique based on drawing several correlated samples at each step and choosing one of them according to some normalized weights. We propose a variation of this technique where the weight functions are not specified, i.e., the analytic form can be chosen arbitrarily. This has the advantage of greater flexibility in the design of high-performance MCMC samplers. We prove that our method fulfills the balance condition, and provide a numerical simulation. We also give new insight into the functionality of different MCMC algorithms, and the connections between them.},
keywords = {MCMC methods, Multi-point Metropolis algorithm, Multiple Try Metropolis algorithm},
pubstate = {published},
tppubtype = {article}
}
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Bayesian Equalization for LDPC Channel Decoding Artículo de revista
En: IEEE Transactions on Signal Processing, vol. 60, no 5, pp. 2672–2676, 2012, ISSN: 1053-587X.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, Bayes methods, Bayesian equalization, Bayesian estimation problem, Bayesian inference, Bayesian methods, BCJR (Bahl–Cocke–Jelinek–Raviv) algorithm, BCJR algorithm, Channel Coding, channel decoding, channel equalization, channel equalization problem, Channel estimation, channel state information, CSI, Decoding, equalisers, Equalizers, expectation propagation, expectation propagation algorithm, fading channels, graphical model representation, intersymbol interference, Kullback-Leibler divergence, LDPC, LDPC coding, low-density parity-check decoder, Modulation, parity check codes, symbol posterior estimates, Training
@article{Salamanca2012b,
title = {{Bayesian} Equalization for {LDPC} Channel Decoding},
author = {Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6129544},
issn = {1053-587X},
year = {2012},
date = {2012-01-01},
journal = {IEEE Transactions on Signal Processing},
volume = {60},
number = {5},
pages = {2672--2676},
abstract = {We describe the channel equalization problem, and its prior estimate of the channel state information (CSI), as a joint Bayesian estimation problem to improve each symbol posterior estimates at the input of the channel decoder. Our approach takes into consideration not only the uncertainty due to the noise in the channel, but also the uncertainty in the CSI estimate. However, this solution cannot be computed in linear time, because it depends on all the transmitted symbols. Hence, we also put forward an approximation for each symbol's posterior, using the expectation propagation algorithm, which is optimal from the Kullback-Leibler divergence viewpoint and yields an equalization with a complexity identical to the BCJR algorithm. We also use a graphical model representation of the full posterior, in which the proposed approximation can be readily understood. The proposed posterior estimates are more accurate than those computed using the ML estimate for the CSI. In order to illustrate this point, we measure the error rate at the output of a low-density parity-check decoder, which needs the exact posterior for each symbol to detect the incoming word and it is sensitive to a mismatch in those posterior estimates. For example, for QPSK modulation and a channel with three taps, we can expect gains over 0.5 dB with same computational complexity as the ML receiver.},
keywords = {Approximation methods, Bayes methods, Bayesian equalization, Bayesian estimation problem, Bayesian inference, Bayesian methods, BCJR (Bahl\textendashCocke\textendashJelinek\textendashRaviv) algorithm, BCJR algorithm, Channel Coding, channel decoding, channel equalization, channel equalization problem, Channel estimation, channel state information, CSI, Decoding, equalisers, Equalizers, expectation propagation, expectation propagation algorithm, fading channels, graphical model representation, intersymbol interference, Kullback-Leibler divergence, LDPC, LDPC coding, low-density parity-check decoder, Modulation, parity check codes, symbol posterior estimates, Training},
pubstate = {published},
tppubtype = {article}
}
Landa-Torres, Itziar; Ortiz-Garcia, Emilio G; Salcedo-Sanz, Sancho; Segovia-Vargas, María J; Gil-Lopez, Sergio; Miranda, Marta; Leiva-Murillo, Jose M; Ser, Javier Del
Evaluating the Internationalization Success of Companies Through a Hybrid Grouping Harmony Search—Extreme Learning Machine Approach Artículo de revista
En: IEEE Journal of Selected Topics in Signal Processing, vol. 6, no 4, pp. 388–398, 2012, ISSN: 1932-4553.
Resumen | Enlaces | BibTeX | Etiquetas: Companies, Company internationalization, corporative strategy, diverse activity, Economics, Electronic mail, ensembles, exporting, exporting performance, external markets, extreme learning machine ensemble, extreme learning machines, feature selection method, grouping-based harmony search, hard process, harmony search (HS), hybrid algorithm, hybrid algorithms, hybrid grouping harmony search-extreme learning machine, hybrid soft computing, international company, international trade, internationalization procedure, internationalization success, learning (artificial intelligence), Machine learning, organizational structure, Signal processing algorithms, Spanish manufacturing company, Training, value chain
@article{Landa-Torres2012,
title = {Evaluating the Internationalization Success of Companies Through a Hybrid Grouping Harmony Search\textemdash{}Extreme Learning Machine Approach},
author = {Itziar Landa-Torres and Emilio G Ortiz-Garcia and Sancho Salcedo-Sanz and Mar\'{i}a J Segovia-Vargas and Sergio Gil-Lopez and Marta Miranda and Jose M Leiva-Murillo and Javier Del Ser},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6200298},
issn = {1932-4553},
year = {2012},
date = {2012-01-01},
journal = {IEEE Journal of Selected Topics in Signal Processing},
volume = {6},
number = {4},
pages = {388--398},
abstract = {The internationalization of a company is widely understood as the corporative strategy for growing through external markets. It usually embodies a hard process, which affects diverse activities of the value chain and impacts on the organizational structure of the company. There is not a general model for a successful international company, so the success of an internationalization procedure must be estimated based on different variables addressing the status, strategy and market characteristics of the company at hand. This paper presents a novel hybrid soft-computing approach for evaluating the internationalization success of a company based on existing past data. Specifically, we propose a hybrid algorithm composed by a grouping-based harmony search (HS) approach and an extreme learning machine (ELM) ensemble. The proposed hybrid scheme further incorporates a feature selection method, which is obtained by means of a given group in the HS encoding format, whereas the ELM ensemble renders the final accuracy metric of the model. Practical results for the proposed hybrid technique are obtained in a real application based on the exporting success of Spanish manufacturing companies, which are shown to be satisfactory in comparison with alternative state-of-the-art techniques.},
keywords = {Companies, Company internationalization, corporative strategy, diverse activity, Economics, Electronic mail, ensembles, exporting, exporting performance, external markets, extreme learning machine ensemble, extreme learning machines, feature selection method, grouping-based harmony search, hard process, harmony search (HS), hybrid algorithm, hybrid algorithms, hybrid grouping harmony search-extreme learning machine, hybrid soft computing, international company, international trade, internationalization procedure, internationalization success, learning (artificial intelligence), Machine learning, organizational structure, Signal processing algorithms, Spanish manufacturing company, Training, value chain},
pubstate = {published},
tppubtype = {article}
}
Luengo, David; Miguez, Joaquin; Martino, Luca
Efficient Sampling from Truncated Bivariate Gaussians via Box-Muller Transformation Artículo de revista
En: Electronics Letters, vol. 48, no 24, pp. 1533–1534, 2012, ISSN: 0013-5194.
Resumen | Enlaces | BibTeX | Etiquetas:
@article{Luengo2012a,
title = {Efficient Sampling from Truncated Bivariate {Gaussians} via {Box-Muller} Transformation},
author = {David Luengo and Joaquin Miguez and Luca Martino},
url = {http://www.tsc.uc3m.es/~jmiguez/papers/P35_2012_Efficient%20Sampling%20from%20Truncated%20Bivariate%20Gaussians%20via%20Box-Muller%20Transformation.pdf
http://www.researchgate.net/publication/235004345_Efficient_Sampling_from_Truncated_Bivariate_Gaussians_via_the_Box-Muller_Transformation},
issn = {0013-5194},
year = {2012},
date = {2012-01-01},
journal = {Electronics Letters},
volume = {48},
number = {24},
pages = {1533--1534},
abstract = {Many practical simulation tasks demand procedures to draw samples efficiently from multivariate truncated Gaussian distributions. In this work, we introduce a novel rejection approach, based on the Box-Muller transformation, to generate samples from a truncated bivariate Gaussian density with an arbitrary support. Furthermore, for an important class of support regions the new method allows us to achieve exact sampling, thus becoming the most efficient approach possible. Introduction: The numerical simulation of many systems of practical interest demands the ability to produce Monte Carlo samples from truncated Gaussian distributions [5, 3, 7]. The simplest way to address this problem is to perform rejection sampling using the corresponding (non-truncated) Gaussian distribution as a proposal. This trivial method produces independent and identically distributed (i.i.d.) samples, but it is time consuming and computationally inefficient. For these two reasons, different methods have been introduced in the literature, e.g., using MCMC techniques [5, 7] or rejection sampling [1]. Unfortunately, MCMC schemes produce correlated samples, which can lead to a very slow convergence of the chain, whereas rejection methods can be computationally inefficient. In this paper, we introduce a novel approach, based on the Box-Muller transformation (BMT) [2], to generate i.i.d. samples from truncated bivariate Gaussian distributions. The main advantages of the proposed approach are the following: (1) it allows sampling within a generic domain D ⊆ R 2 without any restriction and (2) the inverse transformation of the BMT maps any region D ⊆ R 2 (either bounded or unbounded) into a bounded region, A ⊆ R = [0, 1] × [0, 1]. Hence, all the procedures developed for drawing efficiently uniform random variables within bounded regions, e.g., adaptive rejection sampling or strip methods [2, 4], can always be used. 
Furthermore, for an important class of support regions the BMT allows us to perform exact sampling (i.e., draw i.i.d. samples from the target distribution without any rejection), which is the most efficient situation possible. Problem Formulation: The problem considered here is related to drawing samples from a truncated multivariate Gaussian distribution. In particular, in this letter we focus on drawing samples from a bivariate truncated standard Gaussian PDF, denoted as Z ∼ T N (0, I, D), where the support domain D ⊆ R 2 is a non-null Borel set. Note that drawing samples from a non-truncated standard Gaussian distribution, Z ∼ N (0, I), enables us to draw samples from an arbitrary Gaussian distribution, X ∼ N (µ, $\Sigma$), whenever $\Sigma$ is positive definite. More precisely, since $\Sigma$ is positive definite, it can be expressed as $\Sigma$ = SS , using for instance the Cholesky decomposition, and the random vector X = SZ + µ has the desired distribution, X ∼ N (µ, $\Sigma$). Similarly, sampling from a truncated bivariate standard Gaussian distribution allows us to generate samples from an arbitrary truncated bivariate Gaussian. In this case, if Z ∼ T N (0, I, D), then we can obtain X ∼ T N (µ, $\Sigma$, D *) simply through the transformation X = SZ + µ, with $\Sigma$ = SS and},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio
Algorithms for Maximum-Likelihood Bandwidth Selection in Kernel Density Estimators Artículo de revista
En: Pattern Recognition Letters, vol. 33, no 13, pp. 1717–1724, 2012, ISSN: 01678655.
Resumen | Enlaces | BibTeX | Etiquetas: Kernel density estimation, Multivariate density modeling, Pattern recognition
@article{Leiva-Murillo2012,
  title     = {Algorithms for Maximum-Likelihood Bandwidth Selection in Kernel Density Estimators},
  author    = {Jose M Leiva-Murillo and Antonio Art\'{e}s-Rodr\'{i}guez},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P45_2012_Algorithms%20for%20Maximum%20Likelihood%20Bandwidth%20Selection%20in%20Kernel%20Density%20Estimators.pdf
http://www.sciencedirect.com/science/article/pii/S0167865512001948},
  issn      = {0167-8655},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Pattern Recognition Letters},
  volume    = {33},
  number    = {13},
  pages     = {1717--1724},
  publisher = {Elsevier Science Inc.},
  abstract  = {In machine learning and statistics, kernel density estimators are rarely used on multivariate data due to the difficulty of finding an appropriate kernel bandwidth to overcome overfitting. However, the recent advances on information-theoretic learning have revived the interest on these models. With this motivation, in this paper we revisit the classical statistical problem of data-driven bandwidth selection by cross-validation maximum likelihood for Gaussian kernels. We find a solution to the optimization problem under both the spherical and the general case where a full covariance matrix is considered for the kernel. The fixed-point algorithms proposed in this paper obtain the maximum likelihood bandwidth in few iterations, without performing an exhaustive bandwidth search, which is unfeasible in the multivariate case. The convergence of the methods proposed is proved. A set of classification experiments are performed to prove the usefulness of the obtained models in pattern recognition.},
  keywords  = {Kernel density estimation, Multivariate density modeling, Pattern recognition},
  pubstate  = {published},
  tppubtype = {article}
}
Maiz, Cristina S; Molanes-Lopez, Elisa M; Miguez, Joaquin; Djuric, Petar M
A Particle Filtering Scheme for Processing Time Series Corrupted by Outliers Artículo de revista
En: IEEE Transactions on Signal Processing, vol. 60, no 9, pp. 4611–4627, 2012, ISSN: 1053-587X.
Resumen | Enlaces | BibTeX | Etiquetas: Kalman filters, Mathematical model, nonlinear state space model, Outlier detection, prediction theory, predictive distribution, Probability density function, State-space methods, state-space models, statistical distributions, Target tracking, time serie processing, Vectors, Yttrium
@article{Maiz2012,
  title     = {A Particle Filtering Scheme for Processing Time Series Corrupted by Outliers},
  author    = {Cristina S Maiz and Elisa M Molanes-Lopez and Joaquin Miguez and Petar M Djuric},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P34_2012_A%20Particle%20Filtering%20Scheme%20for%20Processing%20Time%20Series%20Corrupted%20by%20Outliers.pdf http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6203606},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {9},
  pages     = {4611--4627},
  abstract  = {The literature in engineering and statistics is abounding in techniques for detecting and properly processing anomalous observations in the data. Most of these techniques have been developed in the framework of static models and it is only in recent years that we have seen attempts that address the presence of outliers in nonlinear time series. For a target tracking problem described by a nonlinear state-space model, we propose the online detection of outliers by including an outlier detection step within the standard particle filtering algorithm. The outlier detection step is implemented by a test involving a statistic of the predictive distribution of the observations, such as a concentration measure or an extreme upper quantile. We also provide asymptotic results about the convergence of the particle approximations of the predictive distribution (and its statistics) and assess the performance of the resulting algorithms by computer simulations of target tracking problems with signal power observations.},
  keywords  = {Kalman filters, Mathematical model, nonlinear state space model, Outlier detection, prediction theory, predictive distribution, Probability density function, State-space methods, state-space models, statistical distributions, Target tracking, time serie processing, Vectors, Yttrium},
  pubstate  = {published},
  tppubtype = {article}
}
Cruz-Roldan, Fernando; Dominguez-Jimenez, María Elena; Vidal, Gabriela Sansigre; Amo-Lopez, Pedro; Blanco-Velasco, Manuel; Bravo-Santos, Ángel M
On the Use of Discrete Cosine Transforms for Multicarrier Communications Artículo de revista
En: IEEE Transactions on Signal Processing, vol. 60, no 11, pp. 6085–6090, 2012, ISSN: 1053-587X.
Resumen | Enlaces | BibTeX | Etiquetas: broadband networks, carrier frequency offset, Carrier-frequency offset (CFO), CFO, channel equalization, computer simulations, Convolution, Data communication, data symbol, DCT, DFT, discrete cosine transform (DCT), discrete cosine transform domain, Discrete cosine transforms, discrete Fourier transforms, discrete multitone modulation (DMT), discrete trigonometric domain, element-by-element multiplication, equalisers, equivalent channel impulse response, linear convolution, mobile broadband wireless communication, mobile radio, Modulation, multicarrier communications, multicarrier data transmission, multicarrier modulation (MCM), multicarrier transceiver, OFDM, orthogonal frequency-division multiplexing (OFDM), Receivers, Redundancy, subcarrier equalizers, symmetric convolution-multiplication property, symmetric redundancy, time-domain analysis, transient response, transmission channel
@article{Cruz-Roldan2012,
  title     = {On the Use of Discrete Cosine Transforms for Multicarrier Communications},
  author    = {Fernando Cruz-Roldan and Mar\'{i}a Elena Dominguez-Jimenez and Gabriela Sansigre Vidal and Pedro Amo-Lopez and Manuel Blanco-Velasco and \'{A}ngel M Bravo-Santos},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6252068},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {11},
  pages     = {6085--6090},
  abstract  = {In this correspondence, the conditions to use any kind of discrete cosine transform (DCT) for multicarrier data transmission are derived. The symmetric convolution-multiplication property of each DCT implies that when symmetric convolution is performed in the time domain, an element-by-element multiplication is performed in the corresponding discrete trigonometric domain. Therefore, appending symmetric redundancy (as prefix and suffix) into each data symbol to be transmitted, and also enforcing symmetry for the equivalent channel impulse response, the linear convolution performed in the transmission channel becomes a symmetric convolution in those samples of interest. Furthermore, the channel equalization can be carried out by means of a bank of scalars in the corresponding discrete cosine transform domain. The expressions for obtaining the value of each scalar corresponding to these one-tap per subcarrier equalizers are presented. This study is completed with several computer simulations in mobile broadband wireless communication scenarios, considering the presence of carrier frequency offset (CFO). The obtained results indicate that the proposed systems outperform the standardized ones based on the DFT.},
  keywords  = {broadband networks, carrier frequency offset, Carrier-frequency offset (CFO), CFO, channel equalization, computer simulations, Convolution, Data communication, data symbol, DCT, DFT, discrete cosine transform (DCT), discrete cosine transform domain, Discrete cosine transforms, discrete Fourier transforms, discrete multitone modulation (DMT), discrete trigonometric domain, element-by-element multiplication, equalisers, equivalent channel impulse response, linear convolution, mobile broadband wireless communication, mobile radio, Modulation, multicarrier communications, multicarrier data transmission, multicarrier modulation (MCM), multicarrier transceiver, OFDM, orthogonal frequency-division multiplexing (OFDM), Receivers, Redundancy, subcarrier equalizers, symmetric convolution-multiplication property, symmetric redundancy, time-domain analysis, transient response, transmission channel},
  pubstate  = {published},
  tppubtype = {article}
}
Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio
Information-Theoretic Linear Feature Extraction Based on Kernel Density Estimators: A Review Artículo de revista
En: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), vol. 42, no 6, pp. 1180–1189, 2012, ISSN: 1094-6977.
Resumen | Enlaces | BibTeX | Etiquetas: Bandwidth, Density, detection theory, Entropy, Estimation, Feature extraction, Feature extraction (FE), information theoretic linear feature extraction, information theory, information-theoretic learning (ITL), Kernel, Kernel density estimation, kernel density estimators, Machine learning
@article{Leiva-Murillo2012a,
  title     = {Information-Theoretic Linear Feature Extraction Based on Kernel Density Estimators: A Review},
  author    = {Jose M Leiva-Murillo and Antonio Art\'{e}s-Rodr\'{i}guez},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P44_2012_Information%20Theoretic%20Linear%20Feature%20Extraction%20Based%20on%20Kernel%20Density%20Estimators%20A%20Review.pdf http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6185689},
  issn      = {1094-6977},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews)},
  volume    = {42},
  number    = {6},
  pages     = {1180--1189},
  abstract  = {In this paper, we provide a unified study of the application of kernel density estimators to supervised linear feature extraction by means of criteria inspired by information and detection theory. We enrich this study by the incorporation of two novel criteria to the study, i.e., the mutual information and the likelihood ratio test, and perform both a theoretical and an experimental comparison between the new methods and other ones previously described in the literature. The impact of the bandwidth selection of the density estimator in the classification performance is discussed. Some theoretical results that bound classification performance as a function or mutual information are also compiled. A set of experiments on different real-world datasets allows us to perform an empirical comparison of the methods, in terms of both accuracy and computational complexity. We show the suitability of these methods to determine the dimension of the subspace that contains the discriminative information.},
  keywords  = {Bandwidth, Density, detection theory, Entropy, Estimation, Feature extraction, Feature extraction (FE), information theoretic linear feature extraction, information theory, information-theoretic learning (ITL), Kernel, Kernel density estimation, kernel density estimators, Machine learning},
  pubstate  = {published},
  tppubtype = {article}
}
Luengo, David; Monzon, Sandra; Artés-Rodríguez, Antonio
Novel Fast Random Search Clustering Algorithm for Mixing Matrix Identification in MIMO Linear Blind Inverse Problems with Sparse Inputs Artículo de revista
En: Neurocomputing, vol. 87, pp. 62–78, 2012.
Resumen | Enlaces | BibTeX | Etiquetas: Line orientation clustering, Linear blind inverse problems, MIMO systems, Neyman–Pearson hypothesis test, Sparse signals
@article{Luengo2012b,
  title     = {Novel Fast Random Search Clustering Algorithm for Mixing Matrix Identification in {MIMO} Linear Blind Inverse Problems with Sparse Inputs},
  author    = {David Luengo and Sandra Monzon and Antonio Art\'{e}s-Rodr\'{i}guez},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P43_2012_Novel%20Fast%20Random%20Search%20Clustering%20Algorithm%20for%20Mixing%20Matrix%20Identification%20in%20MIMO%20Linear%20Blind%20Inverse%20Problems%20with%20Sparse%20Inputs.pdf
http://www.sciencedirect.com/science/article/pii/S0925231212000744},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Neurocomputing},
  volume    = {87},
  pages     = {62--78},
  abstract  = {In this paper we propose a novel fast random search clustering (RSC) algorithm for mixing matrix identification in multiple input multiple output (MIMO) linear blind inverse problems with sparse inputs. The proposed approach is based on the clustering of the observations around the directions given by the columns of the mixing matrix that occurs typically for sparse inputs. Exploiting this fact, the RSC algorithm proceeds by parameterizing the mixing matrix using hyperspherical coordinates, randomly selecting candidate basis vectors (i.e. clustering directions) from the observations, and accepting or rejecting them according to a binary hypothesis test based on the Neyman--Pearson criterion. The RSC algorithm is not tailored to any specific distribution for the sources, can deal with an arbitrary number of inputs and outputs (thus solving the difficult under-determined problem), and is applicable to both instantaneous and convolutive mixtures. Extensive simulations for synthetic and real data with different number of inputs and outputs, data size, sparsity factors of the inputs and signal to noise ratios confirm the good performance of the proposed approach under moderate/high signal to noise ratios.},
  keywords  = {Line orientation clustering, Linear blind inverse problems, MIMO systems, Neyman--Pearson hypothesis test, Sparse signals},
  pubstate  = {published},
  tppubtype = {article}
}
Oquendo, Maria A; Baca-García, Enrique; Artés-Rodríguez, Antonio; Perez-Cruz, Fernando; Galfalvy, H C; Blasco-Fontecilla, Hilario; Madigan, D; Duan, N
Machine Learning and Data Mining: Strategies for Hypothesis Generation Artículo de revista
En: Molecular psychiatry, vol. 17, no 10, pp. 956–959, 2012, ISSN: 1476-5578.
Resumen | Enlaces | BibTeX | Etiquetas: Artificial Intelligence, Biological, Data Mining, Humans, Mental Disorders, Mental Disorders: diagnosis, Mental Disorders: therapy, Models
@article{Oquendo2012,
  title     = {Machine Learning and Data Mining: Strategies for Hypothesis Generation},
  author    = {Maria A Oquendo and Enrique Baca-Garc\'{i}a and Antonio Art\'{e}s-Rodr\'{i}guez and Fernando Perez-Cruz and H C Galfalvy and Hilario Blasco-Fontecilla and D Madigan and N Duan},
  url       = {http://www.ncbi.nlm.nih.gov/pubmed/22230882},
  issn      = {1476-5578},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Molecular psychiatry},
  volume    = {17},
  number    = {10},
  pages     = {956--959},
  abstract  = {Strategies for generating knowledge in medicine have included observation of associations in clinical or research settings and more recently, development of pathophysiological models based on molecular biology. Although critically important, they limit hypothesis generation to an incremental pace. Machine learning and data mining are alternative approaches to identifying new vistas to pursue, as is already evident in the literature. In concert with these analytic strategies, novel approaches to data collection can enhance the hypothesis pipeline as well. In data farming, data are obtained in an `organic' way, in the sense that it is entered by patients themselves and available for harvesting. In contrast, in evidence farming (EF), it is the provider who enters medical data about individual patients. EF differs from regular electronic medical record systems because frontline providers can use it to learn from their own past experience. In addition to the possibility of generating large databases with farming approaches, it is likely that we can further harness the power of large data sets collected using either farming or more standard techniques through implementation of data-mining and machine-learning strategies. Exploiting large databases to develop new hypotheses regarding neurobiological and genetic underpinnings of psychiatric illness is useful in itself, but also affords the opportunity to identify novel mechanisms to be targeted in drug discovery and development.},
  keywords  = {Artificial Intelligence, Biological, Data Mining, Humans, Mental Disorders, Mental Disorders: diagnosis, Mental Disorders: therapy, Models},
  pubstate  = {published},
  tppubtype = {article}
}
Reyes-Guerrero, J C; Murillo-Fuentes, Juan Jose; Olmos, Pablo M
Remote Detection of Interfered Downlinks in Wireless Cellular Systems Artículo de revista
En: Transactions on Emerging Telecommunications Technologies, vol. 23, no 5, pp. 444–453, 2012, ISSN: 21613915.
Resumen | Enlaces | BibTeX | Etiquetas:
@article{Reyes-Guerrero2012,
  title     = {Remote Detection of Interfered Downlinks in Wireless Cellular Systems},
  author    = {J C Reyes-Guerrero and Juan Jose Murillo-Fuentes and Pablo M Olmos},
  url       = {http://doi.wiley.com/10.1002/ett.2501},
  issn      = {2161-3915},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Transactions on Emerging Telecommunications Technologies},
  volume    = {23},
  number    = {5},
  pages     = {444--453},
  abstract  = {This work provides a novel technological solution to jamming in wireless systems, particularly to remotely detect interfered communications in a cellular network. The new system is focused on the detection of a failure in a link between a base station and a fixed wireless terminal located in a residential or business area. It has an important impact in security systems based on wireless terminals to transmit an alarm to a central station. In these systems, non-authorised people can prevent the transmission of the alarm by using a short-range jammer. The main advantage of this proposal is that it is non-intrusive; that is, no modification is needed in the base station, and no protocol modification is performed in the terminal. The detection is implemented in an external unit developed on a software-defined radio platform. The novel system proposed is valid for any cellular system and operator. In this work, we focus on its implementation in the GSM/GPRS system to illustrate its benefits and outline the method for Universal Mobile Telecommunications System. We describe the results of some experiments where the system successfully detects the presence of a short-range jammer in a real scenario.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Olmos, Pablo M; Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
On the Design of LDPC-Convolutional Ensembles Using the TEP Decoder Artículo de revista
En: IEEE Communications Letters, vol. 16, no 5, pp. 726–729, 2012, ISSN: 1089-7798.
Resumen | Enlaces | BibTeX | Etiquetas: belief propagation decoding, binary erasure channel, channel capacity, Complexity theory, convolutional codes, convolutional LDPC codes, Decoding, design, Error analysis, finite-length analysis, Iterative decoding, LDPC-convolutional ensemble design, LDPCC code decoding, low-density parity-check convolutional code, parity check codes, tree-expectation propagation decoder, tree-structured expectation propagation, window-sliding scheme
@article{Olmos2012b,
  title     = {On the Design of {LDPC}-Convolutional Ensembles Using the {TEP} Decoder},
  author    = {Pablo M Olmos and Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6168872},
  issn      = {1089-7798},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Communications Letters},
  volume    = {16},
  number    = {5},
  pages     = {726--729},
  abstract  = {Low-density parity-check convolutional (LDPCC) codes asymptotically achieve channel capacity under belief propagation (BP) decoding. In this paper, we decode LDPCC codes using the Tree-Expectation Propagation (TEP) decoder, recently proposed as an alternative decoding method to the BP algorithm for the binary erasure channel (BEC). We show that, for LDPCC codes, the TEP decoder improves the BP solution with a comparable complexity or, alternatively, it allows using shorter codes to achieve similar error rates. We also propose a window-sliding scheme for the TEP decoder to reduce the decoding latency.},
  keywords  = {belief propagation decoding, binary erasure channel, channel capacity, Complexity theory, convolutional codes, convolutional LDPC codes, Decoding, design, Error analysis, finite-length analysis, Iterative decoding, LDPC-convolutional ensemble design, LDPCC code decoding, low-density parity-check convolutional code, parity check codes, tree-expectation propagation decoder, tree-structured expectation propagation, window-sliding scheme},
  pubstate  = {published},
  tppubtype = {article}
}
Luengo, David; Martino, Luca
Efficient Random Variable Generation: Ratio of Uniforms and Polar Rejection Sampling Artículo de revista
En: Electronics Letters, vol. 48, no 6, pp. 326–327, 2012, ISSN: 00135194.
Resumen | Enlaces | BibTeX | Etiquetas:
@article{Luengo2012bb,
  title     = {Efficient Random Variable Generation: Ratio of Uniforms and Polar Rejection Sampling},
  author    = {David Luengo and Luca Martino},
  url       = {http://digital-library.theiet.org/content/journals/10.1049/el.2012.0206},
  issn      = {0013-5194},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Electronics Letters},
  volume    = {48},
  number    = {6},
  pages     = {326--327},
  abstract  = {Monte Carlo techniques, which require the generation of samples from some target density, are often the only alternative for performing Bayesian inference. Two classic sampling techniques to draw independent samples are the ratio of uniforms (RoU) and rejection sampling (RS). An efficient sampling algorithm is proposed combining the RoU and polar RS (i.e. RS inside a sector of a circle using polar coordinates). Its efficiency is shown in drawing samples from truncated Cauchy and Gaussian random variables, which have many important applications in signal processing and communications.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Read, Jesse; Bifet, Albert; Pfahringer, Bernhard; Holmes, Geoff
Batch-Incremental versus Instance-Incremental Learning in Dynamic and Evolving Data Proceedings Article
En: The Eleventh International Symposium on Intelligent Data Analysis (IDA 2012)., Helsinki, 2012.
BibTeX | Etiquetas:
@inproceedings{Read2012b,
  title     = {Batch-Incremental versus Instance-Incremental Learning in Dynamic and Evolving Data},
  author    = {Jesse Read and Albert Bifet and Bernhard Pfahringer and Geoff Holmes},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {The Eleventh International Symposium on Intelligent Data Analysis (IDA 2012).},
  address   = {Helsinki},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Luengo, David; Martino, Luca
Almost rejectionless sampling from Nakagami-m distributions (m≥1) Artículo de revista
En: Electronics Letters, vol. 48, no 24, pp. 1559–1561, 2012, ISSN: 0013-5194.
Resumen | Enlaces | BibTeX | Etiquetas:
@article{Luengo2012ab,
  title     = {Almost rejectionless sampling from {Nakagami}-$m$ distributions ($m \geq 1$)},
  author    = {David Luengo and Luca Martino},
  url       = {http://digital-library.theiet.org/content/journals/10.1049/el.2012.3513},
  doi       = {10.1049/el.2012.3513},
  issn      = {0013-5194},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Electronics Letters},
  volume    = {48},
  number    = {24},
  pages     = {1559--1561},
  publisher = {IET Digital Library},
  abstract  = {The Nakagami-\textit{m} distribution is widely used for the simulation of fading channels in wireless communications. A novel, simple and extremely efficient acceptance-rejection algorithm is introduced for the generation of independent Nakagami-\textit{m} random variables. The proposed method uses another Nakagami density with a half-integer value of the fading parameter},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
López-Castromán, Jorge; Leiva-Murillo, Jose M; Blasco-Fontecilla, Hilario; Garcia-Nieto, R; Morant-Ginestar, C; Blanco, Carlos; Artés-Rodríguez, Antonio; Baca-García, Enrique
P-1266 - Dimensional Schizophrenia: not an Easy Transition Artículo de revista
En: European Psychiatry, vol. 27, pp. 1, 2012.
Resumen | Enlaces | BibTeX | Etiquetas:
@article{Lopez-Castroman2012,
  title     = {P-1266 - Dimensional Schizophrenia: not an Easy Transition},
  author    = {Jorge L\'{o}pez-Castrom\'{a}n and Jose M Leiva-Murillo and Hilario Blasco-Fontecilla and R Garcia-Nieto and C Morant-Ginestar and Carlos Blanco and Antonio Art\'{e}s-Rodr\'{i}guez and Enrique Baca-Garc\'{i}a},
  url       = {http://www.sciencedirect.com/science/article/pii/S0924933812754330},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {European Psychiatry},
  volume    = {27},
  pages     = {1},
  abstract  = {Recently, several authors have argued in favor of extending the less common clinical phenotype of schizophrenia to a vulnerability phenotype of schizophrenia in the general population. It has been proposed that high levels in any of four different symptom dimensions (affective, psychosis, negative and cognitive) would lead to clinical assessment, identification of correlated symptoms in other dimensions and finally, the diagnosis of schizophrenia. Being so, we would expect to find such a dimensional pattern in the previous diagnoses of schizophrenic patients. We examined previous contacts of a large cohort of patients diagnosed, according to the International Classification of Diseases (ICD-10), with schizophrenia (n=26,163) in public mental health centers of Madrid (Spain) from 1980 to 2008. Of those patients, 56.7% received another diagnosis prior to schizophrenia. Non-schizophrenia diagnoses within the category of `schizophrenia, schizotypal and delusional disorders' were common (F2; 40.0%). The other most frequent prior diagnoses were `neurotic, stress-related and somatoform disorders' (F4; 47.3%), `mood disorders' (F3; 41.4%), and `disorders of adult personality and behavior' (F6; 20.8%). We then examined the probability of progression to schizophrenia, considering also time proximity. The strongest associations were between several F2 spectrum diagnoses with schizophrenia. However, some affective disorders (F3x) were also linked with schizophrenia but anxiety (F4) or personality disorders (F6) were not. Our findings support two of the previously described dimensions (psychotic, affective) in the development of schizophrenia. Several limitations of the dimensional model will be discussed in view of these findings.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2011
Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto; Sala, Josep
Multiantenna spectrum sensing exploiting spectral a priori information Artículo de revista
En: IEEE Transactions on Wireless Communications, vol. 10, no 12, pp. 4345-4355, 2011, ISSN: 1536-1276.
@article{twc11,
  title     = {Multiantenna spectrum sensing exploiting spectral a priori information},
  author    = {Gonzalo Vazquez-Vilar and Roberto L\'{o}pez-Valcarce and Josep Sala},
  doi       = {10.1109/TWC.2011.101211.110665},
  issn      = {1536-1276},
  year      = {2011},
  date      = {2011-12-01},
  journal   = {IEEE Transactions on Wireless Communications},
  volume    = {10},
  number    = {12},
  pages     = {4345-4355},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto
Spectrum sensing exploiting guard bands and weak channels Artículo de revista
En: IEEE Transactions on Signal Processing, vol. 59, no 12, pp. 6045-6057, 2011, ISSN: 1053-587X.
@article{tsp11,
  title     = {Spectrum sensing exploiting guard bands and weak channels},
  author    = {Gonzalo Vazquez-Vilar and Roberto L\'{o}pez-Valcarce},
  doi       = {10.1109/TSP.2011.2167615},
  issn      = {1053-587X},
  year      = {2011},
  date      = {2011-12-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {59},
  number    = {12},
  pages     = {6045-6057},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Vazquez-Vilar, Gonzalo; Ramirez, David; López-Valcarce, Roberto; Via, Javier; Santamaria, Ignacio
Spatial rank estimation in Cognitive Radio Networks with uncalibrated multiple antennas Proceedings Article
En: 4th International Conference on Cognitive Radio and Advanced Spectrum Management (CogART 2011), Barcelona, Spain, 2011, (Invited).
BibTeX | Etiquetas:
@inproceedings{cogart2011,
  title     = {Spatial rank estimation in Cognitive Radio Networks with uncalibrated multiple antennas},
  author    = {Gonzalo Vazquez-Vilar and David Ramirez and Roberto L\'{o}pez-Valcarce and Javier Via and Ignacio Santamaria},
  year      = {2011},
  date      = {2011-10-01},
  booktitle = {4th International Conference on Cognitive Radio and Advanced Spectrum Management (CogART 2011)},
  address   = {Barcelona, Spain},
  note      = {Invited},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramirez, David; Vazquez-Vilar, Gonzalo; Lopez-Valcarce, Roberto; Via, Javier; Santamaria, Ignacio
Detection of rank-P Signals in Cognitive Radio Networks With Uncalibrated Multiple Antennas Artículo de revista
En: IEEE Transactions on Signal Processing, vol. 59, no 8, pp. 3764-3774, 2011, ISSN: 1053-587X.
@article{ramirez11,
  title     = {Detection of rank-P Signals in Cognitive Radio Networks With Uncalibrated Multiple Antennas},
  author    = {David Ramirez and Gonzalo Vazquez-Vilar and Roberto L\'{o}pez-Valcarce and Javier Via and Ignacio Santamaria},
  doi       = {10.1109/TSP.2011.2146779},
  issn      = {1053-587X},
  year      = {2011},
  date      = {2011-08-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {59},
  number    = {8},
  pages     = {3764-3774},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Campo, Adria Tauste; Vazquez-Vilar, Gonzalo; i Fàbregas, Albert Guillén; Martinez, Alfonso
Random-Coding Joint Source-Channel Bounds Proceedings Article
En: 2011 IEEE International Symposium on Information Theory (ISIT 2011), Saint Petersburg, Russia, 2011.
BibTeX | Etiquetas:
@inproceedings{isit2011,
  title     = {Random-Coding Joint Source-Channel Bounds},
  author    = {Adria Tauste Campo and Gonzalo Vazquez-Vilar and Albert Guill\'{e}n i F\`{a}bregas and Alfonso Martinez},
  year      = {2011},
  date      = {2011-07-01},
  booktitle = {2011 IEEE International Symposium on Information Theory (ISIT 2011)},
  address   = {Saint Petersburg, Russia},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto; Pandharipande, Ashish
Detection diversity of multiantenna spectrum sensors Proceedings Article
En: 2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2011), Prague, Czech Republic, 2011.
BibTeX | Etiquetas:
@inproceedings{iccasp2011a,
  title     = {Detection diversity of multiantenna spectrum sensors},
  author    = {Gonzalo Vazquez-Vilar and Roberto L\'{o}pez-Valcarce and Ashish Pandharipande},
  year      = {2011},
  date      = {2011-05-01},
  booktitle = {2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2011)},
  address   = {Prague, Czech Republic},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramirez, David; Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto; Via, Javier; Santamaria, Ignacio
Multiantenna Detection under Noise uncertainty and primary user's spatial structure Proceedings Article
En: 2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2011), Prague, Czech Republic, 2011.
BibTeX | Etiquetas:
@inproceedings{iccasp2011b,
  title     = {Multiantenna Detection under Noise uncertainty and primary user's spatial structure},
  author    = {David Ramirez and Gonzalo Vazquez-Vilar and Roberto L\'{o}pez-Valcarce and Javier Via and Ignacio Santamaria},
  year      = {2011},
  date      = {2011-05-01},
  booktitle = {2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2011)},
  address   = {Prague, Czech Republic},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}