### 2015

Bravo-Santos, Ángel M; Djuric, Petar M

Detectors for Cooperative Mesh Networks with Decode-and-Forward Relays Artículo de revista

En: IEEE Transactions on Signal Processing, vol. 63, no 1, pp. 5–17, 2015, ISSN: 1053-587X.

Resumen | Enlaces | BibTeX | Etiquetas: Cooperative systems, Detectors, Journal, Mesh networks, Modulation, Relays, spread spectrum communication, Wireless communication

@article{Bravo-Santos2014bb,
  title     = {Detectors for Cooperative Mesh Networks with Decode-and-Forward Relays},
  author    = {{\'A}ngel M Bravo-Santos and Petar M Djuric},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6928514},
  doi       = {10.1109/TSP.2014.2364016},
  issn      = {1053-587X},
  year      = {2015},
  date      = {2015-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {63},
  number    = {1},
  pages     = {5--17},
  publisher = {IEEE},
  abstract  = {We consider mesh networks composed of groups of relaying nodes which operate in decode-andforward mode. Each node from a group relays information to all the nodes in the next group. We study these networks in two setups, one where the nodes have complete state information about the channels through which they receive the signals, and another when they only have the statistics of the channels. We derive recursive expressions for the probabilities of errors of the nodes and present several implementations of detectors used in these networks. We compare the mesh networks with multi-hop networks formed by a set of parallel sections of multiple relaying nodes. We demonstrate with numerous simulations that there are significant improvements in performance of mesh over multi-hop networks in various scenarios.},
  keywords  = {Cooperative systems, Detectors, Journal, Mesh networks, Modulation, Relays, spread spectrum communication, Wireless communication},
  pubstate  = {published},
  tppubtype = {article}
}

### 2014

Santiago-Mozos, Ricardo; Perez-Cruz, Fernando; Madden, Michael; Artés-Rodríguez, Antonio

An Automated Screening System for Tuberculosis Artículo de revista

En: IEEE journal of biomedical and health informatics, vol. 18, no 3, pp. 855-862, 2014, ISSN: 2168-2208.

Resumen | Enlaces | BibTeX | Etiquetas: Automated screening, Bayesian, Decision making, Sequential analysis, Tuberculosis

@article{Santiago-Mozos2013,
  title     = {An Automated Screening System for Tuberculosis},
  author    = {Ricardo Santiago-Mozos and Fernando Perez-Cruz and Michael Madden and Antonio Art{\'e}s-Rodr{\'\i}guez},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P47_2014_An%20Automated%20Screening%20System%20for%20Tuberculosis.pdf http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6630069},
  issn      = {2168-2208},
  year      = {2014},
  date      = {2014-05-01},
  journal   = {IEEE journal of biomedical and health informatics},
  volume    = {18},
  number    = {3},
  pages     = {855--862},
  publisher = {IEEE},
  abstract  = {Automated screening systems are commonly used to detect some agent in a sample and take a global decision about the subject (e.g. ill/healthy) based on these detections. We propose a Bayesian methodology for taking decisions in (sequential) screening systems that considers the false alarm rate of the detector. Our approach assesses the quality of its decisions and provides lower bounds on the achievable performance of the screening system from the training data. In addition, we develop a complete screening system for sputum smears in tuberculosis diagnosis, and show, using a real-world database, the advantages of the proposed framework when compared to the commonly used count detections and threshold approach.},
  keywords  = {Automated screening, Bayesian, Decision making, Sequential analysis, Tuberculosis},
  pubstate  = {published},
  tppubtype = {article}
}

Impedovo, Sebastiano; Liu, Cheng-Lin; Impedovo, Donato; Pirlo, Giuseppe; Read, Jesse; Martino, Luca; Luengo, David

Efficient Monte Carlo Methods for Multi-Dimensional Learning with Classifier Chains Artículo de revista

En: Pattern Recognition, vol. 47, no 3, pp. 1535–1546, 2014.

Resumen | Enlaces | BibTeX | Etiquetas: Bayesian inference, Classifier chains, Monte Carlo methods, Multi-dimensional classification, Multi-label classification

@article{Impedovo2014b,
  title    = {Efficient Monte Carlo Methods for Multi-Dimensional Learning with Classifier Chains},
  author   = {Sebastiano Impedovo and Cheng-Lin Liu and Donato Impedovo and Giuseppe Pirlo and Jesse Read and Luca Martino and David Luengo},
  url      = {http://www.sciencedirect.com/science/article/pii/S0031320313004160},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Pattern Recognition},
  volume   = {47},
  number   = {3},
  pages    = {1535--1546},
  abstract = {Multi-dimensional classification (MDC) is the supervised learning problem where an instance is associated with multiple classes, rather than with a single class, as in traditional classification problems. Since these classes are often strongly correlated, modeling the dependencies between them allows MDC methods to improve their performance \textendash at the expense of an increased computational cost. In this paper we focus on the classifier chains (CC) approach for modeling dependencies, one of the most popular and highest-performing methods for multi-label classification (MLC), a particular case of MDC which involves only binary classes (i.e., labels). The original CC algorithm makes a greedy approximation, and is fast but tends to propagate errors along the chain. Here we present novel Monte Carlo schemes, both for finding a good chain sequence and performing efficient inference. Our algorithms remain tractable for high-dimensional data sets and obtain the best predictive performance across several real data sets.},
  keywords = {Bayesian inference, Classifier chains, Monte Carlo methods, Multi-dimensional classification, Multi-label classification},
  pubstate = {published},
  tppubtype = {article}
}

Read, Jesse; Achutegui, Katrin; Miguez, Joaquin

A Distributed Particle Filter for Nonlinear Tracking in Wireless Sensor Networks Artículo de revista

En: Signal Processing, vol. 98, pp. 121–134, 2014.

Resumen | Enlaces | BibTeX | Etiquetas: Distributed filtering, Target tracking, Wireless sensor network

@article{Read2014b,
  title    = {A Distributed Particle Filter for Nonlinear Tracking in Wireless Sensor Networks},
  author   = {Jesse Read and Katrin Achutegui and Joaquin Miguez},
  url      = {http://www.tsc.uc3m.es/~jmiguez/papers/P40_2014_A%20Distributed%20Particle%20Filter%20for%20Nonlinear%20Tracking%20in%20Wireless%20Sensor%20Networks.pdf http://www.sciencedirect.com/science/article/pii/S0165168413004568},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Signal Processing},
  volume   = {98},
  pages    = {121--134},
  abstract = {The use of distributed particle filters for tracking in sensor networks has become popular in recent years. The distributed particle filters proposed in the literature up to now are only approximations of the centralized particle filter or, if they are a proper distributed version of the particle filter, their implementation in a wireless sensor network demands a prohibitive communication capability. In this work, we propose a mathematically sound distributed particle filter for tracking in a real-world indoor wireless sensor network composed of low-power nodes. We provide formal and general descriptions of our methodology and then present the results of both real-world experiments and/or computer simulations that use models fitted with real data. With the same number of particles as a centralized filter, the distributed algorithm is over four times faster, yet our simulations show that, even assuming the same processing speed, the accuracy of the centralized and distributed algorithms is practically identical. The main limitation of the proposed scheme is the need to make all the sensor observations available to every processing node. Therefore, it is better suited to broadcast networks or multihop networks where the volume of generated data is kept low, e.g., by an adequate local pre-processing of the observations.},
  keywords = {Distributed filtering, Target tracking, Wireless sensor network},
  pubstate = {published},
  tppubtype = {article}
}

Alvarado, Alex; Brannstrom, Fredrik; Agrell, Erik; Koch, Tobias

High-SNR Asymptotics of Mutual Information for Discrete Constellations With Applications to BICM Artículo de revista

En: IEEE Transactions on Information Theory, vol. 60, no 2, pp. 1061–1076, 2014, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: additive white Gaussian noise channel, Anti-Gray code, bit-interleaved coded modulation, discrete constellations, Entropy, Gray code, high-SNR asymptotics, IP networks, Labeling, minimum-mean square error, Modulation, Mutual information, Signal to noise ratio, Vectors

@article{Alvarado2014,
  title    = {High-SNR Asymptotics of Mutual Information for Discrete Constellations With Applications to BICM},
  author   = {Alex Alvarado and Fredrik Brannstrom and Erik Agrell and Tobias Koch},
  url      = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6671479 http://www.tsc.uc3m.es/~koch/files/IEEE_TIT_60%282%29.pdf},
  issn     = {0018-9448},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {IEEE Transactions on Information Theory},
  volume   = {60},
  number   = {2},
  pages    = {1061--1076},
  abstract = {Asymptotic expressions of the mutual information between any discrete input and the corresponding output of the scalar additive white Gaussian noise channel are presented in the limit as the signal-to-noise ratio (SNR) tends to infinity. Asymptotic expressions of the symbol-error probability (SEP) and the minimum mean-square error (MMSE) achieved by estimating the channel input given the channel output are also developed. It is shown that for any input distribution, the conditional entropy of the channel input given the output, MMSE, and SEP have an asymptotic behavior proportional to the Gaussian Q-function. The argument of the Q-function depends only on the minimum Euclidean distance (MED) of the constellation and the SNR, and the proportionality constants are functions of the MED and the probabilities of the pairs of constellation points at MED. The developed expressions are then generalized to study the high-SNR behavior of the generalized mutual information (GMI) for bit-interleaved coded modulation (BICM). By means of these asymptotic expressions, the long-standing conjecture that Gray codes are the binary labelings that maximize the BICM-GMI at high SNR is proven. It is further shown that for any equally spaced constellation whose size is a power of two, there always exists an anti-Gray code giving the lowest BICM-GMI at high SNR.},
  keywords = {additive white Gaussian noise channel, Anti-Gray code, bit-interleaved coded modulation, discrete constellations, Entropy, Gray code, high-SNR asymptotics, IP networks, Labeling, minimum-mean square error, Modulation, Mutual information, Signal to noise ratio, Vectors},
  pubstate = {published},
  tppubtype = {article}
}

Martin-Fernandez, L; Gilioli, G; Lanzarone, E; Miguez, Joaquin; Pasquali, S; Ruggeri, F; Ruiz, D P

A Rao-Blackwellized Particle Filter for Joint Parameter Estimation and Biomass Tracking in a Stochastic Predator-Prey System Artículo de revista

En: Mathematical Biosciences and Engineering, vol. 11, no 3, pp. 573–597, 2014.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Martin-Fernandez2014,
  title    = {A Rao-Blackwellized Particle Filter for Joint Parameter Estimation and Biomass Tracking in a Stochastic Predator-Prey System},
  author   = {L Martin-Fernandez and G Gilioli and E Lanzarone and Joaquin Miguez and S Pasquali and F Ruggeri and D P Ruiz},
  url      = {http://www.tsc.uc3m.es/~jmiguez/papers/P42_2014_A%20Rao-Blackwellized%20Particle%20Filter%20for%20Joint%20Parameter%20Estimation%20and%20Biomass%20Tracking%20in%20a%20Stochastic%20Predator-Prey%20System.pdf https://www.aimsciences.org/journals/pdfs.jsp?paperID=9557\&mode=full http://gts.tsc.uc3m.es/wp-content/uploads/2014/01/LMF_et_al_MBE13_A-RAO-BLACKWELLIZED-PARTICLE-FILTER_-jma.pdf https://www.aimsciences.org/journals/displayArticlesnew.jsp?paperID=9557},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Mathematical Biosciences and Engineering},
  volume   = {11},
  number   = {3},
  pages    = {573--597},
  abstract = {Functional response estimation and population tracking in predator- prey systems are critical problems in ecology. In this paper we consider a stochastic predator-prey system with a Lotka-Volterra functional response and propose a particle filtering method for: (a) estimating the behavioral parameter representing the rate of effective search per predator in the functional response and (b) forecasting the population biomass using field data. In particular, the proposed technique combines a sequential Monte Carlo sampling scheme for tracking the time-varying biomass with the analytical integration of the unknown behavioral parameter. In order to assess the performance of the method, we show results for both synthetic and observed data collected in an acarine predator-prey system, namely the pest mite Tetranychus urticae and the predatory mite Phytoseiulus persimilis.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}

Piñeiro-Ave, José; Blanco-Velasco, Manuel; Cruz-Roldán, Fernando; Artés-Rodríguez, Antonio

Target Detection for Low Cost Uncooled MWIR Cameras Based on Empirical Mode Decomposition Artículo de revista

En: Infrared Physics &amp; Technology, vol. 63, pp. 222–231, 2014, ISSN: 13504495.

Resumen | Enlaces | BibTeX | Etiquetas: Background subtraction, Change detection, Denoising, Drift, Empirical Mode Decomposition (EMD), Intrinsic Mode Function (IMF)

@article{Pineiro-Ave2014,
  title    = {Target Detection for Low Cost Uncooled MWIR Cameras Based on Empirical Mode Decomposition},
  author   = {Jos{\'e} Pi{\~n}eiro-Ave and Manuel Blanco-Velasco and Fernando Cruz-Rold{\'a}n and Antonio Art{\'e}s-Rodr{\'\i}guez},
  url      = {http://www.tsc.uc3m.es/~antonio/papers/P49_2014_Target%20Detection%20for%20Low%20Cost%20Uncooled%20MWIR%20Cameras%20Based%20on%20Empirical%20Mode%20Decomposition.pdf http://www.sciencedirect.com/science/article/pii/S1350449514000085},
  issn     = {1350-4495},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Infrared Physics \& Technology},
  volume   = {63},
  pages    = {222--231},
  abstract = {In this work, a novel method for detecting low intensity fast moving objects with low cost Medium Wavelength Infrared (MWIR) cameras is proposed. The method is based on background subtraction in a video sequence obtained with a low density Focal Plane Array (FPA) of the newly available uncooled lead selenide (PbSe) detectors. Thermal instability along with the lack of specific electronics and mechanical devices for canceling the effect of distortion make background image identification very difficult. As a result, the identification of targets is performed in low signal to noise ratio (SNR) conditions, which may considerably restrict the sensitivity of the detection algorithm. These problems are addressed in this work by means of a new technique based on the empirical mode decomposition, which accomplishes drift estimation and target detection. Given that background estimation is the most important stage for detecting, a previous denoising step enabling a better drift estimation is designed. Comparisons are conducted against a denoising technique based on the wavelet transform and also with traditional drift estimation methods such as Kalman filtering and running average. The results reported by the simulations show that the proposed scheme has superior performance.},
  keywords = {Background subtraction, Change detection, Denoising, Drift, Empirical Mode Decomposition (EMD), Intrinsic Mode Function (IMF)},
  pubstate = {published},
  tppubtype = {article}
}

Koblents, Eugenia; Miguez, Joaquin

A Population Monte Carlo Scheme with Transformed Weights and Its Application to Stochastic Kinetic Models Artículo de revista

En: Statistics and Computing, 2014 (to appear), ISSN: 0960-3174.

Resumen | Enlaces | BibTeX | Etiquetas: degeneracy of importance weights, Importance sampling, population Monte Carlo, Stochastic kinetic models

@article{Koblents2014bb,
  title    = {A Population Monte Carlo Scheme with Transformed Weights and Its Application to Stochastic Kinetic Models},
  author   = {Eugenia Koblents and Joaquin Miguez},
  url      = {http://link.springer.com/10.1007/s11222-013-9440-2 http://gts.tsc.uc3m.es/wp-content/uploads/2014/01/NPMC_A-population-Monte-Carlo-scheme-with-transformed_jma.pdf},
  issn     = {0960-3174},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Statistics and Computing},
  note     = {To appear},
  abstract = {This paper addresses the Monte Carlo approximation of posterior probability distributions. In particular, we consider the population Monte Carlo (PMC) technique, which is based on an iterative importance sampling (IS) approach. An important drawback of this methodology is the degeneracy of the importance weights (IWs) when the dimension of either the observations or the variables of interest is high. To alleviate this difficulty, we propose a new method that performs a nonlinear transformation of the IWs. This operation reduces the weight variation, hence it avoids degeneracy and increases the efficiency of the IS scheme, specially when drawing from proposal functions which are poorly adapted to the true posterior. For the sake of illustration, we have applied the proposed algorithm to the estimation of the parameters of a Gaussian mixture model. This is a simple problem that enables us to discuss the main features of the proposed technique. As a practical application, we have also considered the challenging problem of estimating the rate parameters of a stochastic kinetic model (SKM). SKMs are multivariate systems that model molecular interactions in biological and chemical problems. We introduce a particularization of the proposed algorithm to SKMs and present numerical results.},
  keywords = {degeneracy of importance weights, Importance sampling, population Monte Carlo, Stochastic kinetic models},
  pubstate = {published},
  tppubtype = {article}
}

Crisan, Dan; Miguez, Joaquin

Particle-Kernel Estimation of the Filter Density in State-Space Models Artículo de revista

En: Bernoulli, 2014 (to appear).

Resumen | Enlaces | BibTeX | Etiquetas: density estimation, Markov systems., Models, Sequential Monte Carlo, state-space, stochastic filtering

@article{Crisan2014bb,
  title    = {Particle-Kernel Estimation of the Filter Density in State-Space Models},
  author   = {Dan Crisan and Joaquin Miguez},
  url      = {http://www.tsc.uc3m.es/~jmiguez/papers/P43_2014_Particle-Kernel%20Estimation%20of%20the%20Filter%20Density%20in%20State-Space%20Models.pdf http://www.bernoulli-society.org/index.php/publications/bernoulli-journal/bernoulli-journal-papers},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Bernoulli},
  note     = {To appear},
  abstract = {Sequential Monte Carlo (SMC) methods, also known as particle filters, are simulation-based recursive algorithms for the approximation of the a posteriori probability measures generated by state-space dynamical models. At any given time t, a SMC method produces a set of samples over the state space of the system of interest (often termed “particles”) that is used to build a discrete and random approximation of the posterior probability distribution of the state variables, conditional on a sequence of available observations. One potential application of the methodology is the estimation of the densities associated to the sequence of a posteriori distributions. While practitioners have rather freely applied such density approximations in the past, the issue has received less attention from a theoretical perspective. In this paper, we address the problem of constructing kernel-based estimates of the posterior probability density function and its derivatives, and obtain asymptotic convergence results for the estimation errors. In particular, we find convergence rates for the approximation errors that hold uniformly on the state space and guarantee that the error vanishes almost surely as the number of particles in the filter grows. Based on this uniform convergence result, we first show how to build continuous measures that converge almost surely (with known rate) toward the posterior measure and then address a few applications. The latter include maximum a posteriori estimation of the system state using the approximate derivatives of the posterior density and the approximation of functionals of it, e.g., Shannon’s entropy.},
  keywords = {density estimation, Markov systems., Models, Sequential Monte Carlo, state-space, stochastic filtering},
  pubstate = {published},
  tppubtype = {article}
}

Ruiz, Francisco J R; Valera, Isabel; Blanco, Carlos; Perez-Cruz, Fernando

Bayesian Nonparametric Comorbidity Analysis of Psychiatric Disorders Artículo de revista

En: Journal of Machine Learning Research, vol. 15, no 1, pp. 1215–1248, 2014.

Resumen | Enlaces | BibTeX | Etiquetas: ALCIT, Bayesian Non-parametrics, categorical observations, Indian Buffet Process, Laplace approximation, multinomial-logit function, variational inference

@article{Ruiz2014,
  title    = {Bayesian Nonparametric Comorbidity Analysis of Psychiatric Disorders},
  author   = {Francisco J R Ruiz and Isabel Valera and Carlos Blanco and Fernando Perez-Cruz},
  url      = {http://jmlr.org/papers/volume15/ruiz14a/ruiz14a.pdf http://arxiv.org/abs/1401.7620},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Journal of Machine Learning Research},
  volume   = {15},
  number   = {1},
  pages    = {1215--1248},
  abstract = {The analysis of comorbidity is an open and complex research field in the branch of psychiatry, where clinical experience and several studies suggest that the relation among the psychiatric disorders may have etiological and treatment implications. In this paper, we are interested in applying latent feature modeling to find the latent structure behind the psychiatric disorders that can help to examine and explain the relationships among them. To this end, we use the large amount of information collected in the National Epidemiologic Survey on Alcohol and Related Conditions (NESARC) database and propose to model these data using a nonparametric latent model based on the Indian Buffet Process (IBP). Due to the discrete nature of the data, we first need to adapt the observation model for discrete random variables. We propose a generative model in which the observations are drawn from a multinomial-logit distribution given the IBP matrix. The implementation of an efficient Gibbs sampler is accomplished using the Laplace approximation, which allows integrating out the weighting factors of the multinomial-logit likelihood model. We also provide a variational inference algorithm for this model, which provides a complementary (and less expensive in terms of computational complexity) alternative to the Gibbs sampler allowing us to deal with a larger number of data. Finally, we use the model to analyze comorbidity among the psychiatric disorders diagnosed by experts from the NESARC database.},
  keywords = {ALCIT, Bayesian Non-parametrics, categorical observations, Indian Buffet Process, Laplace approximation, multinomial-logit function, variational inference},
  pubstate = {published},
  tppubtype = {article}
}

O'Mahony, Niamh; Florentino-Liaño, Blanca; Carballo, Juan J; Baca-García, Enrique; Artés-Rodríguez, Antonio

Objective diagnosis of ADHD using IMUs Artículo de revista

En: Medical engineering &amp; physics, vol. 36, no 7, pp. 922–6, 2014, ISSN: 1873-4030.

Resumen | Enlaces | BibTeX | Etiquetas: Attention deficit/hyperactivity disorder, Classification, Inertial sensors, Machine learning, Objective diagnosis

@article{O'Mahony2014,
  title    = {Objective diagnosis of ADHD using IMUs},
  author   = {Niamh O'Mahony and Blanca Florentino-Lia{\~n}o and Juan J Carballo and Enrique Baca-Garc{\'\i}a and Antonio Art{\'e}s-Rodr{\'\i}guez},
  url      = {http://www.tsc.uc3m.es/~antonio/papers/P50_2014_Objective%20Diagnosis%20of%20ADHD%20Using%20IMUs.pdf http://www.sciencedirect.com/science/article/pii/S1350453314000459},
  issn     = {1873-4030},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Medical engineering \& physics},
  volume   = {36},
  number   = {7},
  pages    = {922--926},
  abstract = {This work proposes the use of miniature wireless inertial sensors as an objective tool for the diagnosis of ADHD. The sensors, consisting of both accelerometers and gyroscopes to measure linear and rotational movement, respectively, are used to characterize the motion of subjects in the setting of a psychiatric consultancy. A support vector machine is used to classify a group of subjects as either ADHD or non-ADHD and a classification accuracy of greater than 95% has been achieved. Separate analyses of the motion data recorded during various activities throughout the visit to the psychiatric consultancy show that motion recorded during a continuous performance test (a forced concentration task) provides a better classification performance than that recorded during "free time".},
  keywords = {Attention deficit/hyperactivity disorder, Classification, Inertial sensors, Machine learning, Objective diagnosis},
  pubstate = {published},
  tppubtype = {article}
}

Montoya-Martinez, Jair; Artés-Rodríguez, Antonio; Pontil, Massimiliano; Hansen, Lars Kai

A Regularized Matrix Factorization Approach to Induce Structured Sparse-Low Rank Solutions in the EEG Inverse Problem Artículo de revista

En: EURASIP Journal on Advances in Signal Processing, vol. 2014, no 1, pp. 97, 2014, ISSN: 1687-6180.

Resumen | Enlaces | BibTeX | Etiquetas: Low rank, Matrix factorization, Nonsmooth-nonconvex optimization, Regularization, Structured sparsity

@article{Montoya-Martinez2014b,
  title     = {A Regularized Matrix Factorization Approach to Induce Structured Sparse-Low Rank Solutions in the EEG Inverse Problem},
  author    = {Jair Montoya-Martinez and Antonio Art{\'e}s-Rodr{\'\i}guez and Massimiliano Pontil and Lars Kai Hansen},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P48_2014_A%20Regularized%20Matrix%20Factorization%20Approach%20to%20Induce%20Structured%20Sparse-Low%20Rank%20Solutions%20in%20the%20EEG%20Inverse%20Problem.pdf http://asp.eurasipjournals.com/content/2014/1/97/abstract},
  issn      = {1687-6180},
  year      = {2014},
  date      = {2014-01-01},
  journal   = {EURASIP Journal on Advances in Signal Processing},
  volume    = {2014},
  number    = {1},
  pages     = {97},
  publisher = {Springer},
  abstract  = {We consider the estimation of the Brain Electrical Sources (BES) matrix from noisy Electroencephalographic (EEG) measurements, commonly named as the EEG inverse problem. We propose a new method to induce neurophysiological meaningful solutions, which takes into account the smoothness, structured sparsity and low rank of the BES matrix. The method is based on the factorization of the BES matrix as a product of a sparse coding matrix and a dense latent source matrix. The structured sparse-low rank structure is enforced by minimizing a regularized functional that includes the l21-norm of the coding matrix and the squared Frobenius norm of the latent source matrix. We develop an alternating optimization algorithm to solve the resulting nonsmooth-nonconvex minimization problem. We analyze the convergence of the optimization procedure, and we compare, under different synthetic scenarios, the performance of our method respect to the Group Lasso and Trace Norm regularizers when they are applied directly to the target matrix.},
  keywords  = {Low rank, Matrix factorization, Nonsmooth-nonconvex optimization, Regularization, Structured sparsity},
  pubstate  = {published},
  tppubtype = {article}
}

Pastore, A.; Koch, Tobias; Fonollosa, Javier Rodriguez

A Rate-Splitting Approach to Fading Channels With Imperfect Channel-State Information Artículo de revista

En: IEEE Transactions on Information Theory, vol. 60, no 7, pp. 4266–4285, 2014, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, COMONSENS, DEIPRO, Entropy, Fading, fading channels, flat fading, imperfect channel-state information, MobileNET, Mutual information, OTOSiS, Random variables, Receivers, Signal to noise ratio, Upper bound

@article{Pastore2014a,
  title     = {A Rate-Splitting Approach to Fading Channels With Imperfect Channel-State Information},
  author    = {Pastore, A. and Tobias Koch and Javier Rodriguez Fonollosa},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6832779 http://www.tsc.uc3m.es/~koch/files/IEEE_TIT_60(7).pdf http://arxiv.org/pdf/1301.6120.pdf},
  issn      = {0018-9448},
  year      = {2014},
  date      = {2014-01-01},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {60},
  number    = {7},
  pages     = {4266--4285},
  publisher = {IEEE},
  abstract  = {As shown by M\'{e}dard, the capacity of fading channels with imperfect channel-state information can be lower-bounded by assuming a Gaussian channel input (X) with power (P) and by upper-bounding the conditional entropy (h(X|Y,hat {H})) by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating (X) from ((Y,hat {H})) . We demonstrate that, using a rate-splitting approach, this lower bound can be sharpened: by expressing the Gaussian input (X) as the sum of two independent Gaussian variables (X_1) and (X_2) and by applying M\'{e}dard's lower bound first to bound the mutual information between (X_1) and (Y) while treating (X_2) as noise, and by applying it a second time to the mutual information between (X_2) and (Y) while assuming (X_1) to be known, we obtain a capacity lower bound that is strictly larger than M\'{e}dard's lower bound. We then generalize this approach to an arbi- rary number (L) of layers, where (X) is expressed as the sum of (L) independent Gaussian random variables of respective variances (P_ell ) , (ell = 1,dotsc ,L) summing up to (P) . Among all such rate-splitting bounds, we determine the supremum over power allocations (P_ell ) and total number of layers (L) . This supremum is achieved for (L rightarrow infty ) and gives rise to an analytically expressible capacity lower bound. For Gaussian fading, this novel bound is shown to converge to the Gaussian-input mutual information as the signal-to-noise ratio (SNR) grows, provided that the variance of the channel estimation error (H-hat {H}) tends to zero as the SNR tends to infinity.},
  keywords  = {channel capacity, COMONSENS, DEIPRO, Entropy, Fading, fading channels, flat fading, imperfect channel-state information, MobileNET, Mutual information, OTOSiS, Random variables, Receivers, Signal to noise ratio, Upper bound},
  pubstate  = {published},
  tppubtype = {article}
}

Campo, Adria Tauste; Vazquez-Vilar, Gonzalo; i Fàbregas, Albert Guillén; Koch, Tobias; Martinez, Alfonso

A Derivation of the Source-Channel Error Exponent Using Nonidentical Product Distributions Artículo de revista

En: IEEE Transactions on Information Theory, vol. 60, no 6, pp. 3209–3217, 2014, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: ALCIT, Channel Coding, COMONSENS, DEIPRO, error probability, joint source-channel coding, Joints, MobileNET, Probability distribution, product distributions, random coding, Reliability, reliability function, sphere-packing bound, Upper bound

@article{TausteCampo2014,
  title     = {A Derivation of the Source-Channel Error Exponent Using Nonidentical Product Distributions},
  author    = {Adria Tauste Campo and Gonzalo Vazquez-Vilar and Albert Guill{\'e}n i F{\`a}bregas and Tobias Koch and Alfonso Martinez},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6803047 http://www.tsc.uc3m.es/~koch/files/IEEE_TIT_60(6).pdf},
  issn      = {0018-9448},
  year      = {2014},
  date      = {2014-01-01},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {60},
  number    = {6},
  pages     = {3209--3217},
  publisher = {IEEE},
  abstract  = {This paper studies the random-coding exponent of joint source-channel coding for a scheme where source messages are assigned to disjoint subsets (referred to as classes), and codewords are independently generated according to a distribution that depends on the class index of the source message. For discrete memoryless systems, two optimally chosen classes and product distributions are found to be sufficient to attain the sphere-packing exponent in those cases where it is tight.},
  keywords  = {ALCIT, Channel Coding, COMONSENS, DEIPRO, error probability, joint source-channel coding, Joints, MobileNET, Probability distribution, product distributions, random coding, Reliability, reliability function, sphere-packing bound, Upper bound},
  pubstate  = {published},
  tppubtype = {article}
}

Cespedes, Javier; Olmos, Pablo M; Sanchez-Fernandez, Matilde; Perez-Cruz, Fernando

Expectation Propagation Detection for High-order High-dimensional MIMO Systems Artículo de revista

En: IEEE Transactions on Communications, vol. PP, no 99, pp. 1–1, 2014, ISSN: 0090-6778.

Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, computational complexity, Detectors, MIMO, Signal to noise ratio, Vectors

@article{Cespedes2014,
  title     = {Expectation Propagation Detection for High-order High-dimensional {MIMO} Systems},
  author    = {Javier Cespedes and Pablo M Olmos and Matilde Sanchez-Fernandez and Fernando Perez-Cruz},
  journal   = {IEEE Transactions on Communications},
  volume    = {PP},
  number    = {99},
  pages     = {1--1},
  year      = {2014},
  date      = {2014-01-01},
  issn      = {0090-6778},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6841617},
  abstract  = {Modern communications systems use multiple-input multiple-output (MIMO) and high-order QAM constellations for maximizing spectral efficiency. However, as the number of antennas and the order of the constellation grow, the design of efficient and low-complexity MIMO receivers possesses big technical challenges. For example, symbol detection can no longer rely on maximum likelihood detection or sphere-decoding methods, as their complexity increases exponentially with the number of transmitters/receivers. In this paper, we propose a low-complexity high-accuracy MIMO symbol detector based on the Expectation Propagation (EP) algorithm. EP allows approximating iteratively at polynomial-time the posterior distribution of the transmitted symbols. We also show that our EP MIMO detector outperforms classic and state-of-the-art solutions reducing the symbol error rate at a reduced computational complexity.},
  keywords  = {Approximation methods, computational complexity, Detectors, MIMO, Signal to noise ratio, Vectors},
  pubstate  = {published},
  tppubtype = {article},
}

Read, Jesse; Bielza, Concha; Larranaga, Pedro

Multi-Dimensional Classification with Super-Classes Artículo de revista

En: IEEE Transactions on Knowledge and Data Engineering, vol. 26, no 7, pp. 1720–1733, 2014, ISSN: 1041-4347.

Resumen | Enlaces | BibTeX | Etiquetas: Accuracy, Bayes methods, Classification, COMPRHENSION, conditional dependence, Context, core goals, data instance, evaluation metrics, Integrated circuit modeling, modeling class dependencies, multi-dimensional, Multi-dimensional classification, multidimensional classification problem, multidimensional datasets, multidimensional learners, multilabel classification, multilabel research, multiple class variables, ordinary class, pattern classification, problem transformation, recently-popularized task, super classes, super-class partitions, tractable running time, Training, Vectors

@article{Read2014bb,
  title     = {Multi-Dimensional Classification with Super-Classes},
  author    = {Jesse Read and Concha Bielza and Pedro Larranaga},
  journal   = {IEEE Transactions on Knowledge and Data Engineering},
  volume    = {26},
  number    = {7},
  pages     = {1720--1733},
  year      = {2014},
  date      = {2014-01-01},
  issn      = {1041-4347},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6648319},
  publisher = {IEEE},
  abstract  = {The multi-dimensional classification problem is a generalisation of the recently-popularised task of multi-label classification, where each data instance is associated with multiple class variables. There has been relatively little research carried out specific to multi-dimensional classification and, although one of the core goals is similar (modelling dependencies among classes), there are important differences; namely a higher number of possible classifications. In this paper we present method for multi-dimensional classification, drawing from the most relevant multi-label research, and combining it with important novel developments. Using a fast method to model the conditional dependence between class variables, we form super-class partitions and use them to build multi-dimensional learners, learning each super-class as an ordinary class, and thus explicitly modelling class dependencies. Additionally, we present a mechanism to deal with the many class values inherent to super-classes, and thus make learning efficient. To investigate the effectiveness of this approach we carry out an empirical evaluation on a range of multi-dimensional datasets, under different evaluation metrics, and in comparison with high-performing existing multi-dimensional approaches from the literature. Analysis of results shows that our approach offers important performance gains over competing methods, while also exhibiting tractable running time.},
  keywords  = {Accuracy, Bayes methods, Classification, COMPRHENSION, conditional dependence, Context, core goals, data instance, evaluation metrics, Integrated circuit modeling, modeling class dependencies, multi-dimensional, Multi-dimensional classification, multidimensional classification problem, multidimensional datasets, multidimensional learners, multilabel classification, multilabel research, multiple class variables, ordinary class, pattern classification, problem transformation, recently-popularized task, super classes, super-class partitions, tractable running time, Training, Vectors},
  pubstate  = {published},
  tppubtype = {article},
}

Salamanca, Luis; Murillo-Fuentes, Juan José; Olmos, Pablo M; Perez-Cruz, Fernando; Verdu, Sergio

Near DT Bound Achieving Linear Codes in the Short Blocklength Regime Artículo de revista

En: IEEE Communications Letters, vol. PP, no 99, pp. 1–1, 2014, ISSN: 1089-7798.

Resumen | Enlaces | BibTeX | Etiquetas: binary erasure channel, Channel Coding, Complexity theory, finite blocklength regime, LDPC codes, Maximum likelihood decoding, ML decoding, parity check codes, random coding

@article{Salamanca2014bb,
  title     = {Near {DT} Bound Achieving Linear Codes in the Short Blocklength Regime},
  author    = {Luis Salamanca and Juan Jos{\'e} Murillo-Fuentes and Pablo M Olmos and Fernando Perez-Cruz and Sergio Verdu},
  journal   = {IEEE Communications Letters},
  volume    = {PP},
  number    = {99},
  pages     = {1--1},
  year      = {2014},
  date      = {2014-01-01},
  issn      = {1089-7798},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6957577},
  abstract  = {The dependence-testing (DT) bound is one of the strongest achievability bounds for the binary erasure channel (BEC) in the finite block length regime. In this paper, we show that maximum likelihood decoded regular low-density parity-check (LDPC) codes with at least 5 ones per column almost achieve the DT bound. Specifically, using quasi-regular LDPC codes with block length of 256 bits, we achieve a rate that is less than 1\% away from the rate predicted by the DT bound for a word error rate below $10^{-3}$. The results also indicate that the maximum-likelihood solution is computationally feasible for decoding block codes over the BEC with several hundred bits.},
  keywords  = {binary erasure channel, Channel Coding, Complexity theory, finite blocklength regime, LDPC codes, Maximum likelihood decoding, ML decoding, parity check codes, random coding},
  pubstate  = {published},
  tppubtype = {article},
}

Taborda, Camilo G; Guo, Dongning; Perez-Cruz, Fernando

Information--Estimation Relationships over Binomial and Negative Binomial Models Artículo de revista

En: IEEE Transactions on Information Theory, vol. to appear, pp. 1–1, 2014, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: ALCIT

@article{GilTaborda2014,
  title     = {Information--Estimation Relationships over Binomial and Negative Binomial Models},
  author    = {Camilo G Taborda and Dongning Guo and Fernando Perez-Cruz},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {to appear},
  pages     = {1--1},
  year      = {2014},
  date      = {2014-01-01},
  issn      = {0018-9448},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6746122},
  publisher = {IEEE},
  abstract  = {In recent years, a number of new connections between information measures and estimation have been found under various models, including, predominantly, Gaussian and Poisson models. This paper develops similar results for the binomial and negative binomial models. In particular, it is shown that the derivative of the relative entropy and the derivative of the mutual information for the binomial and negative binomial models can be expressed through the expectation of closed-form expressions that have conditional estimates as the main argument. Under mild conditions, those derivatives take the form of an expected Bregman divergence.},
  keywords  = {ALCIT},
  pubstate  = {published},
  tppubtype = {article},
}

Yang, Wei; Durisi, Giuseppe; Koch, Tobias; Polyanskiy, Yury

Quasi-Static Multiple-Antenna Fading Channels at Finite Blocklength Artículo de revista

En: IEEE Transactions on Information Theory, vol. 60, no 7, pp. 4232–4265, 2014, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: channel dispersion, Decoding, error probability, finite blocklength regime, MIMO, MIMO channel, outage probability, quasi-static fading channel, Rayleigh channels, Receivers, Transmitters

@article{Yang2014bb,
  title     = {Quasi-Static Multiple-Antenna Fading Channels at Finite Blocklength},
  author    = {Wei Yang and Giuseppe Durisi and Tobias Koch and Yury Polyanskiy},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {60},
  number    = {7},
  pages     = {4232--4265},
  year      = {2014},
  date      = {2014-01-01},
  issn      = {0018-9448},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6802432 http://arxiv.org/abs/1311.2012},
  publisher = {IEEE},
  abstract  = {This paper investigates the maximal achievable rate for a given blocklength and error probability over quasi-static multiple-input multiple-output fading channels, with and without channel state information at the transmitter and/or the receiver. The principal finding is that outage capacity, despite being an asymptotic quantity, is a sharp proxy for the finite-blocklength fundamental limits of slow-fading channels. Specifically, the channel dispersion is shown to be zero regardless of whether the fading realizations are available at both transmitter and receiver, at only one of them, or at neither of them. These results follow from analytically tractable converse and achievability bounds. Numerical evaluation of these bounds verifies that zero dispersion may indeed imply fast convergence to the outage capacity as the blocklength increases. In the example of a particular $1 \times 2$ single-input multiple-output Rician fading channel, the blocklength required to achieve 90\% of capacity is about an order of magnitude smaller compared with the blocklength required for an AWGN channel with the same capacity. For this specific scenario, the coding/decoding schemes adopted in the LTE-Advanced standard are benchmarked against the finite-blocklength achievability and converse bounds.},
  keywords  = {channel dispersion, Decoding, error probability, finite blocklength regime, MIMO, MIMO channel, outage probability, quasi-static fading channel, Rayleigh channels, Receivers, Transmitters},
  pubstate  = {published},
  tppubtype = {article},
}

### 2013

Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando

Tree-Structure Expectation Propagation for LDPC Decoding Over the BEC Artículo de revista

En: IEEE Transactions on Information Theory, vol. 59, no 6, pp. 3354–3377, 2013, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: Algorithm design and analysis, Approximation algorithms, Approximation methods, BEC, belief propagation, Belief-propagation (BP), binary erasure channel, Complexity theory, decode low-density parity-check codes, Decoding, discrete memoryless channels, expectation propagation, finite-length analysis, LDPC codes, LDPC decoding, parity check codes, peeling-type algorithm, Probability density function, random graph evolution, Tanner graph, tree-structure expectation propagation

@article{Olmos2013b,
  title     = {Tree-Structure Expectation Propagation for {LDPC} Decoding Over the {BEC}},
  author    = {Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {59},
  number    = {6},
  pages     = {3354--3377},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {0018-9448},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6451276},
  abstract  = {We present the tree-structure expectation propagation (Tree-EP) algorithm to decode low-density parity-check (LDPC) codes over discrete memoryless channels (DMCs). Expectation propagation generalizes belief propagation (BP) in two ways. First, it can be used with any exponential family distribution over the cliques in the graph. Second, it can impose additional constraints on the marginal distributions. We use this second property to impose pairwise marginal constraints over pairs of variables connected to a check node of the LDPC code's Tanner graph. Thanks to these additional constraints, the Tree-EP marginal estimates for each variable in the graph are more accurate than those provided by BP. We also reformulate the Tree-EP algorithm for the binary erasure channel (BEC) as a peeling-type algorithm (TEP) and we show that the algorithm has the same computational complexity as BP and it decodes a higher fraction of errors. We describe the TEP decoding process by a set of differential equations that represents the expected residual graph evolution as a function of the code parameters. The solution of these equations is used to predict the TEP decoder performance in both the asymptotic regime and the finite-length regimes over the BEC. While the asymptotic threshold of the TEP decoder is the same as the BP decoder for regular and optimized codes, we propose a scaling law for finite-length LDPC codes, which accurately approximates the TEP improved performance and facilitates its optimization.},
  keywords  = {Algorithm design and analysis, Approximation algorithms, Approximation methods, BEC, belief propagation, Belief-propagation (BP), binary erasure channel, Complexity theory, decode low-density parity-check codes, Decoding, discrete memoryless channels, expectation propagation, finite-length analysis, LDPC codes, LDPC decoding, parity check codes, peeling-type algorithm, Probability density function, random graph evolution, Tanner graph, tree-structure expectation propagation},
  pubstate  = {published},
  tppubtype = {article},
}

Asheghan, Mohammad Mostafa; Miguez, Joaquin

Robust Global Synchronization of two Complex Dynamical Networks Artículo de revista

En: Chaos (Woodbury, N.Y.), vol. 23, no 2, pp. 023108, 2013, ISSN: 1089-7682.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Asheghan2013,
  title     = {Robust Global Synchronization of two Complex Dynamical Networks},
  author    = {Mohammad Mostafa Asheghan and Joaquin Miguez},
  journal   = {Chaos (Woodbury, N.Y.)},
  volume    = {23},
  number    = {2},
  pages     = {023108},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {1089-7682},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P38_2013_Robust%20Global%20Synchronization%20of%20two%20Complex%20Dynamical%20Networks.pdf http://www.researchgate.net/publication/245026922_Robust_global_synchronization_of_two_complex_dynamical_networks},
  abstract  = {We investigate the synchronization of two coupled complex dynamical networks, a problem that has been termed outer synchronization in the literature. Our approach relies on (a) a basic lemma on the eigendecomposition of matrices resulting from Kronecker products and (b) a suitable choice of Lyapunov function related to the synchronization error dynamics. Starting from these two ingredients, a theorem that provides a sufficient condition for outer synchronization of the networks is proved. The condition in the theorem is expressed as a linear matrix inequality. When satisfied, synchronization is guaranteed to occur globally, i.e., independently of the initial conditions of the networks. The argument of the proof includes the design of the gain of the synchronizer, which is a constant square matrix with dimension dependent on the number of dynamic variables in a single network node, but independent of the size of the overall network, which can be much larger. This basic result is subsequently elaborated to simplify the design of the synchronizer, to avoid unnecessarily restrictive assumptions (e.g., diffusivity) on the coupling matrix that defines the topology of the networks and, finally, to obtain synchronizers that are robust to model errors in the parameters of the coupled networks. An illustrative numerical example for the outer synchronization of two networks of classical Lorenz nodes with perturbed parameters is presented.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article},
}

Jingshan, Zhong; Dauwels, Justin; Vazquez, Manuel A; Waller, Laura

Sparse ACEKF for Phase Reconstruction. Artículo de revista

En: Optics express, vol. 21, no 15, pp. 18125–37, 2013, ISSN: 1094-4087.

Resumen | Enlaces | BibTeX | Etiquetas: Image reconstruction techniques, Phase retrieval

@article{Jingshan2013,
  title     = {Sparse {ACEKF} for Phase Reconstruction.},
  author    = {Zhong Jingshan and Justin Dauwels and Manuel A Vazquez and Laura Waller},
  journal   = {Optics express},
  volume    = {21},
  number    = {15},
  pages     = {18125--37},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {1094-4087},
  url       = {http://www.opticsinfobase.org/viewmedia.cfm?uri=oe-21-15-18125\&seq=0\&html=true},
  publisher = {Optical Society of America},
  abstract  = {We propose a novel low-complexity recursive filter to efficiently recover quantitative phase from a series of noisy intensity images taken through focus. We first transform the wave propagation equation and nonlinear observation model (intensity measurement) into a complex augmented state space model. From the state space model, we derive a sparse augmented complex extended Kalman filter (ACEKF) to infer the complex optical field (amplitude and phase), and find that it converges under mild conditions. Our proposed method has a computational complexity of $O(N_z N \log N)$ and storage requirement of $O(N)$, compared with the original ACEKF method, which has a computational complexity of $O(N_z N^3)$ and storage requirement of $O(N^2)$, where $N_z$ is the number of images and $N$ is the number of pixels in each image. Thus, it is efficient, robust and recursive, and may be feasible for real-time phase recovery applications with high resolution images.},
  keywords  = {Image reconstruction techniques, Phase retrieval},
  pubstate  = {published},
  tppubtype = {article},
}

Salamanca, Luis; Olmos, Pablo M; Perez-Cruz, Fernando; Murillo-Fuentes, Juan Jose

Tree-Structured Expectation Propagation for LDPC Decoding over BMS Channels Artículo de revista

En: IEEE Transactions on Communications, vol. 61, no 10, pp. 4086–4095, 2013, ISSN: 0090-6778.

Resumen | Enlaces | BibTeX | Etiquetas: Approximation algorithms, Approximation methods, BEC, belief propagation, binary erasure channel, binary memoryless symmetric channels, BMS channels, Channel Coding, Complexity theory, convolutional codes, convolutional low-density parity-check codes, Decoding, decoding block, expectation propagation, finite-length codes, LDPC decoding, message-passing algorithm, parity check codes, Probability density function, sparse linear codes, TEP algorithm, tree-structured expectation propagation, trees (mathematics), Vegetation

@article{Salamanca2013a,
  title     = {Tree-Structured Expectation Propagation for {LDPC} Decoding over {BMS} Channels},
  author    = {Luis Salamanca and Pablo M Olmos and Fernando Perez-Cruz and Juan Jose Murillo-Fuentes},
  journal   = {IEEE Transactions on Communications},
  volume    = {61},
  number    = {10},
  pages     = {4086--4095},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {0090-6778},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6587624},
  abstract  = {In this paper, we put forward the tree-structured expectation propagation (TEP) algorithm for decoding block and convolutional low-density parity-check codes over any binary channel. We have already shown that TEP improves belief propagation (BP) over the binary erasure channel (BEC) by imposing marginal constraints over a set of pairs of variables that form a tree or a forest. The TEP decoder is a message-passing algorithm that sequentially builds a tree/forest of erased variables to capture additional information disregarded by the standard BP decoder, which leads to a noticeable reduction of the error rate for finite-length codes. In this paper, we show how the TEP can be extended to any channel, specifically to binary memoryless symmetric (BMS) channels. We particularly focus on how the TEP algorithm can be adapted for any channel model and, more importantly, how to choose the tree/forest to keep the gains observed for block and convolutional LDPC codes over the BEC.},
  keywords  = {Approximation algorithms, Approximation methods, BEC, belief propagation, binary erasure channel, binary memoryless symmetric channels, BMS channels, Channel Coding, Complexity theory, convolutional codes, convolutional low-density parity-check codes, Decoding, decoding block, expectation propagation, finite-length codes, LDPC decoding, message-passing algorithm, parity check codes, Probability density function, sparse linear codes, TEP algorithm, tree-structured expectation propagation, trees (mathematics), Vegetation},
  pubstate  = {published},
  tppubtype = {article},
}

Valera, Isabel; Sieskul, Bamrung; Miguez, Joaquin

On the Maximum Likelihood Estimation of the ToA Under an Imperfect Path Loss Exponent Artículo de revista

En: EURASIP Journal on Wireless Communications and Networking, vol. 2013, no 1, pp. 158, 2013, ISSN: 1687-1499.

Resumen | Enlaces | BibTeX | Etiquetas: Maximum likelihood estimator, Path loss exponent, Time-of-arrival estimation

@article{Valera2013,
  title     = {On the Maximum Likelihood Estimation of the {ToA} Under an Imperfect Path Loss Exponent},
  author    = {Isabel Valera and Bamrung Sieskul and Joaquin Miguez},
  journal   = {EURASIP Journal on Wireless Communications and Networking},
  volume    = {2013},
  number    = {1},
  pages     = {158},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {1687-1499},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P37_2013_On%20the%20Maximum%20Likelihood%20Estimation%20of%20the%20ToA%20Under%20an%20Imperfect%20Path%20Loss%20Exponent.pdf http://jwcn.eurasipjournals.com/content/2013/1/158},
  publisher = {Springer},
  abstract  = {We investigate the estimation of the time of arrival (ToA) of a radio signal transmitted over a flat-fading channel. The path attenuation is assumed to depend only on the transmitter-receiver distance and the path loss exponent (PLE) which, in turn, depends on the physical environment. All previous approaches to the problem either assume that the PLE is perfectly known or rely on estimators of the ToA which do not depend on the PLE. In this paper, we introduce a novel analysis of the performance of the maximum likelihood (ML) estimator of the ToA under an imperfect knowledge of the PLE. Specifically, we carry out a Taylor series expansion that approximates the bias and the root mean square error of the ML estimator in closed form as a function of the PLE error. The analysis is first carried out for a path loss model in which the received signal gain depends only on the PLE and the transmitter-receiver distance. Then, we extend the obtained results to account also for shadow fading scenarios. Our computer simulations show that this approximate analysis is accurate when the signal-to-noise ratio (SNR) of the received signal is medium to high. A simple Monte Carlo method based on the analysis is also proposed. This technique is computationally efficient and yields a better approximation of the ML estimator in the low SNR region. The obtained analytical (and Monte Carlo) approximations can be useful at the design stage of wireless communication and localization systems.},
  keywords  = {Maximum likelihood estimator, Path loss exponent, Time-of-arrival estimation},
  pubstate  = {published},
  tppubtype = {article},
}

Koch, Tobias; Lapidoth, Amos

At Low SNR, Asymmetric Quantizers are Better Artículo de revista

En: IEEE Transactions on Information Theory, vol. 59, no 9, pp. 5421–5445, 2013, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: 1-bit quantizer, asymmetric signaling constellation, asymmetric threshold quantizers, asymptotic power loss, Capacity per unit energy, channel capacity, discrete-time Gaussian channel, flash-signaling input distribution, Gaussian channel, Gaussian channels, low signal-to-noise ratio (SNR), quantisation (signal), quantization, Rayleigh channels, Rayleigh-fading channel, signal-to-noise ratio, SNR, spectral efficiency

@article{Koch2013,
  title     = {At Low {SNR}, Asymmetric Quantizers are Better},
  author    = {Tobias Koch and Amos Lapidoth},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {59},
  number    = {9},
  pages     = {5421--5445},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {0018-9448},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6545291},
  abstract  = {We study the capacity of the discrete-time Gaussian channel when its output is quantized with a 1-bit quantizer. We focus on the low signal-to-noise ratio (SNR) regime, where communication at very low spectral efficiencies takes place. In this regime, a symmetric threshold quantizer is known to reduce channel capacity by a factor of $2/\pi$, i.e., to cause an asymptotic power loss of approximately 2 dB. Here, it is shown that this power loss can be avoided by using asymmetric threshold quantizers and asymmetric signaling constellations. To avoid this power loss, flash-signaling input distributions are essential. Consequently, 1-bit output quantization of the Gaussian channel reduces spectral efficiency. Threshold quantizers are not only asymptotically optimal: at every fixed SNR, a threshold quantizer maximizes capacity among all 1-bit output quantizers. The picture changes on the Rayleigh-fading channel. In the noncoherent case, a 1-bit output quantizer causes an unavoidable low-SNR asymptotic power loss. In the coherent case, however, this power loss is avoidable provided that we allow the quantizer to depend on the fading level.},
  keywords  = {1-bit quantizer, asymmetric signaling constellation, asymmetric threshold quantizers, asymptotic power loss, Capacity per unit energy, channel capacity, discrete-time Gaussian channel, flash-signaling input distribution, Gaussian channel, Gaussian channels, low signal-to-noise ratio (SNR), quantisation (signal), quantization, Rayleigh channels, Rayleigh-fading channel, signal-to-noise ratio, SNR, spectral efficiency},
  pubstate  = {published},
  tppubtype = {article},
}

Vazquez, Manuel A; Miguez, Joaquin

User Activity Tracking in DS-CDMA Systems Artículo de revista

En: IEEE Transactions on Vehicular Technology, vol. 62, no 7, pp. 3188–3203, 2013, ISSN: 0018-9545.

Resumen | Enlaces | BibTeX | Etiquetas: Activity detection, activity tracking, Bayes methods, Bayesian framework, Channel estimation, code division multiple access, code-division multiple access (CDMA), computer simulations, data detection, direct sequence code division multiple-access, DS-CDMA systems, Equations, joint channel and data estimation, joint channel estimation, Joints, MAP equalizers, Mathematical model, maximum a posteriori, MIMO communication, Multiaccess communication, multiple-input-multiple-output communication chann, multiuser communication systems, per-survivor processing (PSP), radio receivers, Receivers, sequential Monte Carlo (SMC) methods, time-varying number, time-varying parameter, Vectors, wireless channels

@article{Vazquez2013a,
  title     = {User Activity Tracking in {DS-CDMA} Systems},
  author    = {Manuel A Vazquez and Joaquin Miguez},
  journal   = {IEEE Transactions on Vehicular Technology},
  volume    = {62},
  number    = {7},
  pages     = {3188--3203},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {0018-9545},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P39_2013_User%20Activity%20Tracking%20in%20DS-CDMA%20Systems.pdf http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6473922},
  abstract  = {In modern multiuser communication systems, users are allowed to enter or leave the system at any given time. Thus, the number of active users is an unknown and time-varying parameter, and the performance of the system depends on how accurately this parameter is estimated over time. The so-called problem of user identification, which consists of determining the number and identities of users transmitting in a communication system, is usually solved prior to, and hence independently of, that posed by the detection of the transmitted data. Since both problems are tightly connected, a joint solution is desirable. In this paper, we focus on direct-sequence (DS) code-division multiple-access (CDMA) systems and derive, within a Bayesian framework, different receivers that cope with an unknown and time-varying number of users while performing joint channel estimation and data detection. The main feature of these receivers, compared with other recently proposed schemes for user activity detection, is that they are natural extensions of existing maximum a posteriori (MAP) equalizers for multiple-input-multiple-output communication channels. We assess the validity of the proposed receivers, including their reliability in detecting the number and identities of active users, by way of computer simulations.},
  keywords  = {Activity detection, activity tracking, Bayes methods, Bayesian framework, Channel estimation, code division multiple access, code-division multiple access (CDMA), computer simulations, data detection, direct sequence code division multiple-access, DS-CDMA systems, Equations, joint channel and data estimation, joint channel estimation, Joints, MAP equalizers, Mathematical model, maximum a posteriori, MIMO communication, Multiaccess communication, multiple-input-multiple-output communication chann, multiuser communication systems, per-survivor processing (PSP), radio receivers, Receivers, sequential Monte Carlo (SMC) methods, time-varying number, time-varying parameter, Vectors, wireless channels},
  pubstate  = {published},
  tppubtype = {article},
}

Bravo-Santos, Ángel M

Polar Codes for Gaussian Degraded Relay Channels Artículo de revista

En: IEEE Communications Letters, vol. 17, no 2, pp. 365–368, 2013, ISSN: 1089-7798.

Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, Channel Coding, Decoding, Encoding, Gaussian channels, Gaussian degraded relay channel, Gaussian noise, Gaussian-degraded relay channels, log-likelihood expression, Markov coding, Noise, parity check codes, polar code detector, polar codes, relay-destination link, Relays, Vectors

@article{Bravo-Santos2013,
  title     = {Polar Codes for {Gaussian} Degraded Relay Channels},
  author    = {{\'A}ngel M Bravo-Santos},
  journal   = {IEEE Communications Letters},
  volume    = {17},
  number    = {2},
  pages     = {365--368},
  year      = {2013},
  date      = {2013-01-01},
  issn      = {1089-7798},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6412681},
  publisher = {IEEE},
  abstract  = {In this paper we apply polar codes for the Gaussian degraded relay channel. We study the conditions to be satisfied by the codes and provide an efficient method for constructing them. The relay-destination link is special because the noise is the sum of two components: the Gaussian noise and the signals from the source. We study this link and provide the log-likelihood expression to be used by the polar code detector. We perform simulations of the channel and the results show that polar codes of high rate and large codeword length are closer to the theoretical limit than other good codes.},
  keywords  = {channel capacity, Channel Coding, Decoding, Encoding, Gaussian channels, Gaussian degraded relay channel, Gaussian noise, Gaussian-degraded relay channels, log-likelihood expression, Markov coding, Noise, parity check codes, polar code detector, polar codes, relay-destination link, Relays, Vectors},
  pubstate  = {published},
  tppubtype = {article},
}

Perez-Cruz, Fernando; Vaerenbergh, Steven Van; Murillo-Fuentes, Juan Jose; Lazaro-Gredilla, Miguel; Santamaria, Ignacio

Gaussian Processes for Nonlinear Signal Processing: An Overview of Recent Advances Artículo de revista

En: IEEE Signal Processing Magazine, vol. 30, no 4, pp. 40–50, 2013, ISSN: 1053-5888.

Resumen | Enlaces | BibTeX | Etiquetas: adaptive algorithm, Adaptive algorithms, classification scenario, Gaussian processes, Learning systems, Machine learning, Noise measurement, nonGaussian noise model, Nonlinear estimation, nonlinear estimation problem, nonlinear signal processing, optimal Wiener filtering, recursive algorithm, Signal processing, Wiener filters, wireless digital communication

@article{Perez-Cruz2013,
  title     = {{Gaussian} Processes for Nonlinear Signal Processing: An Overview of Recent Advances},
  author    = {Perez-Cruz, Fernando and Van Vaerenbergh, Steven and Murillo-Fuentes, Juan Jose and Lazaro-Gredilla, Miguel and Santamaria, Ignacio},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6530761},
  issn      = {1053-5888},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Signal Processing Magazine},
  volume    = {30},
  number    = {4},
  pages     = {40--50},
  abstract  = {Gaussian processes (GPs) are versatile tools that have been successfully employed to solve nonlinear estimation problems in machine learning but are rarely used in signal processing. In this tutorial, we present GPs for regression as a natural nonlinear extension to optimal Wiener filtering. After establishing their basic formulation, we discuss several important aspects and extensions, including recursive and adaptive algorithms for dealing with nonstationarity, low-complexity solutions, non-Gaussian noise models, and classification scenarios. Furthermore, we provide a selection of relevant applications to wireless digital communications.},
  keywords  = {adaptive algorithm, Adaptive algorithms, classification scenario, Gaussian processes, Learning systems, Machine learning, Noise measurement, nonGaussian noise model, Nonlinear estimation, nonlinear estimation problem, nonlinear signal processing, optimal Wiener filtering, recursive algorithm, Signal processing, Wiener filters, wireless digital communication},
  pubstate  = {published},
  tppubtype = {article}
}

Salamanca, Luis; Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando

Tree Expectation Propagation for ML Decoding of LDPC Codes over the BEC Artículo de revista

En: IEEE Transactions on Communications, vol. 61, no 2, pp. 465–473, 2013, ISSN: 0090-6778.

Resumen | Enlaces | BibTeX | Etiquetas: approximate inference, Approximation algorithms, Approximation methods, BEC, binary codes, binary erasure channel, code graph, Complexity theory, equivalent complexity, Gaussian elimination method, Gaussian processes, generalized tree-structured expectation propagatio, graphical message-passing procedure, graphical models, LDPC codes, Maximum likelihood decoding, maximum likelihood solution, ML decoding, parity check codes, peeling decoder, tree expectation propagation, tree graph, Tree graphs, tree-structured expectation propagation, tree-structured expectation propagation decoder, trees (mathematics)

@article{Salamanca2013b,
  title     = {Tree Expectation Propagation for {ML} Decoding of {LDPC} Codes over the {BEC}},
  author    = {Salamanca, Luis and Olmos, Pablo M and Murillo-Fuentes, Juan Jose and Perez-Cruz, Fernando},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6384612},
  issn      = {0090-6778},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Transactions on Communications},
  volume    = {61},
  number    = {2},
  pages     = {465--473},
  abstract  = {We propose a decoding algorithm for LDPC codes that achieves the maximum likelihood (ML) solution over the binary erasure channel (BEC). In this channel, the tree-structured expectation propagation (TEP) decoder improves the peeling decoder (PD) by processing check nodes of degree one and two. However, it does not achieve the ML solution, as the tree structure of the TEP allows only for approximate inference. In this paper, we provide the procedure to construct the structure needed for exact inference. This algorithm, denoted as generalized tree-structured expectation propagation (GTEP), modifies the code graph by recursively eliminating any check node and merging this information in the remaining graph. The GTEP decoder upon completion either provides the unique ML solution or a tree graph in which the number of parent nodes indicates the multiplicity of the ML solution. We also explain the algorithm as a Gaussian elimination method, relating the GTEP to other ML solutions. Compared to previous approaches, it presents an equivalent complexity, it exhibits a simpler graphical message-passing procedure and, most interesting, the algorithm can be generalized to other channels.},
  keywords  = {approximate inference, Approximation algorithms, Approximation methods, BEC, binary codes, binary erasure channel, code graph, Complexity theory, equivalent complexity, Gaussian elimination method, Gaussian processes, generalized tree-structured expectation propagatio, graphical message-passing procedure, graphical models, LDPC codes, Maximum likelihood decoding, maximum likelihood solution, ML decoding, parity check codes, peeling decoder, tree expectation propagation, tree graph, Tree graphs, tree-structured expectation propagation, tree-structured expectation propagation decoder, trees (mathematics)},
  pubstate  = {published},
  tppubtype = {article}
}

Bravo-Santos, Ángel M

Polar Codes for the Rayleigh Fading Channel Artículo de revista

En: IEEE Communications Letters, vol. PP, no 99, pp. 1–4, 2013, ISSN: 1089-7798.

Resumen | Enlaces | BibTeX | Etiquetas: fading channels, polar codes, Rayleigh channels

@article{Bravo-Santos2013a,
  title     = {Polar Codes for the {Rayleigh} Fading Channel},
  author    = {Bravo-Santos, {\'A}ngel M},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6663750},
  issn      = {1089-7798},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Communications Letters},
  volume    = {PP},
  number    = {99},
  pages     = {1--4},
  internal-note = {NOTE(review): volume PP / number 99 / pages 1--4 are IEEE early-access placeholders; update once the final issue pagination is known},
  abstract  = {The application of polar codes for the Rayleigh fading channel is considered. We construct polar codes for the block Rayleigh fading channel with known channel side information (CSI) and for the Rayleigh channel with known channel distribution information (CDI). The construction of polar codes for the Rayleigh fading with known CSI allows them to work with any signal noise ratio (SNR). The rate of the codeword is adapted correspondingly. Polar codes for Rayleigh fading with known CDI suffer a penalty for not having complete information about the channel. The penalty, however, is small, about 1.3 dB. We perform simulations and compare the obtained results with the theoretical limits. We show that they are close to the theoretical limit. We compare polar codes with other good codes and the results show that long polar codes are closer to the limit.},
  keywords  = {fading channels, polar codes, Rayleigh channels},
  pubstate  = {published},
  tppubtype = {article}
}

Leiva-Murillo, Jose M; López-Castromán, Jorge; Baca-García, Enrique

Characterization of Suicidal Behaviour with Self-Organizing Maps Artículo de revista

En: Computational and mathematical methods in medicine, vol. 2013, pp. 136743, 2013, ISSN: 1748-6718.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Leiva-Murillo2013,
  title     = {Characterization of Suicidal Behaviour with {Self-Organizing Maps}},
  author    = {Leiva-Murillo, Jose M and L{\'o}pez-Castrom{\'a}n, Jorge and Baca-Garc{\'\i}a, Enrique},
  url       = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3705862\&tool=pmcentrez\&rendertype=abstract},
  issn      = {1748-6718},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {Computational and mathematical methods in medicine},
  volume    = {2013},
  pages     = {136743},
  abstract  = {The study of the variables involved in suicidal behavior is important from a social, medical, and economical point of view. Given the high number of potential variables of interest, a large population of subjects must be analysed in order to get conclusive results. In this paper, we describe a method based on self-organizing maps (SOMs) for finding the most relevant variables even when their relation to suicidal behavior is strongly nonlinear. We have applied the method to a cohort with more than 8,000 subjects and 600 variables and discovered four groups of variables involved in suicidal behavior. According to the results, there are four main groups of risk factors that characterize the population of suicide attempters: mental disorders, alcoholism, impulsivity, and childhood abuse. The identification of specific subpopulations of suicide attempters is consistent with current medical knowledge and may provide a new avenue of research to improve the management of suicidal cases.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Martino, Luca; Read, Jesse

On the Flexibility of the Design of Multiple Try Metropolis Schemes Artículo de revista

En: Computational Statistics, vol. 28, no 6, pp. 2797–2823, 2013, ISSN: 0943-4062.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Martino2013,
  title     = {On the Flexibility of the Design of {Multiple Try Metropolis} Schemes},
  author    = {Martino, Luca and Read, Jesse},
  url       = {http://link.springer.com/10.1007/s00180-013-0429-2},
  issn      = {0943-4062},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {Computational Statistics},
  volume    = {28},
  number    = {6},
  pages     = {2797--2823},
  abstract  = {The multiple try Metropolis (MTM) method is a generalization of the classical Metropolis\textendashHastings algorithm in which the next state of the chain is chosen among a set of samples, according to normalized weights. In the literature, several extensions have been proposed. In this work, we show and remark upon the flexibility of the design of MTM-type methods, fulfilling the detailed balance condition. We discuss several possibilities, show different numerical simulations and discuss the implications of the results},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Manzano, Mario; Espinosa, Felipe; Bravo-Santos, Ángel M; Santiso, Enrique; Bravo, Ignacio; García, David

Dynamic Cognitive Self-Organized TDMA for Medium Access Control in Real-Time Vehicle to Vehicle Communications Artículo de revista

En: Mathematical Problems in Engineering, vol. 2013, 2013.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Manzano2013,
  title     = {Dynamic Cognitive Self-Organized {TDMA} for Medium Access Control in Real-Time Vehicle to Vehicle Communications},
  author    = {Manzano, Mario and Espinosa, Felipe and Bravo-Santos, {\'A}ngel M and Santiso, Enrique and Bravo, Ignacio and Garc{\'\i}a, David},
  url       = {http://www.hindawi.com/journals/mpe/2013/574528/abs/},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {Mathematical Problems in Engineering},
  volume    = {2013},
  abstract  = {The emergence of intelligent transport systems has brought out a new set of requirements on wireless communication. To cope with these requirements, several proposals are currently under discussion. In this highly mobile environment, the design of a prompt, efficient, flexible, and reliable medium access control, able to cover the specific constraints of the named real-time communications applications, is still unsolved. This paper presents the original proposal integrating Non-Cooperative Cognitive Time Division Multiple Access (NCC-TDMA) based on Cognitive Radio (CR) techniques to obtain a mechanism which complies with the requirements of real-time communications. Though the proposed MAC uses a slotted channel, it can be adapted to operate on the physical layer of different standards. The authors' analysis considers the IEEE WAVE and 802.11p as the standards of reference. The mechanism also offers other advantages, such as avoiding signalling and the adaptation capacity to channel conditions and interferences. The solution is applied to the problem of units merging a convoy. Comparison results between NCC-TDMA and Slotted-Aloha are included.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Koch, Tobias; Kramer, Gerhard

On Noncoherent Fading Relay Channels at High Signal-to-Noise Ratio Artículo de revista

En: IEEE Transactions on Information Theory, vol. 59, no 4, pp. 2221–2241, 2013, ISSN: 0018-9448.

Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, Channel models, Fading, fading channels, MIMO communication, multiple-input single-output fading channel statis, noncoherent, noncoherent fading relay channel capacity, radio receiver, radio receivers, radio transmitter, radio transmitters, Receivers, relay channels, relay networks (telecommunication), Relays, Signal to noise ratio, signal-to-noise ratio, SNR, statistics, time selective, Transmitters, Upper bound

@article{Koch2013a,
  title     = {On Noncoherent Fading Relay Channels at High Signal-to-Noise Ratio},
  author    = {Koch, Tobias and Kramer, Gerhard},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6378474},
  issn      = {0018-9448},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {59},
  number    = {4},
  pages     = {2221--2241},
  abstract  = {The capacity of noncoherent regular-fading relay channels is studied where all terminals are aware of the fading statistics but not of their realizations. It is shown that if the fading coefficient of the channel between the transmitter and the receiver can be predicted more accurately from its infinite past than the fading coefficient of the channel between the relay and the receiver, then at high signal-to-noise ratio (SNR), the relay does not increase capacity. It is further shown that if the fading coefficient of the channel between the transmitter and the relay can be predicted more accurately from its infinite past than the fading coefficient of the channel between the relay and the receiver, then at high SNR, one can achieve communication rates that are within one bit of the capacity of the multiple-input single-output fading channel that results when the transmitter and the relay can cooperate.},
  keywords  = {channel capacity, Channel models, Fading, fading channels, MIMO communication, multiple-input single-output fading channel statis, noncoherent, noncoherent fading relay channel capacity, radio receiver, radio receivers, radio transmitter, radio transmitters, Receivers, relay channels, relay networks (telecommunication), Relays, Signal to noise ratio, signal-to-noise ratio, SNR, statistics, time selective, Transmitters, Upper bound},
  pubstate  = {published},
  tppubtype = {article}
}

Leiva-Murillo, Jose M; Gomez-Chova, Luis; Camps-Valls, Gustavo

Multitask Remote Sensing Data Classification Artículo de revista

En: IEEE Transactions on Geoscience and Remote Sensing, vol. 51, no 1, pp. 151–161, 2013, ISSN: 0196-2892.

Enlaces | BibTeX | Etiquetas: Aggregates, angular image features, Cloud screening, covariate shift, covariate shift (CS), cross information, data processing problems, data set bias, domain adaptation, geophysical image processing, Hilbert space pairwise predictor Euclidean distanc, image classification, image feature nonstationary behavior, Kernel, land mine detection, land-mine detection, learning (artificial intelligence), Machine learning, matrix decomposition, matrix regularization, MTL, multisource image classification, multispectral images, multitask learning, multitask learning (MTL), multitask remote sensing data classification, multitemporal classification, multitemporal image classification, radar data, regularization schemes, relational operators, Remote sensing, small sample set problem, spatial image features, Standards, support vector machine, support vector machine (SVM), Support vector machines, SVM, temporal image features, Training, urban monitoring

@article{Leiva-Murillo2013a,
  title     = {Multitask Remote Sensing Data Classification},
  author    = {Leiva-Murillo, Jose M and Gomez-Chova, Luis and Camps-Valls, Gustavo},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6214595},
  issn      = {0196-2892},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Transactions on Geoscience and Remote Sensing},
  volume    = {51},
  number    = {1},
  pages     = {151--161},
  publisher = {IEEE},
  keywords  = {Aggregates, angular image features, Cloud screening, covariate shift, covariate shift (CS), cross information, data processing problems, data set bias, domain adaptation, geophysical image processing, Hilbert space pairwise predictor Euclidean distanc, image classification, image feature nonstationary behavior, Kernel, land mine detection, land-mine detection, learning (artificial intelligence), Machine learning, matrix decomposition, matrix regularization, MTL, multisource image classification, multispectral images, multitask learning, multitask learning (MTL), multitask remote sensing data classification, multitemporal classification, multitemporal image classification, radar data, regularization schemes, relational operators, Remote sensing, small sample set problem, spatial image features, Standards, support vector machine, support vector machine (SVM), Support vector machines, SVM, temporal image features, Training, urban monitoring},
  pubstate  = {published},
  tppubtype = {article}
}

Read, Jesse; Bielza, Concha; Larranaga, Pedro

Multi-Dimensional Classification with Super-Classes Artículo de revista

En: IEEE Transactions on Knowledge and Data Engineering, vol. PP, no 99, pp. 1–1, 2013, ISSN: 1041-4347.

Resumen | Enlaces | BibTeX | Etiquetas: COMPREHENSION

@article{Read2013b,
  title     = {Multi-Dimensional Classification with Super-Classes},
  author    = {Read, Jesse and Bielza, Concha and Larra{\~n}aga, Pedro},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6648319},
  issn      = {1041-4347},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Transactions on Knowledge and Data Engineering},
  volume    = {PP},
  number    = {99},
  pages     = {1--1},
  internal-note = {NOTE(review): volume PP / number 99 / pages 1--1 are IEEE early-access placeholders; update once the final issue pagination is known},
  abstract  = {The multi-dimensional classification problem is a generalisation of the recently-popularised task of multi-label classification, where each data instance is associated with multiple class variables. There has been relatively little research carried out specific to multi-dimensional classification and, although one of the core goals is similar (modelling dependencies among classes), there are important differences; namely a higher number of possible classifications. In this paper we present method for multi-dimensional classification, drawing from the most relevant multi-label research, and combining it with important novel developments. Using a fast method to model the conditional dependence between class variables, we form super-class partitions and use them to build multi-dimensional learners, learning each super-class as an ordinary class, and thus explicitly modelling class dependencies. Additionally, we present a mechanism to deal with the many class values inherent to super-classes, and thus make learning efficient. To investigate the effectiveness of this approach we carry out an empirical evaluation on a range of multi-dimensional datasets, under different evaluation metrics, and in comparison with high-performing existing multi-dimensional approaches from the literature. Analysis of results shows that our approach offers important performance gains over competing methods, while also exhibiting tractable running time.},
  keywords  = {COMPREHENSION},
  pubstate  = {published},
  tppubtype = {article}
}

Asheghan, Mohammad Mostafa; Delshad, Saleh S; Hamidi-Beheshti, Mohammad Taghi; Tavazoei, Mohammad Saleh

Non-Fragile Control and Synchronization of a New Fractional Order Chaotic System Artículo de revista

En: Applied Mathematics and Computation, vol. 222, pp. 712–721, 2013, ISSN: 00963003.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Asheghan2013a,
  title     = {Non-Fragile Control and Synchronization of a New Fractional Order Chaotic System},
  author    = {Asheghan, Mohammad Mostafa and Delshad, Saleh S and Hamidi-Beheshti, Mohammad Taghi and Tavazoei, Mohammad Saleh},
  url       = {http://www.sciencedirect.com/science/article/pii/S0096300313007959},
  issn      = {0096-3003},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {Applied Mathematics and Computation},
  volume    = {222},
  pages     = {712--721},
  abstract  = {In this paper, we address global non-fragile control and synchronization of a new fractional order chaotic system. First we inspect the chaotic behavior of the fractional order system under study and also find the lowest order (2.49) for the introduced dynamics to remain chaotic. Then, a necessary and sufficient condition which can be easily extended to other fractional-order systems is proposed in terms of Linear Matrix Inequality (LMI) to check whether the candidate state feedback controller with parameter uncertainty can guarantee zero convergence of error or not. In addition, the proposed method provides a global zero attraction of error that guarantees stability around all existing equilibrium points. Finally, numerical simulation are employed to verify the validity of the proposed algorithm.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Alvarez, Mauricio; Luengo, David; Lawrence, Neil D

Linear Latent Force Models Using Gaussian Processes Artículo de revista

En: IEEE Trans. Pattern Anal. Mach. Intell., vol. 35, no 11, pp. 2693–2705, 2013.

Resumen | Enlaces | BibTeX | Etiquetas: Analytical models, Computational modeling, Data models, Differential equations, Force, Gaussian processes, Mathematical mode

@article{Alvarez2013,
  title     = {Linear Latent Force Models Using {Gaussian} Processes},
  author    = {Alvarez, Mauricio and Luengo, David and Lawrence, Neil D},
  url       = {http://dblp.uni-trier.de/db/journals/pami/pami35.html#AlvarezLL13 http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6514873},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume    = {35},
  number    = {11},
  pages     = {2693--2705},
  abstract  = {Purely data-driven approaches for machine learning present difficulties when data are scarce relative to the complexity of the model or when the model is forced to extrapolate. On the other hand, purely mechanistic approaches need to identify and specify all the interactions in the problem at hand (which may not be feasible) and still leave the issue of how to parameterize the system. In this paper, we present a hybrid approach using Gaussian processes and differential equations to combine data-driven modeling with a physical model of the system. We show how different, physically inspired, kernel functions can be developed through sensible, simple, mechanistic assumptions about the underlying system. The versatility of our approach is illustrated with three case studies from motion capture, computational biology, and geostatistics.},
  keywords  = {Analytical models, Computational modeling, Data models, Differential equations, Force, Gaussian processes, Mathematical mode},
  pubstate  = {published},
  tppubtype = {article}
}

Serrano-Drozdowskyj, E; López-Castromán, Jorge; Leiva-Murillo, Jose M; Blasco-Fontecilla, Hilario; Garcia-Nieto, R; Artés-Rodríguez, Antonio; Morant-Ginestar, C; Blanco, Carlos; Courtet, Philippe; Baca-García, Enrique

1533 – A Naturalistic Study of the Diagnostic Evolution of Schizophrenia Artículo de revista

En: European Psychiatry, vol. 28, 2013.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Serrano-Drozdowskyj2013,
  title     = {1533 \textendash A Naturalistic Study of the Diagnostic Evolution of Schizophrenia},
  author    = {Serrano-Drozdowskyj, E and L{\'o}pez-Castrom{\'a}n, Jorge and Leiva-Murillo, Jose M and Blasco-Fontecilla, Hilario and Garcia-Nieto, R and Art{\'e}s-Rodr{\'\i}guez, Antonio and Morant-Ginestar, C and Blanco, Carlos and Courtet, Philippe and Baca-Garc{\'\i}a, Enrique},
  url       = {http://www.sciencedirect.com/science/article/pii/S0924933813765465},
  year      = {2013},
  date      = {2013-01-01},
  journal   = {European Psychiatry},
  volume    = {28},
  abstract  = {INTRODUCTION In the absence of biological measures, diagnostic long-term stability provides the best evidence of diagnostic validity.Therefore,the study of diagnostic stability in naturalistic conditions may reflect clinical validity and utility of current schizophrenia diagnostic criteria. OBJECTIVES Describe the diagnostic evolution of schizophrenia in clinical settings. METHODS We examined the stability of schizophrenia first diagnoses (n=26,163) in public mental health centers of Madrid (Spain).Probability of maintaining the diagnosis of schizophrenia was calculated considering the cumulative percentage of each diagnosis per month during 48 months after the initial diagnosis of schizophrenia. RESULTS 65% of the subjects kept the diagnosis of schizophrenia in subsequent assessments (Figure 1). Patients who changed (35%) did so in the first 4-8 months. After that time gap the rates of each diagnostic category remained stable. Diagnostic shift from schizophrenia was more commonly toward the following diagnoses: personality disorders (F60), delusional disorders (F22), bipolar disorder (F31), persistent mood disorders (F34), acute and transient psychotic disorders (F23) or schizoaffective disorder (F25). CONCLUSIONS Once it is confirmed, clinical assessment repeatedly maintains the diagnosis of schizophrenia.The time lapse for its confirmation agrees with the current diagnostic criteria in DSM-IV. We will discuss the implications of these findings for the categorical versus dimensional debate in the diagnosis of schizophrenia.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Koch, Tobias; Lapidoth, Amos

At Low SNR, Asymmetric Quantizers are Better Artículo de revista

En: IEEE Transactions on Information Theory, vol. 59, no 9, pp. 5421-5445, 2013.

@article{6545291,
  title     = {At Low {SNR}, Asymmetric Quantizers are Better},
  author    = {Koch, Tobias and Lapidoth, Amos},
  doi       = {10.1109/TIT.2013.2262919},
  year      = {2013},
  date      = {2013-01-01},
  urldate   = {2013-01-01},
  journal   = {IEEE Transactions on Information Theory},
  volume    = {59},
  number    = {9},
  pages     = {5421--5445},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

### 2012

Sala, Josep; Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto

Multiantenna GLR detection of rank-one signals with known power spectrum in white noise with unknown spatial correlation Artículo de revista

En: IEEE Transactions on Signal Processing, vol. 60, no 6, pp. 3065-3078, 2012, ISSN: 1053-587X.

@article{gvazquez-TSP12,
  title     = {Multiantenna {GLR} detection of rank-one signals with known power spectrum in white noise with unknown spatial correlation},
  author    = {Sala, Josep and Vazquez-Vilar, Gonzalo and L{\'o}pez-Valcarce, Roberto},
  doi       = {10.1109/TSP.2012.2189767},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-06-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {6},
  pages     = {3065--3078},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Achutegui, Katrin; Miguez, Joaquin; Rodas, Javier; Escudero, Carlos J

A Multi-Model Sequential Monte Carlo Methodology for Indoor Tracking: Algorithms and Experimental Results Artículo de revista

En: Signal Processing, vol. 92, no 11, pp. 2594–2613, 2012.

Resumen | Enlaces | BibTeX | Etiquetas: Data fusion, Indoor positioning, Indoor tracking, Interacting multiple models, Sequential Monte Carlo, Switching observation models

@article{Achutegui2012,
  title     = {A Multi-Model Sequential {Monte Carlo} Methodology for Indoor Tracking: Algorithms and Experimental Results},
  author    = {Achutegui, Katrin and Miguez, Joaquin and Rodas, Javier and Escudero, Carlos J},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P32_2012_%20Multi-Model%20Sequential%20Monte%20Carlo%20Methodology%20for%20Indoor%20Tracking-%20Algorithms%20and%20Experimental%20Results.pdf
http://www.sciencedirect.com/science/article/pii/S0165168412001077},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Signal Processing},
  volume    = {92},
  number    = {11},
  pages     = {2594--2613},
  abstract  = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as a position-dependent data measurement. Since RSS is highly influenced by multipath propagation, it turns out very hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. Although various models have been proposed in the literature, they often require the use of very large collections of data in order to fit them and display great sensitivity to changes in the radio propagation environment. In this work we advocate the use of switching multiple models that account for different classes of target dynamics and propagation environments and propose a flexible probabilistic switching scheme. The resulting state-space structure is termed a generalized switching multiple model (GSMM) system. Within this framework, we investigate two types of models for the RSS data: polynomial models and classical logarithmic path-loss representation. The first model is more accurate however it demands an offline model fitting step. The second one is less precise but it can be fitted in an online procedure. We have designed two tracking algorithms built around a Rao-Blackwellized particle filter, tailored to the GSMM structure and assessed its performances both with synthetic and experimental measurements.},
  keywords  = {Data fusion, Indoor positioning, Indoor tracking, Interacting multiple models, Sequential Monte Carlo, Switching observation models},
  pubstate  = {published},
  tppubtype = {article}
}

Martino, Luca; Olmo, Victor Pascual Del; Read, Jesse

A Multi-Point Metropolis Scheme with Generic Weight Functions Artículo de revista

En: Statistics & Probability Letters, vol. 82, no 7, pp. 1445–1453, 2012.

Resumen | Enlaces | BibTeX | Etiquetas: MCMC methods, Multi-point Metropolis algorithm, Multiple Try Metropolis algorithm

@article{Martino2012,
  title     = {A Multi-Point {Metropolis} Scheme with Generic Weight Functions},
  author    = {Martino, Luca and Del Olmo, Victor Pascual and Read, Jesse},
  url       = {http://www.sciencedirect.com/science/article/pii/S0167715212001514},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Statistics \& Probability Letters},
  volume    = {82},
  number    = {7},
  pages     = {1445--1453},
  abstract  = {The multi-point Metropolis algorithm is an advanced MCMC technique based on drawing several correlated samples at each step and choosing one of them according to some normalized weights. We propose a variation of this technique where the weight functions are not specified, i.e., the analytic form can be chosen arbitrarily. This has the advantage of greater flexibility in the design of high-performance MCMC samplers. We prove that our method fulfills the balance condition, and provide a numerical simulation. We also give new insight into the functionality of different MCMC algorithms, and the connections between them.},
  keywords  = {MCMC methods, Multi-point Metropolis algorithm, Multiple Try Metropolis algorithm},
  pubstate  = {published},
  tppubtype = {article}
}

Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando

Bayesian Equalization for LDPC Channel Decoding Artículo de revista

En: IEEE Transactions on Signal Processing, vol. 60, no 5, pp. 2672–2676, 2012, ISSN: 1053-587X.

Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, Bayes methods, Bayesian equalization, Bayesian estimation problem, Bayesian inference, Bayesian methods, BCJR (Bahl–Cocke–Jelinek–Raviv) algorithm, BCJR algorithm, Channel Coding, channel decoding, channel equalization, channel equalization problem, Channel estimation, channel state information, CSI, Decoding, equalisers, Equalizers, expectation propagation, expectation propagation algorithm, fading channels, graphical model representation, intersymbol interference, Kullback-Leibler divergence, LDPC, LDPC coding, low-density parity-check decoder, Modulation, parity check codes, symbol posterior estimates, Training

@article{Salamanca2012b,
  title     = {{Bayesian} Equalization for {LDPC} Channel Decoding},
  author    = {Salamanca, Luis and Murillo-Fuentes, Juan Jose and Perez-Cruz, Fernando},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6129544},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {5},
  pages     = {2672--2676},
  abstract  = {We describe the channel equalization problem, and its prior estimate of the channel state information (CSI), as a joint Bayesian estimation problem to improve each symbol posterior estimates at the input of the channel decoder. Our approach takes into consideration not only the uncertainty due to the noise in the channel, but also the uncertainty in the CSI estimate. However, this solution cannot be computed in linear time, because it depends on all the transmitted symbols. Hence, we also put forward an approximation for each symbol's posterior, using the expectation propagation algorithm, which is optimal from the Kullback-Leibler divergence viewpoint and yields an equalization with a complexity identical to the BCJR algorithm. We also use a graphical model representation of the full posterior, in which the proposed approximation can be readily understood. The proposed posterior estimates are more accurate than those computed using the ML estimate for the CSI. In order to illustrate this point, we measure the error rate at the output of a low-density parity-check decoder, which needs the exact posterior for each symbol to detect the incoming word and it is sensitive to a mismatch in those posterior estimates. For example, for QPSK modulation and a channel with three taps, we can expect gains over 0.5 dB with same computational complexity as the ML receiver.},
  keywords  = {Approximation methods, Bayes methods, Bayesian equalization, Bayesian estimation problem, Bayesian inference, Bayesian methods, BCJR (Bahl\textendashCocke\textendashJelinek\textendashRaviv) algorithm, BCJR algorithm, Channel Coding, channel decoding, channel equalization, channel equalization problem, Channel estimation, channel state information, CSI, Decoding, equalisers, Equalizers, expectation propagation, expectation propagation algorithm, fading channels, graphical model representation, intersymbol interference, Kullback-Leibler divergence, LDPC, LDPC coding, low-density parity-check decoder, Modulation, parity check codes, symbol posterior estimates, Training},
  pubstate  = {published},
  tppubtype = {article}
}

Landa-Torres, Itziar; Ortiz-Garcia, Emilio G; Salcedo-Sanz, Sancho; Segovia-Vargas, María J; Gil-Lopez, Sergio; Miranda, Marta; Leiva-Murillo, Jose M; Ser, Javier Del

Evaluating the Internationalization Success of Companies Through a Hybrid Grouping Harmony Search—Extreme Learning Machine Approach Artículo de revista

En: IEEE Journal of Selected Topics in Signal Processing, vol. 6, no 4, pp. 388–398, 2012, ISSN: 1932-4553.

Resumen | Enlaces | BibTeX | Etiquetas: Companies, Company internationalization, corporative strategy, diverse activity, Economics, Electronic mail, ensembles, exporting, exporting performance, external markets, extreme learning machine ensemble, extreme learning machines, feature selection method, grouping-based harmony search, hard process, harmony search (HS), hybrid algorithm, hybrid algorithms, hybrid grouping harmony search-extreme learning ma, hybrid soft computing, international company, international trade, internationalization procedure, internationalization success, learning (artificial intelligence), Machine learning, organizational structure, Signal processing algorithms, Spanish manufacturing company, Training, value chain

@article{Landa-Torres2012,
  title     = {Evaluating the Internationalization Success of Companies Through a Hybrid Grouping Harmony Search\textemdash{}Extreme Learning Machine Approach},
  author    = {Landa-Torres, Itziar and Ortiz-Garcia, Emilio G and Salcedo-Sanz, Sancho and Segovia-Vargas, Mar{\'\i}a J and Gil-Lopez, Sergio and Miranda, Marta and Leiva-Murillo, Jose M and Del Ser, Javier},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6200298},
  issn      = {1932-4553},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Journal of Selected Topics in Signal Processing},
  volume    = {6},
  number    = {4},
  pages     = {388--398},
  abstract  = {The internationalization of a company is widely understood as the corporative strategy for growing through external markets. It usually embodies a hard process, which affects diverse activities of the value chain and impacts on the organizational structure of the company. There is not a general model for a successful international company, so the success of an internationalization procedure must be estimated based on different variables addressing the status, strategy and market characteristics of the company at hand. This paper presents a novel hybrid soft-computing approach for evaluating the internationalization success of a company based on existing past data. Specifically, we propose a hybrid algorithm composed by a grouping-based harmony search (HS) approach and an extreme learning machine (ELM) ensemble. The proposed hybrid scheme further incorporates a feature selection method, which is obtained by means of a given group in the HS encoding format, whereas the ELM ensemble renders the final accuracy metric of the model. Practical results for the proposed hybrid technique are obtained in a real application based on the exporting success of Spanish manufacturing companies, which are shown to be satisfactory in comparison with alternative state-of-the-art techniques.},
  keywords  = {Companies, Company internationalization, corporative strategy, diverse activity, Economics, Electronic mail, ensembles, exporting, exporting performance, external markets, extreme learning machine ensemble, extreme learning machines, feature selection method, grouping-based harmony search, hard process, harmony search (HS), hybrid algorithm, hybrid algorithms, hybrid grouping harmony search-extreme learning ma, hybrid soft computing, international company, international trade, internationalization procedure, internationalization success, learning (artificial intelligence), Machine learning, organizational structure, Signal processing algorithms, Spanish manufacturing company, Training, value chain},
  pubstate  = {published},
  tppubtype = {article}
}

Luengo, David; Miguez, Joaquin; Martino, Luca

Efficient Sampling from Truncated Bivariate Gaussians via Box-Muller Transformation Artículo de revista

En: Electronics Letters, vol. 48, no 24, pp. 1533–1534, 2012, ISSN: 0013-5194.

Resumen | Enlaces | BibTeX | Etiquetas:

@article{Luengo2012a,
  title     = {Efficient Sampling from Truncated Bivariate {Gaussians} via {Box-Muller} Transformation},
  author    = {Luengo, David and Miguez, Joaquin and Martino, Luca},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P35_2012_Efficient%20Sampling%20from%20Truncated%20Bivariate%20Gaussians%20via%20Box-Muller%20Transformation.pdf
http://www.researchgate.net/publication/235004345_Efficient_Sampling_from_Truncated_Bivariate_Gaussians_via_the_Box-Muller_Transformation},
  issn      = {0013-5194},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Electronics Letters},
  volume    = {48},
  number    = {24},
  pages     = {1533--1534},
  abstract  = {Many practical simulation tasks demand procedures to draw samples efficiently from multivariate truncated Gaussian distributions. In this work, we introduce a novel rejection approach, based on the Box-Muller transformation, to generate samples from a truncated bivariate Gaussian density with an arbitrary support. Furthermore, for an important class of support regions the new method allows us to achieve exact sampling, thus becoming the most efficient approach possible. Introduction: The numerical simulation of many systems of practical interest demands the ability to produce Monte Carlo samples from truncated Gaussian distributions [5, 3, 7]. The simplest way to address this problem is to perform rejection sampling using the corresponding (non-truncated) Gaussian distribution as a proposal. This trivial method produces independent and identically distributed (i.i.d.) samples, but it is time consuming and computationally inefficient. For these two reasons, different methods have been introduced in the literature, e.g., using MCMC techniques [5, 7] or rejection sampling [1]. Unfortunately, MCMC schemes produce correlated samples, which can lead to a very slow convergence of the chain, whereas rejection methods can be computationally inefficient. In this paper, we introduce a novel approach, based on the Box-Muller transformation (BMT) [2], to generate i.i.d. samples from truncated bivariate Gaussian distributions. The main advantages of the proposed approach are the following: (1) it allows sampling within a generic domain D ⊆ R 2 without any restriction and (2) the inverse transformation of the BMT maps any region D ⊆ R 2 (either bounded or unbounded) into a bounded region, A ⊆ R = [0, 1] × [0, 1]. Hence, all the procedures developed for drawing efficiently uniform random variables within bounded regions, e.g., adaptive rejection sampling or strip methods [2, 4], can always be used. 
Furthermore, for an important class of support regions the BMT allows us to perform exact sampling (i.e., draw i.i.d. samples from the target distribution without any rejection), which is the most efficient situation possible. Problem Formulation: The problem considered here is related to drawing samples from a truncated multivariate Gaussian distribution. In particular, in this letter we focus on drawing samples from a bivariate truncated standard Gaussian PDF, denoted as Z ∼ T N (0, I, D), where the support domain D ⊆ R 2 is a non-null Borel set. Note that drawing samples from a non-truncated standard Gaussian distribution, Z ∼ N (0, I), enables us to draw samples from an arbitrary Gaussian distribution, X ∼ N (µ, $\Sigma$), whenever $\Sigma$ is positive definite. More precisely, since $\Sigma$ is positive definite, it can be expressed as $\Sigma$ = SS , using for instance the Cholesky decomposition, and the random vector X = SZ + µ has the desired distribution, X ∼ N (µ, $\Sigma$). Similarly, sampling from a truncated bivariate standard Gaussian distribution allows us to generate samples from an arbitrary truncated bivariate Gaussian. In this case, if Z ∼ T N (0, I, D), then we can obtain X ∼ T N (µ, $\Sigma$, D *) simply through the transformation X = SZ + µ, with $\Sigma$ = SS and},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}

Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio

Algorithms for Maximum-Likelihood Bandwidth Selection in Kernel Density Estimators Artículo de revista

En: Pattern Recognition Letters, vol. 33, no 13, pp. 1717–1724, 2012, ISSN: 01678655.

Resumen | Enlaces | BibTeX | Etiquetas: Kernel density estimation, Multivariate density modeling, Pattern recognition

@article{Leiva-Murillo2012,
  title     = {Algorithms for Maximum-Likelihood Bandwidth Selection in Kernel Density Estimators},
  author    = {Leiva-Murillo, Jose M and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P45_2012_Algorithms%20for%20Maximum%20Likelihood%20Bandwidth%20Selection%20in%20Kernel%20Density%20Estimators.pdf
http://www.sciencedirect.com/science/article/pii/S0167865512001948},
  issn      = {0167-8655},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {Pattern Recognition Letters},
  volume    = {33},
  number    = {13},
  pages     = {1717--1724},
  publisher = {Elsevier Science Inc.},
  abstract  = {In machine learning and statistics, kernel density estimators are rarely used on multivariate data due to the difficulty of finding an appropriate kernel bandwidth to overcome overfitting. However, the recent advances on information-theoretic learning have revived the interest on these models. With this motivation, in this paper we revisit the classical statistical problem of data-driven bandwidth selection by cross-validation maximum likelihood for Gaussian kernels. We find a solution to the optimization problem under both the spherical and the general case where a full covariance matrix is considered for the kernel. The fixed-point algorithms proposed in this paper obtain the maximum likelihood bandwidth in few iterations, without performing an exhaustive bandwidth search, which is unfeasible in the multivariate case. The convergence of the methods proposed is proved. A set of classification experiments are performed to prove the usefulness of the obtained models in pattern recognition.},
  keywords  = {Kernel density estimation, Multivariate density modeling, Pattern recognition},
  pubstate  = {published},
  tppubtype = {article}
}

Maiz, Cristina S; Molanes-Lopez, Elisa M; Miguez, Joaquin; Djuric, Petar M

A Particle Filtering Scheme for Processing Time Series Corrupted by Outliers Artículo de revista

En: IEEE Transactions on Signal Processing, vol. 60, no 9, pp. 4611–4627, 2012, ISSN: 1053-587X.

Resumen | Enlaces | BibTeX | Etiquetas: Kalman filters, Mathematical model, nonlinear state space model, Outlier detection, prediction theory, predictive distribution, Probability density function, State-space methods, state-space models, statistical distributions, Target tracking, time serie processing, Vectors, Yttrium

@article{Maiz2012,
  title     = {A Particle Filtering Scheme for Processing Time Series Corrupted by Outliers},
  author    = {Maiz, Cristina S and Molanes-Lopez, Elisa M and Miguez, Joaquin and Djuric, Petar M},
  url       = {http://www.tsc.uc3m.es/~jmiguez/papers/P34_2012_A%20Particle%20Filtering%20Scheme%20for%20Processing%20Time%20Series%20Corrupted%20by%20Outliers.pdf http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6203606},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {9},
  pages     = {4611--4627},
  abstract  = {The literature in engineering and statistics is abounding in techniques for detecting and properly processing anomalous observations in the data. Most of these techniques have been developed in the framework of static models and it is only in recent years that we have seen attempts that address the presence of outliers in nonlinear time series. For a target tracking problem described by a nonlinear state-space model, we propose the online detection of outliers by including an outlier detection step within the standard particle filtering algorithm. The outlier detection step is implemented by a test involving a statistic of the predictive distribution of the observations, such as a concentration measure or an extreme upper quantile. We also provide asymptotic results about the convergence of the particle approximations of the predictive distribution (and its statistics) and assess the performance of the resulting algorithms by computer simulations of target tracking problems with signal power observations.},
  keywords  = {Kalman filters, Mathematical model, nonlinear state space model, Outlier detection, prediction theory, predictive distribution, Probability density function, State-space methods, state-space models, statistical distributions, Target tracking, time serie processing, Vectors, Yttrium},
  pubstate  = {published},
  tppubtype = {article}
}

Cruz-Roldan, Fernando; Dominguez-Jimenez, María Elena; Sansigre Vidal, Gabriela; Amo-Lopez, Pedro; Blanco-Velasco, Manuel; Bravo-Santos, Ángel M

On the Use of Discrete Cosine Transforms for Multicarrier Communications Artículo de revista

En: IEEE Transactions on Signal Processing, vol. 60, no 11, pp. 6085–6090, 2012, ISSN: 1053-587X.

Resumen | Enlaces | BibTeX | Etiquetas: broadband networks, carrier frequency offset, Carrier-frequency offset (CFO), CFO, channel equalization, computer simulations, Convolution, Data communication, data symbol, DCT, DFT, discrete cosine transform (DCT), discrete cosine transform domain, Discrete cosine transforms, discrete Fourier transforms, discrete multitone modulation (DMT), discrete trigonometric domain, element-by-element multiplication, equalisers, equivalent channel impulse response, linear convolution, mobile broadband wireless communication, mobile radio, Modulation, multicarrier communications, multicarrier data transmission, multicarrier modulation (MCM), multicarrier transceiver, OFDM, orthogonal frequency-division multiplexing (OFDM), Receivers, Redundancy, subcarrier equalizers, symmetric convolution-multiplication property, symmetric redundancy, time-domain analysis, transient response, transmission channel

@article{Cruz-Roldan2012,
  title     = {On the Use of Discrete Cosine Transforms for Multicarrier Communications},
  author    = {Cruz-Roldan, Fernando and Dominguez-Jimenez, Mar{\'\i}a Elena and Sansigre Vidal, Gabriela and Amo-Lopez, Pedro and Blanco-Velasco, Manuel and Bravo-Santos, {\'A}ngel M},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6252068},
  issn      = {1053-587X},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Signal Processing},
  volume    = {60},
  number    = {11},
  pages     = {6085--6090},
  abstract  = {In this correspondence, the conditions to use any kind of discrete cosine transform (DCT) for multicarrier data transmission are derived. The symmetric convolution-multiplication property of each DCT implies that when symmetric convolution is performed in the time domain, an element-by-element multiplication is performed in the corresponding discrete trigonometric domain. Therefore, appending symmetric redundancy (as prefix and suffix) into each data symbol to be transmitted, and also enforcing symmetry for the equivalent channel impulse response, the linear convolution performed in the transmission channel becomes a symmetric convolution in those samples of interest. Furthermore, the channel equalization can be carried out by means of a bank of scalars in the corresponding discrete cosine transform domain. The expressions for obtaining the value of each scalar corresponding to these one-tap per subcarrier equalizers are presented. This study is completed with several computer simulations in mobile broadband wireless communication scenarios, considering the presence of carrier frequency offset (CFO). The obtained results indicate that the proposed systems outperform the standardized ones based on the DFT.},
  keywords  = {broadband networks, carrier frequency offset, Carrier-frequency offset (CFO), CFO, channel equalization, computer simulations, Convolution, Data communication, data symbol, DCT, DFT, discrete cosine transform (DCT), discrete cosine transform domain, Discrete cosine transforms, discrete Fourier transforms, discrete multitone modulation (DMT), discrete trigonometric domain, element-by-element multiplication, equalisers, equivalent channel impulse response, linear convolution, mobile broadband wireless communication, mobile radio, Modulation, multicarrier communications, multicarrier data transmission, multicarrier modulation (MCM), multicarrier transceiver, OFDM, orthogonal frequency-division multiplexing (OFDM), Receivers, Redundancy, subcarrier equalizers, symmetric convolution-multiplication property, symmetric redundancy, time-domain analysis, transient response, transmission channel},
  pubstate  = {published},
  tppubtype = {article}
}

Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio

Information-Theoretic Linear Feature Extraction Based on Kernel Density Estimators: A Review Artículo de revista

En: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), vol. 42, no 6, pp. 1180–1189, 2012, ISSN: 1094-6977.

Resumen | Enlaces | BibTeX | Etiquetas: Bandwidth, Density, detection theory, Entropy, Estimation, Feature extraction, Feature extraction (FE), information theoretic linear feature extraction, information theory, information-theoretic learning (ITL), Kernel, Kernel density estimation, kernel density estimators, Machine learning

@article{Leiva-Murillo2012a,
  title     = {Information-Theoretic Linear Feature Extraction Based on Kernel Density Estimators: A Review},
  author    = {Leiva-Murillo, Jose M and Art{\'e}s-Rodr{\'\i}guez, Antonio},
  url       = {http://www.tsc.uc3m.es/~antonio/papers/P44_2012_Information%20Theoretic%20Linear%20Feature%20Extraction%20Based%20on%20Kernel%20Density%20Estimators%20A%20Review.pdf http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6185689},
  issn      = {1094-6977},
  year      = {2012},
  date      = {2012-01-01},
  journal   = {IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews)},
  volume    = {42},
  number    = {6},
  pages     = {1180--1189},
  abstract  = {In this paper, we provide a unified study of the application of kernel density estimators to supervised linear feature extraction by means of criteria inspired by information and detection theory. We enrich this study by the incorporation of two novel criteria to the study, i.e., the mutual information and the likelihood ratio test, and perform both a theoretical and an experimental comparison between the new methods and other ones previously described in the literature. The impact of the bandwidth selection of the density estimator in the classification performance is discussed. Some theoretical results that bound classification performance as a function or mutual information are also compiled. A set of experiments on different real-world datasets allows us to perform an empirical comparison of the methods, in terms of both accuracy and computational complexity. We show the suitability of these methods to determine the dimension of the subspace that contains the discriminative information.},
  keywords  = {Bandwidth, Density, detection theory, Entropy, Estimation, Feature extraction, Feature extraction (FE), information theoretic linear feature extraction, information theory, information-theoretic learning (ITL), Kernel, Kernel density estimation, kernel density estimators, Machine learning},
  pubstate  = {published},
  tppubtype = {article}
}