2011
Plata-Chaves, Jorge; Lazaro, Marcelino; Artés-Rodríguez, Antonio
Optimal Neyman-Pearson Fusion in Two-Dimensional Sensor Networks with Serial Architecture and Dependent Observations Proceedings Article
En: Information Fusion (FUSION), 2011 Proceedings of the 14th International Conference on, pp. 1–6, Chicago, 2011, ISBN: 978-1-4577-0267-9.
Resumen | Enlaces | BibTeX | Etiquetas: Bayesian methods, binary distributed detection problem, decision theory, dependent observations, Joints, local decision rule, Measurement uncertainty, Network topology, Neyman-Pearson criterion, optimal Neyman-Pearson fusion, optimum distributed detection, Parallel architectures, Performance evaluation, Probability density function, sensor dependent observations, sensor fusion, serial architecture, serial network topology, two-dimensional sensor networks, Wireless Sensor Networks
@inproceedings{Plata-Chaves2011bb,
  title     = {Optimal {Neyman-Pearson} Fusion in Two-Dimensional Sensor Networks with Serial Architecture and Dependent Observations},
  author    = {Jorge Plata-Chaves and Marcelino Lazaro and Antonio Art{\'e}s-Rodr{\'i}guez},
  url       = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5977545},
  isbn      = {978-1-4577-0267-9},
  year      = {2011},
  date      = {2011-01-01},
  booktitle = {Information Fusion (FUSION), 2011 Proceedings of the 14th International Conference on},
  pages     = {1--6},
  address   = {Chicago},
  abstract  = {In this correspondence, we consider a sensor network with serial architecture. When solving a binary distributed detection problem where the sensor observations are dependent under each one of the two possible hypothesis, each fusion stage of the network applies a local decision rule. We assume that, based on the information available at each fusion stage, the decision rules provide a binary message regarding the presence or absence of an event of interest. Under this scenario and under a Neyman-Pearson formulation, we derive the optimal decision rules associated with each fusion stage. As it happens when the sensor observations are independent, we are able to show that, under the Neyman-Pearson criterion, the optimal fusion rules of a serial configuration with dependent observations also match optimal Neyman-Pearson tests.},
  keywords  = {Bayesian methods, binary distributed detection problem, decision theory, dependent observations, Joints, local decision rule, Measurement uncertainty, Network topology, Neyman-Pearson criterion, optimal Neyman-Pearson fusion, optimum distributed detection, Parallel architectures, Performance evaluation, Probability density function, sensor dependent observations, sensor fusion, serial architecture, serial network topology, two-dimensional sensor networks, Wireless Sensor Networks},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parviainen, Jussi; Kirkko-Jaakkola, Martti; Davidson, Pavel; Vázquez, Manuel A; Collin, Jussi
Doppler radar and MEMS gyro augmented DGPS for large vehicle navigation Proceedings Article
En: 2011 International Conference on Localization and GNSS (ICL-GNSS), pp. 140-145, 2011.
@inproceedings{5955285,
  title     = {{Doppler} radar and {MEMS} gyro augmented {DGPS} for large vehicle navigation},
  author    = {Jussi Parviainen and Martti Kirkko-Jaakkola and Pavel Davidson and Manuel A V{\'a}zquez and Jussi Collin},
  doi       = {10.1109/ICL-GNSS.2011.5955285},
  year      = {2011},
  date      = {2011-01-01},
  urldate   = {2011-01-01},
  booktitle = {2011 International Conference on Localization and GNSS (ICL-GNSS)},
  pages     = {140--145},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2010
López-Valcarce, Roberto; Vazquez-Vilar, Gonzalo; Sala, Josep
Multiantenna spectrum sensing for Cognitive Radio: overcoming noise uncertainty Proceedings Article
En: The 2nd International Workshop on Cognitive Information Processing (CIP 2010), Elba Island (Tuscany), Italy, 2010, (Invited).
BibTeX | Etiquetas:
@inproceedings{cip2010,
  title     = {Multiantenna spectrum sensing for {Cognitive Radio}: overcoming noise uncertainty},
  author    = {Roberto L{\'o}pez-Valcarce and Gonzalo Vazquez-Vilar and Josep Sala},
  year      = {2010},
  date      = {2010-06-01},
  booktitle = {The 2nd International Workshop on Cognitive Information Processing (CIP 2010)},
  address   = {Elba Island (Tuscany), Italy},
  note      = {Invited},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
El-Howayek, Georges; Jayaweera, Sudharman K; Hakim, Kamrul; Vazquez-Vilar, Gonzalo; Mosquera, Carlos
Dynamic Spectrum Leasing (DSL) in Dynamic Channels Proceedings Article
En: ICC'10 Workshop on Cognitive Radio Interfaces and Signal Processing (ICC'10 Workshop CRISP), Cape Town, South Africa, 2010.
BibTeX | Etiquetas:
@inproceedings{crisp2010,
  title     = {Dynamic Spectrum Leasing ({DSL}) in Dynamic Channels},
  author    = {Georges El-Howayek and Sudharman K Jayaweera and Kamrul Hakim and Gonzalo Vazquez-Vilar and Carlos Mosquera},
  year      = {2010},
  date      = {2010-05-01},
  booktitle = {ICC'10 Workshop on Cognitive Radio Interfaces and Signal Processing (ICC'10 Workshop CRISP)},
  address   = {Cape Town, South Africa},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Vazquez-Vilar, Gonzalo; López-Valcarce, Roberto; Mosquera, Carlos; González-Prelcic, Nuria
Wideband Spectral Estimation from Compressed Measurements Exploiting Spectral a priori Information in Cognitive Radio Systems Proceedings Article
En: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010), Dallas, U.S.A., 2010.
BibTeX | Etiquetas:
@inproceedings{icassp2010,
  title     = {Wideband Spectral Estimation from Compressed Measurements Exploiting Spectral a priori Information in {Cognitive Radio} Systems},
  author    = {Gonzalo Vazquez-Vilar and Roberto L{\'o}pez-Valcarce and Carlos Mosquera and Nuria Gonz{\'a}lez-Prelcic},
  year      = {2010},
  date      = {2010-03-01},
  booktitle = {2010 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)},
  address   = {Dallas, U.S.A.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Vazquez, Manuel A; Miguez, Joaquin
Adaptive MLSD for MIMO Transmission Systems with Unknown Subchannel Orders Proceedings Article
En: 2010 7th International Symposium on Wireless Communication Systems, pp. 451–455, IEEE, York, 2010, ISSN: 2154-0217.
Resumen | Enlaces | BibTeX | Etiquetas: Bit error rate, Channel estimation, channel impulse response, computational complexity, Estimation, frequency-selective multiple-input multiple-output, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO communication, MIMO transmission systems, multiple subchannels, per survivor processing methodology, pilot data, Receivers, Signal to noise ratio, Time frequency analysis, time selective MIMO channel
@inproceedings{Vazquez2010,
  title     = {Adaptive {MLSD} for {MIMO} Transmission Systems with Unknown Subchannel Orders},
  author    = {Manuel A Vazquez and Joaquin Miguez},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5624335},
  issn      = {2154-0217},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 7th International Symposium on Wireless Communication Systems},
  pages     = {451--455},
  publisher = {IEEE},
  address   = {York},
  abstract  = {In the equalization of frequency-selective multiple-input multiple-output (MIMO) channels it is usually assumed that the length of the channel impulse response (CIR), also referred to as the channel order, is known. However, this is not true in most practical situations and, in order to avoid the serious performance degradation that occurs when the CIR length is underestimated, a channel with "more than enough" taps is usually considered. This very frequently leads to overestimating the channel order, which increases the computational complexity of any maximum likelihood sequence detection (MLSD) algorithm, while degrading its performance at the same time. The problem of estimating a single channel order for a time and frequency selective MIMO channel has recently been tackled. However, this is an idealized approach, since a MIMO channel comprises multiple subchannels (as many as the number of inputs times that of the outputs), each of them possibly with its own order. In this paper, we introduce an algorithm for MLSD that incorporates the full estimation of the MIMO CIR parameters, including one channel order per output. The proposed technique is based on the per survivor processing (PSP) methodology, it admits both blind and semiblind implementations, depending on the availability of pilot data, and it is designed to work with time-selective channels. Besides the analytical derivation of the algorithm, we provide computer simulation results that illustrate the effectiveness of the resulting receiver.},
  keywords  = {Bit error rate, Channel estimation, channel impulse response, computational complexity, Estimation, frequency-selective multiple-input multiple-output, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO communication, MIMO transmission systems, multiple subchannels, per survivor processing methodology, pilot data, Receivers, Signal to noise ratio, Time frequency analysis, time selective MIMO channel},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Valera, Isabel; Sieskul, B T; Zheng, F; Kaiser, T
A Hybrid SS-ToA Wireless Geolocation Based on Path Attenuation under Imperfect Path Loss Exponent Proceedings Article
En: 18th European Signal Processing Conference (EUSIPCO-2010), Aalborg, 2010.
Resumen | Enlaces | BibTeX | Etiquetas: maximum likelihood estimator, Path loss exponent, Time-of-arrival estimation
@inproceedings{Valera2010,
  title     = {A Hybrid {SS-ToA} Wireless Geolocation Based on Path Attenuation under Imperfect Path Loss Exponent},
  author    = {Isabel Valera and B T Sieskul and F Zheng and T Kaiser},
  url       = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569292415.pdf},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {18th European Signal Processing Conference (EUSIPCO-2010)},
  address   = {Aalborg},
  abstract  = {We consider the wireless geolocation using the time of arrival (ToA) of radio signals in a cellular setting. The main concern in this paper involves the effects of the error knowledge of the path loss exponent (PLE). We derive the asymptotic error performance of the maximum likelihood (ML) estimator under the imperfect PLE. We point out that a previous method provides inaccurate performance prediction and then present a new method based on the Taylor series expansion. Numerical examples illustrate that the Taylor analysis captures the bias and the error variance of the ML estimator under the imperfect PLE better than the conventional method. Simulation results also illustrate that in the threshold region, the ML estimator outperforms the MC estimator even in the presence of the PLE error. However, in the asymptotic region the MC estimator and the ML estimator with the perfect PLE outperform the ML estimator under the imperfect PLE.},
  keywords  = {maximum likelihood estimator, Path loss exponent, Time-of-arrival estimation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
A Rejection Sampling Scheme for Posterior Probability Distributions via the Ratio-of-Uniforms Method Proceedings Article
En: 18th European Signal Processing Conference (EUSIPCO-2010), Aalborg, 2010.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Martino2010,
  title     = {A Rejection Sampling Scheme for Posterior Probability Distributions via the {Ratio-of-Uniforms} Method},
  author    = {Luca Martino and Joaquin Miguez},
  url       = {http://www.academia.edu/2355638/A_rejection_sampling_scheme_for_posterior_probability_distributions_via_the_ratio-of-uniforms_method},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {18th European Signal Processing Conference (EUSIPCO-2010)},
  address   = {Aalborg},
  abstract  = {Accept/reject sampling is a well-known method to generate random samples from arbitrary target probability distributions. It demands the design of a suitable proposal probability density function (pdf) from which candidate samples can be drawn. The main limitation to the use of RS is the need to find an adequate upper bound for the ratio of the target pdf over the proposal pdf from which the samples are generated. There are no general methods to analytically find this bound, except when the target pdf is log-concave. In this paper we introduce a novel procedure using the ratio of uniforms method to efficiently perform rejection sampling for a large class of target densities. The candidate samples are generated using only two independent uniform random variables. In order to illustrate the application of the proposed technique, we provide a numerical example.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ruiz, Manuel Martinez; Artés-Rodríguez, Antonio; Diaz-Rico, Jose Antonio; Fuentes, Jose Blanco
New Initiatives for Imagery Transmission over a Tactical Data Link. A Case Study: JPEG2000 Compressed Images Transmitted in a Link-16 Network. Method and Results Proceedings Article
En: 2010 - MILCOM 2010 MILITARY COMMUNICATIONS CONFERENCE, pp. 1163–1168, IEEE, San Jose, 2010, ISSN: 2155-7578.
Resumen | Enlaces | BibTeX | Etiquetas: Bit rate, code stream, data stream, Decoding, discrete wavelet transforms, Image coding, image compression, imagery transmission, JPEG-2000, JPEG2000 compressed images, link-16, Link-16 Enhance Throughput, Link-16 tactical network, MIDS-LVT, military communication, multirresolution, operational requirement, packing limit, PSNR, Security, Streaming media, tactical data link, time slot, Transform coding, wavelet discrete transforms, wavelets
@inproceedings{Martinez-Ruiz2010,
  title     = {New Initiatives for Imagery Transmission over a Tactical Data Link. A Case Study: {JPEG2000} Compressed Images Transmitted in a {Link-16} Network. Method and Results},
  author    = {Manuel Martinez Ruiz and Antonio Art{\'e}s-Rodr{\'i}guez and Jose Antonio Diaz-Rico and Jose Blanco Fuentes},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5680102},
  issn      = {2155-7578},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 - MILCOM 2010 MILITARY COMMUNICATIONS CONFERENCE},
  pages     = {1163--1168},
  publisher = {IEEE},
  address   = {San Jose},
  abstract  = {This paper presents the results of an initiative to transmit imagery content through a Link-16 tactical network using a multirresolution approach based on wavelets to compress images. Firstly, we identify the operational requirements. Secondly, we justify why JPEG2000 is our choice for coding still images. Thirdly, we propose a method to map the JPEG2000 code-stream into Link-16 free-text messages. We propose to send the most important part of the JPEG2000 compressed image in a more error resistant Link-16 packed structure and the remaining of the image in less robust data structures but at higher data rates. Finally, we present our results based on software simulations and laboratory tests with real Link-16 terminals including a comparative analysis with Link-16 enhance throughput. A configuration using two MIDS-LVTs has being set up, along with JPEG2000 coding and decoding software tools.},
  keywords  = {Bit rate, code stream, data stream, Decoding, discrete wavelet transforms, Image coding, image compression, imagery transmission, JPEG-2000, JPEG2000 compressed images, link-16, Link-16 Enhance Throughput, Link-16 tactical network, MIDS-LVT, military communication, multirresolution, operational requirement, packing limit, PSNR, Security, Streaming media, tactical data link, time slot, Transform coding, wavelet discrete transforms, wavelets},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Koch, Tobias; Lapidoth, Amos
Increased Capacity per Unit-Cost by Oversampling Proceedings Article
En: 2010 IEEE 26-th Convention of Electrical and Electronics Engineers in Israel, pp. 000684–000688, IEEE, Eliat, 2010, ISBN: 978-1-4244-8681-6.
Resumen | Enlaces | BibTeX | Etiquetas: AWGN, AWGN channels, bandlimited Gaussian channel, channel capacity, Gaussian channels, increased capacity per unit cost, Information rates, one bit output quantizer, oversampling, quantisation (signal), quantization, sampling rate recovery, signal sampling
@inproceedings{Koch2010,
  title     = {Increased Capacity per Unit-Cost by Oversampling},
  author    = {Tobias Koch and Amos Lapidoth},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5662127},
  isbn      = {978-1-4244-8681-6},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 IEEE 26th Convention of Electrical and Electronics Engineers in Israel},
  pages     = {000684--000688},
  publisher = {IEEE},
  address   = {Eilat},
  abstract  = {It is demonstrated that doubling the sampling rate recovers some of the loss in capacity incurred on the bandlimited Gaussian channel with a one-bit output quantizer.},
  keywords  = {AWGN, AWGN channels, bandlimited Gaussian channel, channel capacity, Gaussian channels, increased capacity per unit cost, Information rates, one bit output quantizer, oversampling, quantisation (signal), quantization, sampling rate recovery, signal sampling},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Murillo-Fuentes, Juan Jose; Olmos, Pablo M; Perez-Cruz, Fernando
Analyzing the Maxwell Decoder for LDPC Codes in Binary Erasure Channels Proceedings Article
En: Information Theory and Applications (ITA), San Diego, 2010.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Murillo-Fuentes2010,
  title     = {Analyzing the {Maxwell} Decoder for {LDPC} Codes in Binary Erasure Channels},
  author    = {Juan Jose Murillo-Fuentes and Pablo M Olmos and Fernando Perez-Cruz},
  url       = {http://ita.ucsd.edu/workshop/10/files/abstract/abstract_1462.txt},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {Information Theory and Applications (ITA)},
  address   = {San Diego},
  abstract  = {The Maxwell decoder has been proposed for bridging the gap between the achievable capacity by belief propagation decoding and the maximum a posteriori decoder in binary erasure channels of LDPC codes. The Maxwell decoder, once the belief-propagation decoder gets stuck in a nonempty stopping set, guesses a bit and replicates any running copies of the decoding process. Density evolution and EXIT chart analyses of this iterative decoder show that MAP performance can be derived from the performance of the BP decoder. The complexity of the Maxwell decoder depends exponentially on the number of guesses and a priori we cannot bound the number of guesses, which limits its applicability as a LDPC decoder. In this paper, we adapt the expectation propagation algorithm for LDPC decoding. Our algorithm can be understood as a Maxwell decoder with a bounded complexity. For unbounded complexity it achieves maximum a posteriori decoding. In this paper, we analyze in detail the simplest version of the algorithm, whose complexity is identical to belief propagation, and we demonstrate that the achieved capacity is higher than that of the belief propagation decoder.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Tree-Structure Expectation Propagation for Decoding LDPC Codes over Binary Erasure Channels Proceedings Article
En: 2010 IEEE International Symposium on Information Theory, pp. 799–803, IEEE, Austin, TX, 2010, ISBN: 978-1-4244-7892-7.
Resumen | Enlaces | BibTeX | Etiquetas: belief propagation, binary erasure channels, Bipartite graph, BP decoder, Capacity planning, Channel Coding, codeword, computational complexity, Decoding, Finishing, graph theory, H infinity control, LDPC code decoding, LDPC Tanner graph, Maxwell decoder, parity check codes, Performance analysis, tree structure expectation propagation, trees (mathematics), Upper bound
@inproceedings{Olmos2010,
  title     = {Tree-Structure Expectation Propagation for Decoding {LDPC} Codes over Binary Erasure Channels},
  author    = {Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5513636},
  isbn      = {978-1-4244-7892-7},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 IEEE International Symposium on Information Theory},
  pages     = {799--803},
  publisher = {IEEE},
  address   = {Austin, TX},
  abstract  = {Expectation Propagation is a generalization to Belief Propagation (BP) in two ways. First, it can be used with any exponential family distribution over the cliques in the graph. Second, it can impose additional constraints on the marginal distributions. We use this second property to impose pair-wise marginal distribution constraints in some check nodes of the LDPC Tanner graph. These additional constraints allow decoding the received codeword when the BP decoder gets stuck. In this paper, we first present the new decoding algorithm, whose complexity is identical to the BP decoder, and we then prove that it is able to decode codewords with a larger fraction of erasures, as the block size tends to infinity. The proposed algorithm can be also understood as a simplification of the Maxwell decoder, but without its computational complexity. We also illustrate that the new algorithm outperforms the BP decoder for finite block-size codes.},
  keywords  = {belief propagation, binary erasure channels, Bipartite graph, BP decoder, Capacity planning, Channel Coding, codeword, computational complexity, Decoding, Finishing, graph theory, H infinity control, LDPC code decoding, LDPC Tanner graph, Maxwell decoder, parity check codes, Performance analysis, tree structure expectation propagation, trees (mathematics), Upper bound},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Djuric, Petar M; Closas, Pau; Bugallo, Monica F; Miguez, Joaquin
Evaluation of a Method's Robustness Proceedings Article
En: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 3598–3601, IEEE, Dallas, 2010, ISSN: 1520-6149.
Resumen | Enlaces | BibTeX | Etiquetas: Electronic mail, Extraterrestrial measurements, Filtering, Gaussian processes, method's robustness, Random variables, robustness, sequential methods, Signal processing, statistical distributions, Telecommunications, uniform distribution, Wireless communication
@inproceedings{Djuric2010,
  title     = {Evaluation of a Method's Robustness},
  author    = {Petar M Djuric and Pau Closas and Monica F Bugallo and Joaquin Miguez},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5495921},
  issn      = {1520-6149},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 IEEE International Conference on Acoustics, Speech and Signal Processing},
  pages     = {3598--3601},
  publisher = {IEEE},
  address   = {Dallas},
  abstract  = {In signal processing, it is typical to develop or use a method based on a given model. In practice, however, we almost never know the actual model and we hope that the assumed model is in the neighborhood of the true one. If deviations exist, the method may be more or less sensitive to them. Therefore, it is important to know more about this sensitivity, or in other words, how robust the method is to model deviations. To that end, it is useful to have a metric that can quantify the robustness of the method. In this paper we propose a procedure for developing a variety of metrics for measuring robustness. They are based on a discrete random variable that is generated from observed data and data generated according to past data and the adopted model. This random variable is uniform if the model is correct. When the model deviates from the true one, the distribution of the random variable deviates from the uniform distribution. One can then employ measures for differences between distributions in order to quantify robustness. In this paper we describe the proposed methodology and demonstrate it with simulated data.},
  keywords  = {Electronic mail, Extraterrestrial measurements, Filtering, Gaussian processes, method's robustness, Random variables, robustness, sequential methods, Signal processing, statistical distributions, Telecommunications, uniform distribution, Wireless communication},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Bayesian BCJR for Channel Equalization and Decoding Proceedings Article
En: 2010 IEEE International Workshop on Machine Learning for Signal Processing, pp. 53–58, IEEE, Kittila, 2010, ISSN: 1551-2541.
Resumen | Enlaces | BibTeX | Etiquetas: a posteriori probability, Bayes methods, Bayesian BCJR, Bayesian methods, Bit error rate, channel decoding, channel estate information, Channel estimation, Decoding, digital communication, digital communications, equalisers, Equalizers, error statistics, Markov processes, Maximum likelihood decoding, maximum likelihood estimation, multipath channel, probabilistic channel equalization, Probability, single input single output model, SISO model, statistical information, Training
@inproceedings{Salamanca2010,
  title     = {{Bayesian} {BCJR} for Channel Equalization and Decoding},
  author    = {Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5589201},
  issn      = {1551-2541},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 IEEE International Workshop on Machine Learning for Signal Processing},
  pages     = {53--58},
  publisher = {IEEE},
  address   = {Kittila},
  abstract  = {In this paper we focus on the probabilistic channel equalization in digital communications. We face the single input single output (SISO) model to show how the statistical information about the multipath channel can be exploited to further improve our estimation of the a posteriori probabilities (APP) during the equalization process. We consider not only the uncertainty due to the noise in the channel, but also in the estimate of the channel estate information (CSI). Thus, we resort to a Bayesian approach for the computation of the APP. This novel algorithm has the same complexity as the BCJR, exhibiting lower bit error rate at the output of the channel decoder than the standard BCJR that considers maximum likelihood (ML) to estimate the CSI.},
  keywords  = {a posteriori probability, Bayes methods, Bayesian BCJR, Bayesian methods, Bit error rate, channel decoding, channel estate information, Channel estimation, Decoding, digital communication, digital communications, equalisers, Equalizers, error statistics, Markov processes, Maximum likelihood decoding, maximum likelihood estimation, multipath channel, probabilistic channel equalization, Probability, single input single output model, SISO model, statistical information, Training},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Vinuelas-Peris, Pablo; Artés-Rodríguez, Antonio
Bayesian Joint Recovery of Correlated Signals in Distributed Compressed Sensing Proceedings Article
En: 2010 2nd International Workshop on Cognitive Information Processing, pp. 382–387, IEEE, Elba, 2010, ISBN: 978-1-4244-6459-3.
Resumen | Enlaces | BibTeX | Etiquetas: Bayes methods, Bayesian joint recovery, Bayesian methods, correlated signal, Correlation, correlation methods, Covariance matrix, Dictionaries, distributed compressed sensing, matrix decomposition, Noise measurement, sensors, sparse component correlation coefficient
@inproceedings{Vinuelas-Peris2010,
  title     = {{Bayesian} Joint Recovery of Correlated Signals in Distributed Compressed Sensing},
  author    = {Pablo Vinuelas-Peris and Antonio Art{\'e}s-Rodr{\'i}guez},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5604103},
  isbn      = {978-1-4244-6459-3},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 2nd International Workshop on Cognitive Information Processing},
  pages     = {382--387},
  publisher = {IEEE},
  address   = {Elba},
  abstract  = {In this paper we address the problem of Distributed Compressed Sensing (DCS) of correlated signals. We model the correlation using the sparse components correlation coefficient of signals, a general and simple measure. We develop a sparse Bayesian learning method for this setting, that can be applied to both random and optimized projection matrices. As a result, we obtain a reduction of the number of measurements needed for a given recovery error that is dependent on the correlation coefficient, as shown by computer simulations in different scenarios.},
  keywords  = {Bayes methods, Bayesian joint recovery, Bayesian methods, correlated signal, Correlation, correlation methods, Covariance matrix, Dictionaries, distributed compressed sensing, matrix decomposition, Noise measurement, sensors, sparse component correlation coefficient},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Achutegui, Katrin; Rodas, Javier; Escudero, Carlos J; Miguez, Joaquin
A Model-Switching Sequential Monte Carlo Algorithm for Indoor Tracking with Experimental RSS Data Proceedings Article
En: 2010 International Conference on Indoor Positioning and Indoor Navigation, pp. 1–8, IEEE, Zurich, 2010, ISBN: 978-1-4244-5862-2.
Resumen | Enlaces | BibTeX | Etiquetas: Approximation methods, Computational modeling, Data models, generalized IMM system, GIMM approach, indoor radio, Indoor tracking, Kalman filters, maneuvering target motion, Mathematical model, model switching sequential Monte Carlo algorithm, Monte Carlo methods, multipath propagation, multiple model interaction, propagation environment, radio receivers, radio tracking, radio transmitters, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, sensors, state space model, target position dependent data, transmitter-to-receiver distance, wireless technology
@inproceedings{Achutegui2010,
  title     = {A Model-Switching Sequential {Monte Carlo} Algorithm for Indoor Tracking with Experimental {RSS} Data},
  author    = {Katrin Achutegui and Javier Rodas and Carlos J Escudero and Joaquin Miguez},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5648053},
  isbn      = {978-1-4244-5862-2},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {2010 International Conference on Indoor Positioning and Indoor Navigation},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Zurich},
  abstract  = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as position-dependent data. This type of measurements are very appealing because they can be easily obtained with a variety of (inexpensive) wireless technologies. However, the extraction of accurate location information from RSS in indoor scenarios is not an easy task. Due to the multipath propagation, it is hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. For that reason, we propose the use of a compound model that combines several sub-models, whose parameters are adjusted to different propagation environments. This methodology, called Interacting Multiple Models (IMM), has been used in the past either for modeling the motion of maneuvering targets or the relationship between the target position and the observations. Here, we extend its application to handle both types of uncertainty simultaneously and we refer to the resulting state-space model as a generalized IMM (GIMM) system. The flexibility of the GIMM approach is attained at the expense of an increase in the number of random processes that must be accurately tracked. To overcome this difficulty, we introduce a Rao-Blackwellized sequential Monte Carlo tracking algorithm that exhibits good performance both with synthetic and experimental data.},
  keywords  = {Approximation methods, Computational modeling, Data models, generalized IMM system, GIMM approach, indoor radio, Indoor tracking, Kalman filters, maneuvering target motion, Mathematical model, model switching sequential Monte Carlo algorithm, Monte Carlo methods, multipath propagation, multiple model interaction, propagation environment, radio receivers, radio tracking, radio transmitters, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, sensors, state space model, target position dependent data, transmitter-to-receiver distance, wireless technology},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Helander, E; Silén, H; Miguez, Joaquin; Gabbouj, M
Maximum a Posteriori Voice Conversion Using Sequential Monte Carlo Methods Proceedings Article
En: Eleventh Annual Conference of the International Speech Communication Association (INTERSPEECH), Makuhari, Chiba, Japan, 2010.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Helander2010,
title = {Maximum a Posteriori Voice Conversion Using Sequential {Monte Carlo} Methods},
author = {E Helander and H Sil\'{e}n and Joaquin Miguez and M Gabbouj},
url = {http://www.isca-speech.org/archive/interspeech_2010/i10_1716.html},
year = {2010},
date = {2010-01-01},
booktitle = {Eleventh Annual Conference of the International Speech Communication Association ({INTERSPEECH})},
address = {Makuhari, Chiba, Japan},
abstract = {Many voice conversion algorithms are based on frame-wise mapping from source features into target features. This ignores the inherent temporal continuity that is present in speech and can degrade the subjective quality. In this paper, we propose to optimize the speech feature sequence after a frame-based conversion algorithm has been applied. In particular, we select the sequence of speech features through the minimization of a cost function that involves both the conversion error and the smoothness of the sequence. The estimation problem is solved using sequential Monte Carlo methods. Both subjective and objective results show the effectiveness of the method.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Channel Decoding with a Bayesian Equalizer Proceedings Article
En: 2010 IEEE International Symposium on Information Theory, pp. 1998–2002, IEEE, Austin, TX, 2010, ISBN: 978-1-4244-7892-7.
Resumen | Enlaces | BibTeX | Etiquetas: a posteriori probability, Bayesian equalizer, Bayesian methods, BER, Bit error rate, Channel Coding, channel decoding, channel estate information, Communication channels, Decoding, equalisers, Equalizers, error statistics, low-density parity-check decoders, LPDC decoders, Maximum likelihood decoding, maximum likelihood detection, maximum likelihood estimation, Noise reduction, parity check codes, Probability, Uncertainty
@inproceedings{Salamanca2010a,
title = {Channel Decoding with a {Bayesian} Equalizer},
author = {Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5513348},
isbn = {978-1-4244-7892-7},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Symposium on Information Theory},
pages = {1998--2002},
publisher = {IEEE},
address = {Austin, TX},
abstract = {Low-density parity-check (LDPC) decoders assume the channel state information (CSI) is known and they have the true a posteriori probability (APP) for each transmitted bit. But in most cases of interest, the CSI needs to be estimated with the help of a short training sequence and the LDPC decoder has to decode the received word using faulty APP estimates. In this paper, we study the uncertainty in the CSI estimate and how it affects the bit error rate (BER) output by the LDPC decoder. To improve these APP estimates, we propose a Bayesian equalizer that takes into consideration not only the uncertainty due to the noise in the channel, but also the uncertainty in the CSI estimate, reducing the BER after the LDPC decoder.},
keywords = {a posteriori probability, Bayesian equalizer, Bayesian methods, BER, Bit error rate, Channel Coding, channel decoding, channel estate information, Communication channels, Decoding, equalisers, Equalizers, error statistics, low-density parity-check decoders, LPDC decoders, Maximum likelihood decoding, maximum likelihood detection, maximum likelihood estimation, Noise reduction, parity check codes, Probability, Uncertainty},
pubstate = {published},
tppubtype = {inproceedings}
}
Alvarez, Mauricio; Luengo, David; Titsias, Michalis; Lawrence, Neil D
Efficient Multioutput Gaussian Processes Through Variational Inducing Kernels Proceedings Article
En: AISTATS 2010, Sardinia, 2010.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Alvarez2010,
title = {Efficient Multioutput {Gaussian} Processes Through Variational Inducing Kernels},
author = {Mauricio Alvarez and David Luengo and Michalis Titsias and Neil D Lawrence},
url = {http://eprints.pascal-network.org/archive/00006397/},
year = {2010},
date = {2010-01-01},
booktitle = {AISTATS 2010},
address = {Sardinia},
abstract = {Interest in multioutput kernel methods is increasing, whether under the guise of multitask learning, multisensor networks or structured output data. From the Gaussian process perspective a multioutput Mercer kernel is a covariance function over correlated output functions. One way of constructing such kernels is based on convolution processes (CP). A key problem for this approach is efficient inference. Alvarez and Lawrence recently presented a sparse approximation for CPs that enabled efficient inference. In this paper, we extend this work in two directions: we introduce the concept of variational inducing functions to handle potential non-smooth functions involved in the kernel CP construction and we consider an alternative approach to approximate inference based on variational methods, extending the work by Titsias (2009) to the multiple output case. We demonstrate our approaches on prediction of school marks, compiler performance and financial time series.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Plata-Chaves, Jorge; Lazaro, Marcelino
Closed-Form Error Exponent for the Neyman-Pearson Fusion of Two-Dimensional Markov Local Decisions Proceedings Article
En: European Signal Processing Conference (EUSIPCO 2010), Aalborg, 2010.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Plata-Chaves2010,
title = {Closed-Form Error Exponent for the {Neyman-Pearson} Fusion of Two-Dimensional {Markov} Local Decisions},
author = {Jorge Plata-Chaves and Marcelino Lazaro},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569292447.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {European Signal Processing Conference (EUSIPCO 2010)},
address = {Aalborg},
abstract = {We consider a distributed detection system formed by a large number of local detectors and a fusion center that performs a Neyman-Pearson fusion of the binary quantizations of the sensor observations. The aforementioned local decisions are taken with no kind of cooperation and transmitted to the fusion center over error free parallel access channels. Furthermore, the devices are located on a rectangular lattice so that sensors belonging to a specific row or column are equally spaced. For each hypothesis $H_0$ and $H_1$, the correlation structure of the local decisions is modelled with a two-dimensional causal field where the rows and columns are outcomes of the same first-order binary Markov chain. Under this scenario, we derive a closed-form error exponent for the Neyman-Pearson fusion of the local decisions. Afterwards, using the derived error exponent we study the effect of different design parameters of the network on its overall detection performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Czink, Nicolai; Bandemer, Bernd; Vazquez-Vilar, Gonzalo; Jalloul, Louay; Oestges, Claude; Paulraj, Arogyaswami
Spatial Separation of Multi-User MIMO Channels Proceedings Article
En: 20th Personal, Indoor and Mobile Radio Communications Symposium 2009 (PIMRC 09), Tokyo, Japan, 2009.
BibTeX | Etiquetas:
@inproceedings{nczink2009,
title = {Spatial Separation of Multi-User {MIMO} Channels},
author = {Nicolai Czink and Bernd Bandemer and Gonzalo Vazquez-Vilar and Louay Jalloul and Claude Oestges and Arogyaswami Paulraj},
year = {2009},
date = {2009-09-01},
booktitle = {20th Personal, Indoor and Mobile Radio Communications Symposium 2009 (PIMRC 09)},
address = {Tokyo, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bandemer, Bernd; Vazquez-Vilar, Gonzalo; El Gamal, Abbas
On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels Proceedings Article
En: 2009 IEEE International Symposium on Information Theory (ISIT 2009), Coex, Seoul, Korea, 2009.
BibTeX | Etiquetas:
@inproceedings{bbandemer2009,
title = {On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels},
author = {Bernd Bandemer and Gonzalo Vazquez-Vilar and Abbas {El Gamal}},
year = {2009},
date = {2009-06-01},
booktitle = {2009 IEEE International Symposium on Information Theory (ISIT 2009)},
address = {Coex, Seoul, Korea},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
López-Valcarce, Roberto; Vazquez-Vilar, Gonzalo; Álvarez-Díaz, Marcos
Multiantenna detection of multicarrier primary signals exploiting spectral a priori information Proceedings Article
En: 4th International Conference on Cognitive Radio Oriented Wireless Networks and Communications (Crowncom 2009), Hannover, Germany, 2009.
BibTeX | Etiquetas:
@inproceedings{crowncom2009,
  author    = {Roberto L\'{o}pez-Valcarce and Gonzalo Vazquez-Vilar and Marcos \'{A}lvarez-D\'{i}az},
  title     = {Multiantenna detection of multicarrier primary signals exploiting spectral a priori information},
  booktitle = {4th International Conference on Cognitive Radio Oriented Wireless Networks and Communications (Crowncom 2009)},
  address   = {Hannover, Germany},
  year      = {2009},
  date      = {2009-06-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
López-Valcarce, Roberto; Vazquez-Vilar, Gonzalo
Wideband Spectrum Sensing in Cognitive Radio: Joint Estimation of Noise Variance and Multiple Signal Levels Proceedings Article
En: 2009 IEEE International Workshop on Signal Processing Advances for Wireless Communications (Spawc 2009), Perugia, Italy, 2009.
BibTeX | Etiquetas:
@inproceedings{spawc2009,
  author    = {Roberto L\'{o}pez-Valcarce and Gonzalo Vazquez-Vilar},
  title     = {Wideband Spectrum Sensing in Cognitive Radio: Joint Estimation of Noise Variance and Multiple Signal Levels},
  booktitle = {2009 IEEE International Workshop on Signal Processing Advances for Wireless Communications (Spawc 2009)},
  address   = {Perugia, Italy},
  year      = {2009},
  date      = {2009-06-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Soft LDPC Decoding in Nonlinear Channels with Gaussian Processes for Classification Proceedings Article
En: European Signal Processing Conference (EUSIPCO), Glasgow, 2009.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Olmos2009,
title = {Soft {LDPC} Decoding in Nonlinear Channels with {Gaussian} Processes for Classification},
author = {Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569186781.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {European Signal Processing Conference (EUSIPCO)},
address = {Glasgow},
abstract = {In this paper, we propose a new approach for nonlinear equalization based on Gaussian processes for classification (GPC). We also measure the performance of the equalizer after a low-density parity-check channel decoder has detected the received sequence. Typically, most channel equalizers concentrate on reducing the bit error rate, instead of providing accurate posterior probability estimates. GPC is a Bayesian nonlinear classification tool that provides accurate posterior probability estimates with short training sequences. We show that the accuracy of these estimates is essential for optimal performance of the channel decoder and that the error rate outputted by the equalizer might be irrelevant to understand the performance of the overall communication receiver. We compare the proposed equalizers with state-of-the-art solutions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bravo-Santos, Ángel M; Djuric, Petar M
Cooperative Relay Communications in Mesh Networks Proceedings Article
En: 2009 IEEE 10th Workshop on Signal Processing Advances in Wireless Communications, pp. 499–503, IEEE, Perugia, 2009, ISBN: 978-1-4244-3695-8.
Resumen | Enlaces | BibTeX | Etiquetas: binary transmission, bit error probability, Bit error rate, cooperative relay communications, decode-and-forward relays, Detectors, error statistics, Maximum likelihood decoding, maximum likelihood detection, Mesh networks, mesh wireless networks, multi-hop networks, Network topology, optimal node decision rules, Peer to peer computing, radio networks, Relays, spread spectrum communication, telecommunication network topology, Wireless Sensor Networks
@inproceedings{Bravo-Santos2009,
  author    = {\'{A}ngel M Bravo-Santos and Petar M Djuric},
  title     = {Cooperative Relay Communications in Mesh Networks},
  booktitle = {2009 IEEE 10th Workshop on Signal Processing Advances in Wireless Communications},
  pages     = {499--503},
  publisher = {IEEE},
  address   = {Perugia},
  year      = {2009},
  date      = {2009-01-01},
  isbn      = {978-1-4244-3695-8},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5161835},
  abstract  = {In previous literature on cooperative relay communications, the emphasis has been on the study of multi-hop networks. In this paper we address mesh wireless networks that use decode-and-forward relays for which we derive the optimal node decision rules in case of binary transmission. We also obtain the expression for the overall bit error probability. We compare the mesh networks with multi-hop networks and show the improvement in performance that can be achieved with them when both networks have the same number of nodes and equal number of hops.},
  keywords  = {binary transmission, bit error probability, Bit error rate, cooperative relay communications, decode-and-forward relays, Detectors, error statistics, Maximum likelihood decoding, maximum likelihood detection, Mesh networks, mesh wireless networks, multi-hop networks, Network topology, optimal node decision rules, Peer to peer computing, radio networks, Relays, spread spectrum communication, telecommunication network topology, Wireless Sensor Networks},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bugallo, Monica F; Maiz, Cristina S; Miguez, Joaquin; Djuric, Petar M
Cost-Reference Particle Filters and Fusion of Information Proceedings Article
En: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 286–291, IEEE, Marco Island, FL, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: costs, distributed processing, Electronic mail, fusion, Information filtering, Information filters, information fusion, Measurement standards, probabilistic information, random measures, sensor fusion, smoothing methods, Weight measurement
@inproceedings{Bugallo2009,
  author    = {Monica F Bugallo and Cristina S Maiz and Joaquin Miguez and Petar M Djuric},
  title     = {Cost-Reference Particle Filters and Fusion of Information},
  booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
  pages     = {286--291},
  publisher = {IEEE},
  address   = {Marco Island, FL},
  year      = {2009},
  date      = {2009-01-01},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785936},
  abstract  = {Cost-reference particle filtering is a methodology for tracking unknowns in a system without reliance on probabilistic information about the noises in the system. The methodology is based on analogous principles as the ones of standard particle filtering. Unlike the random measures of standard particle filters that are composed of particles and weights, the random measures of cost-reference particle filters contain particles and user-defined costs. In this paper, we discuss a few scenarios where we need to meld random measures of two or more cost-reference particle filters. The objective is to obtain a fused random measure that combines the information from the individual cost-reference particle filters.},
  keywords  = {costs, distributed processing, Electronic mail, fusion, Information filtering, Information filters, information fusion, Measurement standards, probabilistic information, random measures, sensor fusion, smoothing methods, Weight measurement},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Djuric, Petar M; Miguez, Joaquin
Model Assessment with Kolmogorov-Smirnov Statistics Proceedings Article
En: 2009 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2973–2976, IEEE, Taipei, 2009, ISSN: 1520-6149.
Resumen | Enlaces | BibTeX | Etiquetas: Bayesian methods, Computer Simulation, Context modeling, Electronic mail, Filtering, ill-conditioned problem, Kolmogorov-Smirnov statistics, model assessment, modelling, Predictive models, Probability, statistical analysis, statistics, Testing
@inproceedings{Djuric2009,
title = {Model Assessment with {Kolmogorov-Smirnov} Statistics},
author = {Petar M Djuric and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4960248},
issn = {1520-6149},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Conference on Acoustics, Speech and Signal Processing},
pages = {2973--2976},
publisher = {IEEE},
address = {Taipei},
abstract = {One of the most basic problems in science and engineering is the assessment of a considered model. The model should describe a set of observed data and the objective is to find ways of deciding if the model should be rejected. It seems that this is an ill-conditioned problem because we have to test the model against all the possible alternative models. In this paper we use the Kolmogorov-Smirnov statistic to develop a test that shows if the model should be kept or it should be rejected. We explain how this testing can be implemented in the context of particle filtering. We demonstrate the performance of the proposed method by computer simulations.},
keywords = {Bayesian methods, Computer Simulation, Context modeling, Electronic mail, Filtering, ill-conditioned problem, Kolmogorov-Smirnov statistics, model assessment, modelling, Predictive models, Probability, statistical analysis, statistics, Testing},
pubstate = {published},
tppubtype = {inproceedings}
}
Maiz, Cristina S; Miguez, Joaquin; Djuric, Petar M
Particle Filtering in the Presence of Outliers Proceedings Article
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 33–36, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas: computer simulations, Degradation, Filtering, multidimensional random variates, Multidimensional signal processing, Multidimensional systems, Nonlinear tracking, Outlier detection, predictive distributions, Signal processing, signal processing tools, signal-power observations, spatial depth, statistical analysis, statistical distributions, statistics, Target tracking, Testing
@inproceedings{Maiz2009,
  author    = {Cristina S Maiz and Joaquin Miguez and Petar M Djuric},
  title     = {Particle Filtering in the Presence of Outliers},
  booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
  pages     = {33--36},
  publisher = {IEEE},
  address   = {Cardiff},
  year      = {2009},
  date      = {2009-01-01},
  isbn      = {978-1-4244-2709-3},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278645},
  abstract  = {Particle filters have become very popular signal processing tools for problems that involve nonlinear tracking of an unobserved signal of interest given a series of related observations. In this paper we propose a new scheme for particle filtering when the observed data are possibly contaminated with outliers. An outlier is an observation that has been generated by some (unknown) mechanism different from the assumed model of the data. Therefore, when handled in the same way as regular observations, outliers may drastically degrade the performance of the particle filter. To address this problem, we introduce an auxiliary particle filtering scheme that incorporates an outlier detection step. We propose to implement it by means of a test involving statistics of the predictive distributions of the observations. Specifically, we investigate the use of a proposed statistic called spatial depth that can easily be applied to multidimensional random variates. The performance of the resulting algorithm is assessed by computer simulations of target tracking based on signal-power observations.},
  keywords  = {computer simulations, Degradation, Filtering, multidimensional random variates, Multidimensional signal processing, Multidimensional systems, Nonlinear tracking, Outlier detection, predictive distributions, Signal processing, signal processing tools, signal-power observations, spatial depth, statistical analysis, statistical distributions, statistics, Target tracking, Testing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
A Novel Rejection Sampling Scheme for Posterior Probability Distributions Proceedings Article
En: 2009 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2921–2924, IEEE, Taipei, 2009, ISSN: 1520-6149.
Resumen | Enlaces | BibTeX | Etiquetas: Additive noise, arbitrary target probability distributions, Bayes methods, Bayesian methods, Monte Carlo integration, Monte Carlo methods, Monte Carlo techniques, Overbounding, posterior probability distributions, Probability density function, Probability distribution, Proposals, Rejection sampling, rejection sampling scheme, Sampling methods, Signal processing algorithms, signal sampling, Upper bound
@inproceedings{Martino2009,
  author    = {Luca Martino and Joaquin Miguez},
  title     = {A Novel Rejection Sampling Scheme for Posterior Probability Distributions},
  booktitle = {2009 IEEE International Conference on Acoustics, Speech and Signal Processing},
  pages     = {2921--2924},
  publisher = {IEEE},
  address   = {Taipei},
  year      = {2009},
  date      = {2009-01-01},
  issn      = {1520-6149},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4960235},
  abstract  = {Rejection sampling (RS) is a well-known method to draw from arbitrary target probability distributions, which has important applications by itself or as a building block for more sophisticated Monte Carlo techniques. The main limitation to the use of RS is the need to find an adequate upper bound for the ratio of the target probability density function (pdf) over the proposal pdf from which the samples are generated. There are no general methods to analytically find this bound, except in the particular case in which the target pdf is log-concave. In this paper we adopt a Bayesian view of the problem and propose a general RS scheme to draw from the posterior pdf of a signal of interest using its prior density as a proposal function. The method enables the analytical calculation of the bound and can be applied to a large class of target densities. We illustrate its use with a simple numerical example.},
  keywords  = {Additive noise, arbitrary target probability distributions, Bayes methods, Bayesian methods, Monte Carlo integration, Monte Carlo methods, Monte Carlo techniques, Overbounding, posterior probability distributions, Probability density function, Probability distribution, Proposals, Rejection sampling, rejection sampling scheme, Sampling methods, Signal processing algorithms, signal sampling, Upper bound},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Achutegui, Katrin; Martino, Luca; Rodas, Javier; Escudero, Carlos J; Miguez, Joaquin
A Multi-Model Particle Filtering Algorithm for Indoor Tracking of Mobile Terminals Using RSS Data Proceedings Article
En: 2009 IEEE International Conference on Control Applications, pp. 1702–1707, IEEE, Saint Petersburg, 2009, ISBN: 978-1-4244-4601-8.
Resumen | Enlaces | BibTeX | Etiquetas: Bayesian methods, Control systems, Filtering algorithms, generalized interacting multiple model, GIMM, indoor radio, Indoor tracking, mobile radio, mobile terminal, Monte Carlo methods, multipath propagation, position-dependent data measurement, random process, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, Sliding mode control, State-space methods, state-space model, Target tracking, tracking, transmitter-to-receiver distance, wireless network, wireless technology
@inproceedings{Achutegui2009,
title = {A Multi-Model Particle Filtering Algorithm for Indoor Tracking of Mobile Terminals Using {RSS} Data},
author = {Katrin Achutegui and Luca Martino and Javier Rodas and Carlos J Escudero and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5280960},
isbn = {978-1-4244-4601-8},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Conference on Control Applications},
pages = {1702--1707},
publisher = {IEEE},
address = {Saint Petersburg},
abstract = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as a position-dependent data measurement. This type of measurements is very appealing because they can be easily obtained with a variety of wireless technologies which are relatively inexpensive. The extraction of accurate location information from RSS in indoor scenarios is not an easy task, though. Since RSS is highly influenced by multipath propagation, it turns out very hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. The measurement models proposed in the literature are site-specific and require a great deal of information regarding the structure of the building where the tracking will be performed and therefore are not useful for a general application. For that reason we propose the use of a compound model that combines several sub-models, whose parameters are adjusted to specific and different propagation environments. This methodology, called interacting multiple models (IMM), has been used in the past for modeling the motion of maneuvering targets. Here, we extend its application to handle also the uncertainty in the RSS observations and we refer to the resulting state-space model as a generalized IMM (GIMM) system. The flexibility of the GIMM approach is attained at the expense of an increase in the number of random processes that must be accurately tracked. To overcome this difficulty, we introduce a Rao-Blackwellized sequential Monte Carlo tracking algorithm that exhibits good performance both with synthetic and experimental data.},
keywords = {Bayesian methods, Control systems, Filtering algorithms, generalized interacting multiple model, GIMM, indoor radio, Indoor tracking, mobile radio, mobile terminal, Monte Carlo methods, multipath propagation, position-dependent data measurement, random process, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, Sliding mode control, State-space methods, state-space model, Target tracking, tracking, transmitter-to-receiver distance, wireless network, wireless technology},
pubstate = {published},
tppubtype = {inproceedings}
}
Djuric, Petar M; Bugallo, Monica F; Closas, Pau; Miguez, Joaquin
Measuring the Robustness of Sequential Methods Proceedings Article
En: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 29–32, IEEE, Aruba, Dutch Antilles, 2009, ISBN: 978-1-4244-5179-1.
Resumen | Enlaces | BibTeX | Etiquetas: Additive noise, cumulative distribution functions, data processing method, extended Kalman filtering, Extraterrestrial measurements, Filtering, Gaussian distribution, Gaussian noise, Kalman filters, Kolmogorov-Smirnov distance, Least squares approximation, Noise robustness, nonlinear filters, robustness, sequential methods, statistical distributions, telecommunication computing
@inproceedings{Djuric2009a,
  author    = {Petar M Djuric and Monica F Bugallo and Pau Closas and Joaquin Miguez},
  title     = {Measuring the Robustness of Sequential Methods},
  booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
  pages     = {29--32},
  publisher = {IEEE},
  address   = {Aruba, Dutch Antilles},
  year      = {2009},
  date      = {2009-01-01},
  isbn      = {978-1-4244-5179-1},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5413275},
  abstract  = {Whenever we apply methods for processing data, we make a number of model assumptions. In reality, these assumptions are not always correct. Robust methods can withstand model inaccuracies, that is, despite some incorrect assumptions they can still produce good results. We often want to know how robust employed methods are. To that end we need to have a yardstick for measuring robustness. In this paper, we propose an approach for constructing such metrics for sequential methods. These metrics are derived from the Kolmogorov-Smirnov distance between the cumulative distribution functions of the actual observations and the ones based on the assumed model. The use of the proposed metrics is demonstrated with simulation examples.},
  keywords  = {Additive noise, cumulative distribution functions, data processing method, extended Kalman filtering, Extraterrestrial measurements, Filtering, Gaussian distribution, Gaussian noise, Kalman filters, Kolmogorov-Smirnov distance, Least squares approximation, Noise robustness, nonlinear filters, robustness, sequential methods, statistical distributions, telecommunication computing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
New Accept/Reject Methods for Independent Sampling from Posterior Probability Distributions Proceedings Article
En: 17th European Signal Processing Conference (EUSIPCO 2009), Glasgow, 2009.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Martino2009a,
title = {New Accept/Reject Methods for Independent Sampling from Posterior Probability Distributions},
author = {Luca Martino and Joaquin Miguez},
url = {http://www.academia.edu/2355641/NEW_ACCEPT_REJECT_METHODS_FOR_INDEPENDENT_SAMPLING_FROM_POSTERIOR_PROBABILITY_DISTRIBUTIONS},
year = {2009},
date = {2009-01-01},
booktitle = {17th European Signal Processing Conference (EUSIPCO 2009)},
address = {Glasgow},
abstract = {Rejection sampling (RS) is a well-known method to generate (pseudo-)random samples from arbitrary probability distributions that enjoys important applications, either by itself or as a tool in more sophisticated Monte Carlo techniques. Unfortunately, the use of RS techniques demands the calculation of tight upper bounds for the ratio of the target probability density function (pdf) over the proposal density from which candidate samples are drawn. Except for the class of log-concave target pdf's, for which an efficient algorithm exists, there are no general methods to analytically determine this bound, which has to be derived from scratch for each specific case. In this paper, we tackle the general problem of applying RS to draw from an arbitrary posterior pdf using the prior density as a proposal function. This is a scenario that appears frequently in Bayesian signal processing methods. We derive a general geometric procedure for the calculation of upper bounds that can be used with a broad class of target pdf's, including scenarios with correlated observations, multimodal and/or mixture measurement noises. We provide some simple numerical examples to illustrate the application of the proposed techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando; Kulkarni, S R
Distributed Least Square for Consensus Building in Sensor Networks Proceedings Article
En: 2009 IEEE International Symposium on Information Theory, pp. 2877–2881, IEEE, Seoul, 2009, ISBN: 978-1-4244-4312-3.
Resumen | Enlaces | BibTeX | Etiquetas: Change detection algorithms, Channel Coding, Distributed computing, distributed least square method, graphical models, Inference algorithms, Kernel, Least squares methods, nonparametric statistics, Parametric statistics, robustness, sensor-network learning, statistical analysis, Telecommunication network reliability, Wireless sensor network, Wireless Sensor Networks
@inproceedings{Perez-Cruz2009,
title = {Distributed Least Square for Consensus Building in Sensor Networks},
author = {Fernando Perez-Cruz and S R Kulkarni},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5205336},
isbn = {978-1-4244-4312-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Symposium on Information Theory},
pages = {2877--2881},
publisher = {IEEE},
address = {Seoul},
abstract = {We present a novel mechanism for consensus building in sensor networks. The proposed algorithm has three main properties that make it suitable for general sensor-network learning. First, the proposed algorithm is based on robust nonparametric statistics and thereby needs little prior knowledge about the network and the function that needs to be estimated. Second, the algorithm uses only local information about the network and it communicates only with nearby sensors. Third, the algorithm is completely asynchronous and robust. It does not need to coordinate the sensors to estimate the underlying function and it is not affected if other sensors in the network stop working. Therefore, the proposed algorithm is an ideal candidate for sensor networks deployed in remote and inaccessible areas, which might need to change their objective once they have been set up.},
keywords = {Change detection algorithms, Channel Coding, Distributed computing, distributed least square method, graphical models, Inference algorithms, Kernel, Least squares methods, nonparametric statistics, Parametric statistics, robustness, sensor-network learning, statistical analysis, Telecommunication network reliability, Wireless sensor network, Wireless Sensor Networks},
pubstate = {published},
tppubtype = {inproceedings}
}
Fresia, Maria; Perez-Cruz, Fernando; Poor, Vincent H
Optimized Concatenated LDPC Codes for Joint Source-Channel Coding Proceedings Article
En: 2009 IEEE International Symposium on Information Theory, pp. 2131–2135, IEEE, Seoul, 2009, ISBN: 978-1-4244-4312-3.
Resumen | Enlaces | BibTeX | Etiquetas: approximation theory, asymptotic behavior analysis, Channel Coding, combined source-channel coding, Concatenated codes, Decoding, Entropy, EXIT chart, extrinsic information transfer, H infinity control, Information analysis, joint belief propagation decoder, joint source-channel coding, low-density-parity-check code, optimized concatenated independent LDPC codes, parity check codes, Redundancy, source coding, transmitter, Transmitters
@inproceedings{Fresia2009,
title = {Optimized Concatenated LDPC Codes for Joint Source-Channel Coding},
author = {Maria Fresia and Fernando Perez-Cruz and Vincent H Poor},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5205766},
isbn = {978-1-4244-4312-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Symposium on Information Theory},
pages = {2131--2135},
publisher = {IEEE},
address = {Seoul},
abstract = {In this paper a scheme for joint source-channel coding based on low-density-parity-check (LDPC) codes is investigated. Two concatenated independent LDPC codes are used in the transmitter: one for source coding and the other for channel coding, with a joint belief propagation decoder. The asymptotic behavior is analyzed using EXtrinsic Information Transfer (EXIT) charts and this approximation is corroborated with illustrative experiments. The optimization of the degree distributions for our sparse code to maximize the information transmission rate is also considered.},
keywords = {approximation theory, asymptotic behavior analysis, Channel Coding, combined source-channel coding, Concatenated codes, Decoding, Entropy, EXIT chart, extrinsic information transfer, H infinity control, Information analysis, joint belief propagation decoder, joint source-channel coding, low-density-parity-check code, optimized concatenated independent LDPC codes, parity check codes, Redundancy, source coding, transmitter, Transmitters},
pubstate = {published},
tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
An Adaptive Accept/Reject Sampling Algorithm for Posterior Probability Distributions Proceedings Article
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 45–48, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas: adaptive accept/reject sampling, Adaptive rejection sampling, arbitrary target probability distributions, Computer Simulation, Filtering, Monte Carlo integration, Monte Carlo methods, posterior probability distributions, Probability, Probability density function, Probability distribution, Proposals, Rejection sampling, Sampling methods, sensor networks, Signal processing algorithms, signal sampling, Testing
@inproceedings{Martino2009b,
title = {An Adaptive Accept/Reject Sampling Algorithm for Posterior Probability Distributions},
author = {Luca Martino and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278644},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {45--48},
publisher = {IEEE},
address = {Cardiff},
abstract = {Accept/reject sampling is a well-known method to generate random samples from arbitrary target probability distributions. It demands the design of a suitable proposal probability density function (pdf) from which candidate samples can be drawn. These samples are either accepted or rejected depending on a test involving the ratio of the target and proposal densities. In this paper we introduce an adaptive method to build a sequence of proposal pdf's that approximate the target density and hence can ensure a high acceptance rate. In order to illustrate the application of the method we design an accept/reject particle filter and then assess its performance and sampling efficiency numerically, by means of computer simulations.},
keywords = {adaptive accept/reject sampling, Adaptive rejection sampling, arbitrary target probability distributions, Computer Simulation, Filtering, Monte Carlo integration, Monte Carlo methods, posterior probability distributions, Probability, Probability density function, Probability distribution, Proposals, Rejection sampling, Sampling methods, sensor networks, Signal processing algorithms, signal sampling, Testing},
pubstate = {published},
tppubtype = {inproceedings}
}
Vinuelas-Peris, Pablo; Artés-Rodríguez, Antonio
Sensing Matrix Optimization in Distributed Compressed Sensing Proceedings Article
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 638–641, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas: Compressed sensing, Computer Simulation, computer simulations, correlated signal, Correlated signals, correlation theory, Dictionaries, distributed coding strategy, distributed compressed sensing, Distributed control, efficient projection method, Encoding, joint recovery method, Matching pursuit algorithms, Optimization methods, orthogonal matching pursuit, Projection Matrix Optimization, sensing matrix optimization, Sensor Network, Sensor phenomena and characterization, Sensor systems, Signal processing, Sparse matrices, Technological innovation
@inproceedings{Vinuelas-Peris2009,
title = {Sensing Matrix Optimization in Distributed Compressed Sensing},
author = {Pablo Vinuelas-Peris and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278496},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {638--641},
publisher = {IEEE},
address = {Cardiff},
abstract = {Distributed compressed sensing (DCS) seeks to simultaneously measure signals that are each individually sparse in some domain(s) and also mutually correlated. In this paper we consider the scenario in which the (overcomplete) bases for common component and innovations are different. We propose and analyze a distributed coding strategy for the common component, and also the use of efficient projection (EP) method for optimizing the sensing matrices in this setting. We show the effectiveness of our approach by computer simulations using the orthogonal matching pursuit (OMP) as joint recovery method, and we discuss the configuration of the distribution strategy.},
keywords = {Compressed sensing, Computer Simulation, computer simulations, correlated signal, Correlated signals, correlation theory, Dictionaries, distributed coding strategy, distributed compressed sensing, Distributed control, efficient projection method, Encoding, joint recovery method, Matching pursuit algorithms, Optimization methods, orthogonal matching pursuit, Projection Matrix Optimization, sensing matrix optimization, Sensor Network, Sensor phenomena and characterization, Sensor systems, Signal processing, Sparse matrices, Technological innovation},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando; Rodrigues, Miguel R D; Verdu, Sergio
Optimal Precoding for Multiple-Input Multiple-Output Gaussian Channels Proceedings Article
En: Seminar PIIRS, Princeton, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Theory & Algorithms
@inproceedings{Perez-Cruz2009a,
title = {Optimal Precoding for Multiple-Input Multiple-Output Gaussian Channels},
author = {Fernando Perez-Cruz and Miguel R D Rodrigues and Sergio Verdu},
url = {http://eprints.pascal-network.org/archive/00006754/},
year = {2009},
date = {2009-01-01},
booktitle = {Seminar PIIRS},
address = {Princeton},
abstract = {We investigate the linear precoding and power allocation policies that maximize the mutual information for general multiple-input multiple-output (MIMO) Gaussian channels with arbitrary input distributions, by capitalizing on the relationship between mutual information and minimum mean-square error. The optimal linear precoder satisfies a fixed-point equation as a function of the channel and the input constellation. For nonGaussian inputs, a nondiagonal precoding matrix in general increases the information transmission rate, even for parallel noninteracting channels. Whenever precoding is precluded, the optimal power allocation policy also satisfies a fixed-point equation; we put forth a generalization of the mercury/waterfilling algorithm, previously proposed for parallel noninterfering channels, in which the mercury level accounts not only for the nonGaussian input distributions, but also for the interference among inputs.},
keywords = {Theory \& Algorithms},
pubstate = {published},
tppubtype = {inproceedings}
}
Miguez, Joaquin; Maiz, Cristina S; Djuric, Petar M; Crisan, Dan
Sequential Monte Carlo Optimization Using Artificial State-Space Models Proceedings Article
En: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 268–273, IEEE, Marco Island, FL, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization
@inproceedings{Miguez2009,
title = {Sequential Monte Carlo Optimization Using Artificial State-Space Models},
author = {Joaquin Miguez and Cristina S Maiz and Petar M Djuric and Dan Crisan},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785933},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
pages = {268--273},
publisher = {IEEE},
address = {Marco Island, FL},
abstract = {We introduce a method for sequential minimization of a certain class of (possibly non-convex) cost functions with respect to a high dimensional signal of interest. The proposed approach involves the transformation of the optimization problem into one of estimation in a discrete-time dynamical system. In particular, we describe a methodology for constructing an artificial state-space model which has the signal of interest as its unobserved dynamic state. The model is ``adapted'' to the cost function in the sense that the maximum a posteriori (MAP) estimate of the system state is also a global minimizer of the cost function. The advantage of the estimation framework is that we can draw from a pool of sequential Monte Carlo methods, for particle approximation of probability measures in dynamic systems, that enable the numerical computation of MAP estimates. We provide examples of how to apply the proposed methodology, including some illustrative simulation results.},
keywords = {Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization},
pubstate = {published},
tppubtype = {inproceedings}
}
Fresia, Maria; Perez-Cruz, Fernando; Poor, Vincent H; Verdu, Sergio
Joint Source-Channel Coding with Concatenated LDPC Codes Proceedings Article
En: Information Theory and Applications (ITA), San Diego, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Learning/Statistics & Optimisation
@inproceedings{Fresia2009a,
title = {Joint Source-Channel Coding with Concatenated LDPC Codes},
author = {Maria Fresia and Fernando Perez-Cruz and Vincent H Poor and Sergio Verdu},
url = {http://eprints.pascal-network.org/archive/00004905/},
year = {2009},
date = {2009-01-01},
booktitle = {Information Theory and Applications (ITA)},
address = {San Diego},
abstract = {The separation principle, a milestone in information theory, establishes that for stationary sources and channels there is no loss of optimality when a channel-independent source encoder followed by a source-independent channel encoder are used to transmit the data, as the code length tends to infinity. Thereby, the source and channel encoding have been typically treated as independent problems. For finite-length codes, the separation principle does not hold and a joint encoder and decoder can potentially increase the achieved information transmission rate. In this paper, a scheme for joint source-channel coding based on low-density parity-check (LDPC) codes is presented. The source is compressed and protected with two concatenated LDPC codes and a joint belief propagation decoder is implemented. EXIT chart performance of the proposed schemes is studied. The results are verified with some illustrative experiments.},
keywords = {Learning/Statistics \& Optimisation},
pubstate = {published},
tppubtype = {inproceedings}
}
Goez, Roger; Lazaro, Marcelino
Training of Neural Classifiers by Separating Distributions at the Hidden Layer Proceedings Article
En: 2009 IEEE International Workshop on Machine Learning for Signal Processing, pp. 1–6, IEEE, Grenoble, 2009, ISBN: 978-1-4244-4947-7.
Resumen | Enlaces | BibTeX | Etiquetas: Artificial neural networks, Bayesian methods, Cost function, Curve fitting, Databases, Function approximation, Neural networks, Speech recognition, Support vector machine classification, Support vector machines
@inproceedings{Goez2009,
title = {Training of Neural Classifiers by Separating Distributions at the Hidden Layer},
author = {Roger Goez and Marcelino Lazaro},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5306240},
isbn = {978-1-4244-4947-7},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {1--6},
publisher = {IEEE},
address = {Grenoble},
abstract = {A new cost function for training of binary classifiers based on neural networks is proposed. This cost function aims at separating the distributions for patterns of each class at the output of the hidden layer of the network. It has been implemented in a Generalized Radial Basis Function (GRBF) network and its performance has been evaluated under three different databases, showing advantages with respect to the conventional Mean Squared Error (MSE) cost function. With respect to the Support Vector Machine (SVM) classifier, the proposed method has also advantages both in terms of performance and complexity.},
keywords = {Artificial neural networks, Bayesian methods, Cost function, Curve fitting, Databases, Function approximation, Neural networks, Speech recognition, Support vector machine classification, Support vector machines},
pubstate = {published},
tppubtype = {inproceedings}
}
Plata-Chaves, Jorge; Lazaro, Marcelino
Closed-Form Error Exponent for the Neyman-Pearson Fusion of Markov Local Decisions Proceedings Article
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 533–536, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Plata-Chaves2009,
title = {Closed-Form Error Exponent for the Neyman-Pearson Fusion of Markov Local Decisions},
author = {Jorge Plata-Chaves and Marcelino Lazaro},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=5278522},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {533--536},
publisher = {IEEE},
address = {Cardiff},
abstract = {In this correspondence, we derive a closed-form expression of the error exponent associated with the binary Neyman-Pearson test performed at the fusion center of a distributed detection system where a large number of local detectors take dependent binary decisions regarding a specific phenomenon. We assume that the sensors are equally spaced along a straight line, that their local decisions are taken with no kind of cooperation, and that they are transmitted to the fusion center over an error free parallel access channel. Under each one of the two possible hypotheses, $H_0$ and $H_1$, the correlation structure of the local binary decisions is modelled with a first-order binary Markov chain whose transition probabilities are linked with different physical parameters of the network. Through different simulations based on the error exponent and a deterministic physical model of the aforementioned transition probabilities we study the effect of network density on the overall detection performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alvarez, Mauricio; Luengo, David; Lawrence, Neil D
Latent Force Models Proceedings Article
En: Conf. on Artificial Intelligence and Statistics, Clearwater Beach, 2009.
BibTeX | Etiquetas:
@inproceedings{Alvarez2009,
title = {Latent Force Models},
author = {Mauricio Alvarez and David Luengo and Neil D Lawrence},
year = {2009},
date = {2009-01-01},
booktitle = {Conf. on Artificial Intelligence and Statistics},
address = {Clearwater Beach},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Davidson, Pavel; Vázquez, Manuel A; Piche, Robert
Uninterrupted portable car navigation system using GPS, map and inertial sensors data Proceedings Article
En: 2009 IEEE 13th International Symposium on Consumer Electronics, pp. 836–840, 2009.
@inproceedings{5156849,
title = {Uninterrupted portable car navigation system using GPS, map and inertial sensors data},
author = {Pavel Davidson and Manuel A V\'{a}zquez and Robert Piche},
doi = {10.1109/ISCE.2009.5156849},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {2009 IEEE 13th International Symposium on Consumer Electronics},
pages = {836--840},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Parviainen, Jussi; Vázquez, Manuel A; Pekkalin, Olli; Hautamaki, Jani; Collin, Jussi; Davidson, Pavel
Using Doppler radar and MEMS gyro to augment DGPS for land vehicle navigation Proceedings Article
En: 2009 IEEE Control Applications, (CCA) & Intelligent Control, (ISIC), pp. 1690–1695, 2009.
@inproceedings{5281057,
title = {Using Doppler radar and MEMS gyro to augment DGPS for land vehicle navigation},
author = {Jussi Parviainen and Manuel A V\'{a}zquez and Olli Pekkalin and Jani Hautamaki and Jussi Collin and Pavel Davidson},
doi = {10.1109/CCA.2009.5281057},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {2009 IEEE Control Applications, (CCA) \& Intelligent Control, (ISIC)},
pages = {1690--1695},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Vazquez-Vilar, Gonzalo; Majjigi, Vinay; Sezgin, Aydin; Paulraj, Arogyaswami
Mobility Dependent Feedback Scheme for point-to-point MIMO Systems Proceedings Article
En: Asilomar Conference on Signals, Systems, and Computers (Asilomar SSC 2008), Pacific Grove, CA, U.S.A., 2008.
BibTeX | Etiquetas:
@inproceedings{asilomar2008,
title = {Mobility Dependent Feedback Scheme for point-to-point MIMO Systems},
author = {Gonzalo Vazquez-Vilar and Vinay Majjigi and Aydin Sezgin and Arogyaswami Paulraj},
year = {2008},
date = {2008-10-01},
booktitle = {Asilomar Conference on Signals, Systems, and Computers (Asilomar SSC 2008)},
address = {Pacific Grove, CA, U.S.A.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koch, Tobias; Lapidoth, Amos
On Multipath Fading Channels at High SNR Proceedings Article
En: 2008 IEEE International Symposium on Information Theory, pp. 1572–1576, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.
Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters
@inproceedings{Koch2008,
title = {On Multipath Fading Channels at High SNR},
author = {Tobias Koch and Amos Lapidoth},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4595252},
isbn = {978-1-4244-2256-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Symposium on Information Theory},
pages = {1572--1576},
publisher = {IEEE},
address = {Toronto},
abstract = {This paper studies the capacity of discrete-time multipath fading channels. It is assumed that the number of paths is finite, i.e., that the channel output is influenced by the present and by the L previous channel inputs. A noncoherent channel model is considered where neither transmitter nor receiver are cognizant of the fading's realization, but both are aware of its statistic. The focus is on capacity at high signal-to-noise ratios (SNR). In particular, the capacity pre-loglog---defined as the limiting ratio of the capacity to loglog(SNR) as SNR tends to infinity---is studied. It is shown that, irrespective of the number of paths L, the capacity pre-loglog is 1.},
keywords = {channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters},
pubstate = {published},
tppubtype = {inproceedings}
}
Vazquez, Manuel A; Miguez, Joaquin
A Per-Survivor Processing Algorithm for Maximum Likelihood Equalization of MIMO Channels with Unknown Order Proceedings Article
En: 2008 International ITG Workshop on Smart Antennas, pp. 387–391, IEEE, Vienna, 2008, ISBN: 978-1-4244-1756-8.
Resumen | Enlaces | BibTeX | Etiquetas: Channel estimation, channel impulse response, computational complexity, Computer science education, Computer Simulation, Degradation, Frequency, frequency-selective multiple-input multiple-output, maximum likelihood detection, maximum likelihood equalization, maximum likelihood estimation, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO channels, MIMO communication, per-survivor processing algorithm, time-selective channels, Transmitting antennas
@inproceedings{Vazquez2008,
title = {A Per-Survivor Processing Algorithm for Maximum Likelihood Equalization of MIMO Channels with Unknown Order},
author = {Manuel A Vazquez and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4475587},
isbn = {978-1-4244-1756-8},
year = {2008},
date = {2008-01-01},
booktitle = {2008 International ITG Workshop on Smart Antennas},
pages = {387--391},
publisher = {IEEE},
address = {Vienna},
abstract = {In the equalization of frequency-selective multiple-input multiple-output (MIMO) channels it is usually assumed that the length of the channel impulse response (CIR), also referred to as the channel order, is known. However, this is not true in most practical situations and, in order to avoid the serious performance degradation that occurs when the CIR length is underestimated, a channel with "more than enough" taps is usually considered. This possibly means overestimating the channel order, and is not desirable since the computational complexity of maximum likelihood sequence detection (MLSD) in frequency-selective channels grows exponentially with the channel order. In addition to that, the higher the channel order considered, the more the number of channel coefficients that need to be estimated from the same set of observations. In this paper, we introduce an algorithm for MLSD that incorporates the full estimation of the MIMO CIR parameters, including its order. The proposed technique is based on the per survivor processing (PSP) methodology, it admits both blind and semiblind implementations, depending on the availability of pilot data, and is designed to work with time-selective channels. Besides the analytical derivation of the algorithm, we provide computer simulation results that illustrate the effectiveness of the resulting receiver},
keywords = {Channel estimation, channel impulse response, computational complexity, Computer science education, Computer Simulation, Degradation, Frequency, frequency-selective multiple-input multiple-output, maximum likelihood detection, maximum likelihood equalization, maximum likelihood estimation, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO channels, MIMO communication, per-survivor processing algorithm, time-selective channels, Transmitting antennas},
pubstate = {published},
tppubtype = {inproceedings}
}
Miguez, Joaquin
Analysis of a Sequential Monte Carlo Optimization Methodology Proceedings Article
En: 16th European Signal Processing Conference (EUSIPCO 2008), Lausanne, 2008.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Miguez2008,
title = {Analysis of a Sequential Monte Carlo Optimization Methodology},
author = {Joaquin Miguez},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2008/papers/1569105254.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {16th European Signal Processing Conference (EUSIPCO 2008)},
address = {Lausanne},
abstract = {We investigate a family of stochastic exploration methods that has been recently proposed to carry out estimation and prediction in discrete-time random dynamical systems. The key of the novel approach is to identify a cost function whose minima provide valid estimates of the system state at successive time instants. This function is recursively optimized using a sequential Monte Carlo minimization (SMCM) procedure which is similar to standard particle filtering algorithms but does not require an explicit probabilistic model to be imposed on the system. In this paper, we analyze the asymptotic convergence of SMCM methods and show that a properly designed algorithm produces a sequence of system-state estimates with individually minimal contributions to the cost function. We apply the SMCM method to a target tracking problem in order to illustrate how convergence is achieved in the way predicted by the theory.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando
Kullback-Leibler Divergence Estimation of Continuous Distributions Proceedings Article
En: 2008 IEEE International Symposium on Information Theory, pp. 1666–1670, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.
Resumen | Enlaces | BibTeX | Etiquetas: Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions
@inproceedings{Perez-Cruz2008,
title = {Kullback-Leibler Divergence Estimation of Continuous Distributions},
author = {Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4595271},
isbn = {978-1-4244-2256-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Symposium on Information Theory},
pages = {1666--1670},
publisher = {IEEE},
address = {Toronto},
abstract = {We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved estimating the densities first. Our main result shows this intermediate step is unnecessary and that the divergence can be either estimated using the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, as the exponential or Erlang. We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem.},
keywords = {Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions},
pubstate = {published},
tppubtype = {inproceedings}
}