2010
Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Tree-Structure Expectation Propagation for Decoding LDPC Codes over Binary Erasure Channels Proceedings Article
In: 2010 IEEE International Symposium on Information Theory, pp. 799–803, IEEE, Austin, TX, 2010, ISBN: 978-1-4244-7892-7.
@inproceedings{Olmos2010,
title = {Tree-Structure Expectation Propagation for Decoding LDPC Codes over Binary Erasure Channels},
author = {Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5513636},
isbn = {978-1-4244-7892-7},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Symposium on Information Theory},
pages = {799--803},
publisher = {IEEE},
address = {Austin, TX},
abstract = {Expectation Propagation is a generalization of Belief Propagation (BP) in two ways. First, it can be used with any exponential family distribution over the cliques in the graph. Second, it can impose additional constraints on the marginal distributions. We use this second property to impose pair-wise marginal distribution constraints in some check nodes of the LDPC Tanner graph. These additional constraints allow decoding the received codeword when the BP decoder gets stuck. In this paper, we first present the new decoding algorithm, whose complexity is identical to the BP decoder, and we then prove that it is able to decode codewords with a larger fraction of erasures, as the block size tends to infinity. The proposed algorithm can also be understood as a simplification of the Maxwell decoder, but without its computational complexity. We also illustrate that the new algorithm outperforms the BP decoder for finite block-size codes.},
keywords = {belief propagation, binary erasure channels, Bipartite graph, BP decoder, Capacity planning, Channel Coding, codeword, computational complexity, Decoding, Finishing, graph theory, H infinity control, LDPC code decoding, LDPC Tanner graph, Maxwell decoder, parity check codes, Performance analysis, tree structure expectation propagation, trees (mathematics), Upper bound},
pubstate = {published},
tppubtype = {inproceedings}
}
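The decoder described in the abstract above extends the standard belief-propagation (peeling) decoder for the binary erasure channel with pairwise check-node constraints. For reference only, here is a minimal Python sketch of the baseline peeling decoder on a toy parity-check matrix; the matrix H, the erasure pattern and all names are illustrative assumptions, and the tree-structured EP extension itself is not shown.

import numpy as np

def peel_decode_bec(H, y):
    """Iteratively recover erased bits (marked as -1) of a word received
    over a binary erasure channel, using parity checks that have exactly
    one erased neighbour. Returns the (possibly partially) decoded word."""
    y = np.array(y, dtype=int)
    progress = True
    while progress:
        progress = False
        for check in H:                          # one parity check per row
            idx = np.flatnonzero(check)          # variables in this check
            erased = [i for i in idx if y[i] == -1]
            if len(erased) == 1:                 # exactly one unknown: solve it
                known = [i for i in idx if y[i] != -1]
                y[erased[0]] = np.bitwise_xor.reduce(y[known]) if known else 0
                progress = True
    return y

# Toy example: (7,4) Hamming-code parity-check matrix (illustrative choice).
H = np.array([[1, 1, 0, 1, 1, 0, 0],
              [1, 0, 1, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0, 1]])
received = [1, -1, 1, 0, -1, 0, 1]               # codeword with bits 1 and 4 erased
print(peel_decode_bec(H, received))              # recovers [1, 0, 1, 0, 1, 0, 1]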
Djuric, Petar M; Closas, Pau; Bugallo, Monica F; Miguez, Joaquin
Evaluation of a Method's Robustness Proceedings Article
In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 3598–3601, IEEE, Dallas, 2010, ISSN: 1520-6149.
@inproceedings{Djuric2010,
title = {Evaluation of a Method's Robustness},
author = {Petar M Djuric and Pau Closas and Monica F Bugallo and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5495921},
issn = {1520-6149},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Conference on Acoustics, Speech and Signal Processing},
pages = {3598--3601},
publisher = {IEEE},
address = {Dallas},
abstract = {In signal processing, it is typical to develop or use a method based on a given model. In practice, however, we almost never know the actual model and we hope that the assumed model is in the neighborhood of the true one. If deviations exist, the method may be more or less sensitive to them. Therefore, it is important to know more about this sensitivity, or in other words, how robust the method is to model deviations. To that end, it is useful to have a metric that can quantify the robustness of the method. In this paper we propose a procedure for developing a variety of metrics for measuring robustness. They are based on a discrete random variable that is generated from observed data and data generated according to past data and the adopted model. This random variable is uniform if the model is correct. When the model deviates from the true one, the distribution of the random variable deviates from the uniform distribution. One can then employ measures for differences between distributions in order to quantify robustness. In this paper we describe the proposed methodology and demonstrate it with simulated data.},
keywords = {Electronic mail, Extraterrestrial measurements, Filtering, Gaussian processes, method's robustness, Random variables, robustness, sequential methods, Signal processing, statistical distributions, Telecommunications, uniform distribution, Wireless communication},
pubstate = {published},
tppubtype = {inproceedings}
}
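The procedure outlined in the abstract above can be illustrated with a toy instantiation: for each observation, draw predictive samples from the assumed model, record where the observation falls among them (a discrete rank that is uniform when the model is correct), and quantify robustness by the distance between the empirical rank distribution and the uniform one. The snippet below is only a plausible illustration under these assumptions; the specific distance (total variation here), the sample sizes and the Gaussian/Student-t example are not taken from the paper.

import numpy as np

def rank_statistic(observations, simulate_pred, n_samples=20, rng=None):
    """For each observation, simulate n_samples predictive draws from the
    assumed model and return the rank of the observation among them.
    If the model is correct, the ranks are uniform on {0, ..., n_samples}."""
    rng = rng or np.random.default_rng(0)
    ranks = []
    for t, y in enumerate(observations):
        pred = simulate_pred(t, n_samples, rng)
        ranks.append(int(np.sum(pred < y)))
    return np.array(ranks)

def uniformity_distance(ranks, n_samples):
    """Total-variation distance between the empirical rank distribution
    and the discrete uniform distribution (0 = perfectly uniform)."""
    counts = np.bincount(ranks, minlength=n_samples + 1) / len(ranks)
    return 0.5 * np.abs(counts - 1.0 / (n_samples + 1)).sum()

# Toy check: data generated by the assumed model (standard Gaussian)
# versus data with heavier tails than assumed.
rng = np.random.default_rng(1)
T, M = 2000, 20
good = rng.normal(size=T)                        # matches the assumed model
bad = rng.standard_t(df=2, size=T)               # deviates from it
pred = lambda t, n, r: r.normal(size=n)          # predictive draws under the model
for name, data in [("correct model", good), ("misspecified model", bad)]:
    r = rank_statistic(data, pred, M, rng)
    print(name, uniformity_distance(r, M))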
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Bayesian BCJR for Channel Equalization and Decoding Proceedings Article
In: 2010 IEEE International Workshop on Machine Learning for Signal Processing, pp. 53–58, IEEE, Kittila, 2010, ISSN: 1551-2541.
@inproceedings{Salamanca2010,
title = {Bayesian BCJR for Channel Equalization and Decoding},
author = {Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5589201},
issn = {1551-2541},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {53--58},
publisher = {IEEE},
address = {Kittila},
abstract = {In this paper we focus on probabilistic channel equalization in digital communications. We consider the single-input single-output (SISO) model to show how the statistical information about the multipath channel can be exploited to further improve our estimation of the a posteriori probabilities (APP) during the equalization process. We consider not only the uncertainty due to the noise in the channel, but also in the estimate of the channel state information (CSI). Thus, we resort to a Bayesian approach for the computation of the APP. This novel algorithm has the same complexity as the BCJR, exhibiting a lower bit error rate at the output of the channel decoder than the standard BCJR that considers maximum likelihood (ML) to estimate the CSI.},
keywords = {a posteriori probability, Bayes methods, Bayesian BCJR, Bayesian methods, Bit error rate, channel decoding, channel estate information, Channel estimation, Decoding, digital communication, digital communications, equalisers, Equalizers, error statistics, Markov processes, Maximum likelihood decoding, maximum likelihood estimation, multipath channel, probabilistic channel equalization, Probability, single input single output model, SISO model, statistical information, Training},
pubstate = {published},
tppubtype = {inproceedings}
}
Vinuelas-Peris, Pablo; Artés-Rodríguez, Antonio
Bayesian Joint Recovery of Correlated Signals in Distributed Compressed Sensing Proceedings Article
In: 2010 2nd International Workshop on Cognitive Information Processing, pp. 382–387, IEEE, Elba, 2010, ISBN: 978-1-4244-6459-3.
@inproceedings{Vinuelas-Peris2010,
title = {Bayesian Joint Recovery of Correlated Signals in Distributed Compressed Sensing},
author = {Pablo Vinuelas-Peris and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5604103},
isbn = {978-1-4244-6459-3},
year = {2010},
date = {2010-01-01},
booktitle = {2010 2nd International Workshop on Cognitive Information Processing},
pages = {382--387},
publisher = {IEEE},
address = {Elba},
abstract = {In this paper we address the problem of Distributed Compressed Sensing (DCS) of correlated signals. We model the correlation using the sparse-component correlation coefficient of the signals, a simple and general measure. We develop a sparse Bayesian learning method for this setting that can be applied to both random and optimized projection matrices. As a result, we obtain a reduction in the number of measurements needed for a given recovery error that depends on the correlation coefficient, as shown by computer simulations in different scenarios.},
keywords = {Bayes methods, Bayesian joint recovery, Bayesian methods, correlated signal, Correlation, correlation methods, Covariance matrix, Dictionaries, distributed compressed sensing, matrix decomposition, Noise measurement, sensors, sparse component correlation coefficient},
pubstate = {published},
tppubtype = {inproceedings}
}
Achutegui, Katrin; Rodas, Javier; Escudero, Carlos J; Miguez, Joaquin
A Model-Switching Sequential Monte Carlo Algorithm for Indoor Tracking with Experimental RSS Data Proceedings Article
In: 2010 International Conference on Indoor Positioning and Indoor Navigation, pp. 1–8, IEEE, Zurich, 2010, ISBN: 978-1-4244-5862-2.
@inproceedings{Achutegui2010,
title = {A Model-Switching Sequential Monte Carlo Algorithm for Indoor Tracking with Experimental RSS Data},
author = {Katrin Achutegui and Javier Rodas and Carlos J Escudero and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5648053},
isbn = {978-1-4244-5862-2},
year = {2010},
date = {2010-01-01},
booktitle = {2010 International Conference on Indoor Positioning and Indoor Navigation},
pages = {1--8},
publisher = {IEEE},
address = {Zurich},
abstract = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as position-dependent data. This type of measurement is very appealing because it can be easily obtained with a variety of (inexpensive) wireless technologies. However, the extraction of accurate location information from RSS in indoor scenarios is not an easy task. Due to multipath propagation, it is hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. For that reason, we propose the use of a compound model that combines several sub-models, whose parameters are adjusted to different propagation environments. This methodology, called Interacting Multiple Models (IMM), has been used in the past either for modeling the motion of maneuvering targets or the relationship between the target position and the observations. Here, we extend its application to handle both types of uncertainty simultaneously and we refer to the resulting state-space model as a generalized IMM (GIMM) system. The flexibility of the GIMM approach is attained at the expense of an increase in the number of random processes that must be accurately tracked. To overcome this difficulty, we introduce a Rao-Blackwellized sequential Monte Carlo tracking algorithm that exhibits good performance both with synthetic and experimental data.},
keywords = {Approximation methods, Computational modeling, Data models, generalized IMM system, GIMM approach, indoor radio, Indoor tracking, Kalman filters, maneuvering target motion, Mathematical model, model switching sequential Monte Carlo algorithm, Monte Carlo methods, multipath propagation, multiple model interaction, propagation environment, radio receivers, radio tracking, radio transmitters, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, sensors, state space model, target position dependent data, transmitter-to-receiver distance, wireless technology},
pubstate = {published},
tppubtype = {inproceedings}
}
Helander, E; Silén, H; Miguez, Joaquin; Gabbouj, M
Maximum a Posteriori Voice Conversion Using Sequential Monte Carlo Methods Proceedings Article
In: Eleventh Annual Conference of the International Speech Communication Association (INTERSPEECH), Makuhari, Chiba, Japan, 2010.
@inproceedings{Helander2010,
title = {Maximum a Posteriori Voice Conversion Using Sequential Monte Carlo Methods},
author = {E Helander and H Sil\'{e}n and Joaquin Miguez and M Gabbouj},
url = {http://www.isca-speech.org/archive/interspeech_2010/i10_1716.html},
year = {2010},
date = {2010-01-01},
booktitle = {Eleventh Annual Conference of the International Speech Communication Association (INTERSPEECH)},
address = {Makuhari, Chiba, Japan},
abstract = {Many voice conversion algorithms are based on frame-wise mapping from source features into target features. This ignores the inherent temporal continuity that is present in speech and can degrade the subjective quality. In this paper, we propose to optimize the speech feature sequence after a frame-based conversion algorithm has been applied. In particular, we select the sequence of speech features through the minimization of a cost function that involves both the conversion error and the smoothness of the sequence. The estimation problem is solved using sequential Monte Carlo methods. Both subjective and objective results show the effectiveness of the method.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Salamanca, Luis; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Channel Decoding with a Bayesian Equalizer Proceedings Article
In: 2010 IEEE International Symposium on Information Theory, pp. 1998–2002, IEEE, Austin, TX, 2010, ISBN: 978-1-4244-7892-7.
@inproceedings{Salamanca2010a,
title = {Channel Decoding with a Bayesian Equalizer},
author = {Luis Salamanca and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5513348},
isbn = {978-1-4244-7892-7},
year = {2010},
date = {2010-01-01},
booktitle = {2010 IEEE International Symposium on Information Theory},
pages = {1998--2002},
publisher = {IEEE},
address = {Austin, TX},
abstract = {Low-density parity-check (LDPC) decoders assume that the channel state information (CSI) is known and that they have the true a posteriori probability (APP) for each transmitted bit. But in most cases of interest, the CSI needs to be estimated with the help of a short training sequence and the LDPC decoder has to decode the received word using faulty APP estimates. In this paper, we study the uncertainty in the CSI estimate and how it affects the bit error rate (BER) output by the LDPC decoder. To improve these APP estimates, we propose a Bayesian equalizer that takes into consideration not only the uncertainty due to the noise in the channel, but also the uncertainty in the CSI estimate, reducing the BER after the LDPC decoder.},
keywords = {a posteriori probability, Bayesian equalizer, Bayesian methods, BER, Bit error rate, Channel Coding, channel decoding, channel estate information, Communication channels, Decoding, equalisers, Equalizers, error statistics, low-density parity-check decoders, LPDC decoders, Maximum likelihood decoding, maximum likelihood detection, maximum likelihood estimation, Noise reduction, parity check codes, Probability, Uncertainty},
pubstate = {published},
tppubtype = {inproceedings}
}
Alvarez, Mauricio; Luengo, David; Titsias, Michalis; Lawrence, Neil D
Efficient Multioutput Gaussian Processes Through Variational Inducing Kernels Proceedings Article
In: AISTATS 2010, Sardinia, 2010.
@inproceedings{Alvarez2010,
title = {Efficient Multioutput Gaussian Processes Through Variational Inducing Kernels},
author = {Mauricio Alvarez and David Luengo and Michalis Titsias and Neil D Lawrence},
url = {http://eprints.pascal-network.org/archive/00006397/},
year = {2010},
date = {2010-01-01},
booktitle = {AISTATS 2010},
address = {Sardinia},
abstract = {Interest in multioutput kernel methods is increasing, whether under the guise of multitask learning, multisensor networks or structured output data. From the Gaussian process perspective a multioutput Mercer kernel is a covariance function over correlated output functions. One way of constructing such kernels is based on convolution processes (CP). A key problem for this approach is efficient inference. Alvarez and Lawrence recently presented a sparse approximation for CPs that enabled efficient inference. In this paper, we extend this work in two directions: we introduce the concept of variational inducing functions to handle potential non-smooth functions involved in the kernel CP construction and we consider an alternative approach to approximate inference based on variational methods, extending the work by Titsias (2009) to the multiple output case. We demonstrate our approaches on prediction of school marks, compiler performance and financial time series.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Plata-Chaves, Jorge; Lazaro, Marcelino
Closed-Form Error Exponent for the Neyman-Pearson Fusion of Two-Dimensional Markov Local Decisions Proceedings Article
In: European Signal Processing Conference (EUSIPCO 2010), Aalborg, 2010.
@inproceedings{Plata-Chaves2010,
title = {Closed-Form Error Exponent for the Neyman-Pearson Fusion of Two-Dimensional Markov Local Decisions},
author = {Jorge Plata-Chaves and Marcelino Lazaro},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569292447.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {European Signal Processing Conference (EUSIPCO 2010)},
address = {Aalborg},
abstract = {We consider a distributed detection system formed by a large number of local detectors and a fusion center that performs a Neyman-Pearson fusion of the binary quantizations of the sensor observations. The aforementioned local decisions are taken with no kind of cooperation and transmitted to the fusion center over error-free parallel access channels. Furthermore, the devices are located on a rectangular lattice so that sensors belonging to a specific row or column are equally spaced. For each hypothesis H0 and H1, the correlation structure of the local decisions is modelled with a two-dimensional causal field where the rows and columns are outcomes of the same first-order binary Markov chain. Under this scenario, we derive a closed-form error exponent for the Neyman-Pearson fusion of the local decisions. Afterwards, using the derived error exponent we study the effect of different design parameters of the network on its overall detection performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Czink, Nicolai; Bandemer, Bernd; Vazquez-Vilar, Gonzalo; Jalloul, Louay; Oestges, Claude; Paulraj, Arogyaswami
Spatial Separation of Multi-User MIMO Channels Proceedings Article
In: 20th Personal, Indoor and Mobile Radio Communications Symposium 2009 (PIMRC 09), Tokyo, Japan, 2009.
@inproceedings{nczink2009,
title = {Spatial Separation of Multi-User MIMO Channels},
author = {Nicolai Czink and Bernd Bandemer and Gonzalo Vazquez-Vilar and Louay Jalloul and Claude Oestges and Arogyaswami Paulraj},
year = {2009},
date = {2009-09-01},
booktitle = {20th Personal, Indoor and Mobile Radio Communications Symposium 2009 (PIMRC 09)},
address = {Tokyo, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bandemer, Bernd; Vazquez-Vilar, Gonzalo; Gamal, Abbas El
On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels Proceedings Article
In: 2009 IEEE International Symposium on Information Theory (ISIT 2009), Coex, Seoul, Korea, 2009.
@inproceedings{bbandemer2009,
title = {On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels},
author = {Bernd Bandemer and Gonzalo Vazquez-Vilar and Abbas El Gamal},
year = {2009},
date = {2009-06-01},
booktitle = {2009 IEEE International Symposium on Information Theory (ISIT 2009)},
address = {Coex, Seoul, Korea},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
López-Valcarce, Roberto; Vazquez-Vilar, Gonzalo; Álvarez-Díaz, Marcos
Multiantenna detection of multicarrier primary signals exploiting spectral a priori information Proceedings Article
In: 4th International Conference on Cognitive Radio Oriented Wireless Networks and Communications (Crowncom 2009), Hannover, Germany, 2009.
@inproceedings{crowncom2009,
title = {Multiantenna detection of multicarrier primary signals exploiting spectral a priori information},
author = {Roberto L\'{o}pez-Valcarce and Gonzalo Vazquez-Vilar and Marcos \'{A}lvarez-D\'{i}az},
year = {2009},
date = {2009-06-01},
booktitle = {4th International Conference on Cognitive Radio Oriented Wireless Networks and Communications (Crowncom 2009)},
address = {Hannover, Germany},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
López-Valcarce, Roberto; Vazquez-Vilar, Gonzalo
Wideband Spectrum Sensing in Cognitive Radio: Joint Estimation of Noise Variance and Multiple Signal Levels Proceedings Article
In: 2009 IEEE International Workshop on Signal Processing Advances for Wireless Communications (Spawc 2009), Perugia, Italy, 2009.
@inproceedings{spawc2009,
title = {Wideband Spectrum Sensing in Cognitive Radio: Joint Estimation of Noise Variance and Multiple Signal Levels},
author = {Roberto L\'{o}pez-Valcarce and Gonzalo Vazquez-Vilar},
year = {2009},
date = {2009-06-01},
booktitle = {2009 IEEE International Workshop on Signal Processing Advances for Wireless Communications (Spawc 2009)},
address = {Perugia, Italy},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olmos, Pablo M; Murillo-Fuentes, Juan Jose; Perez-Cruz, Fernando
Soft LDPC Decoding in Nonlinear Channels with Gaussian Processes for Classification Proceedings Article
In: European Signal Processing Conference (EUSIPCO), Glasgow, 2009.
@inproceedings{Olmos2009,
title = {Soft LDPC Decoding in Nonlinear Channels with Gaussian Processes for Classification},
author = {Pablo M Olmos and Juan Jose Murillo-Fuentes and Fernando Perez-Cruz},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569186781.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {European Signal Processing Conference (EUSIPCO)},
address = {Glasgow},
abstract = {In this paper, we propose a new approach for nonlinear equalization based on Gaussian processes for classification (GPC). We also measure the performance of the equalizer after a low-density parity-check channel decoder has detected the received sequence. Typically, most channel equalizers concentrate on reducing the bit error rate, instead of providing accurate posterior probability estimates. GPC is a Bayesian nonlinear classification tool that provides accurate posterior probability estimates with short training sequences. We show that the accuracy of these estimates is essential for optimal performance of the channel decoder and that the error rate outputted by the equalizer might be irrelevant to understand the performance of the overall communication receiver. We compare the proposed equalizers with state-of-the-art solutions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
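As a rough illustration of the kind of equalizer studied above, the sketch below trains a Gaussian-process classifier on a short training sequence of BPSK symbols sent through a toy dispersive, mildly nonlinear channel and outputs posterior probabilities for the remaining symbols. The channel taps, window length and scikit-learn classifier are illustrative assumptions; the LDPC decoding stage evaluated in the paper is not included.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

rng = np.random.default_rng(0)
n, n_train, window = 600, 200, 3

# BPSK symbols through a toy dispersive channel with a mild nonlinearity.
bits = rng.integers(0, 2, size=n)
s = 2.0 * bits - 1.0
h = np.array([0.8, 0.5, 0.3])                       # illustrative channel taps
x = np.convolve(s, h, mode="full")[:n]
x = x + 0.1 * x ** 3 + 0.2 * rng.normal(size=n)     # nonlinearity plus noise

# Feature vectors: a sliding window of received samples for each symbol.
X = np.array([x[i: i + window] for i in range(n - window + 1)])
y = bits[: n - window + 1]

gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gpc.fit(X[:n_train], y[:n_train])

# Posterior probability of bit = 1 for the payload symbols; these soft
# outputs are what a channel decoder would consume downstream.
post = gpc.predict_proba(X[n_train:])[:, 1]
ber = np.mean((post > 0.5).astype(int) != y[n_train:])
print("hard-decision BER on payload:", ber)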
Bravo-Santos, Ángel M; Djuric, Petar M
Cooperative Relay Communications in Mesh Networks Proceedings Article
In: 2009 IEEE 10th Workshop on Signal Processing Advances in Wireless Communications, pp. 499–503, IEEE, Perugia, 2009, ISBN: 978-1-4244-3695-8.
@inproceedings{Bravo-Santos2009,
title = {Cooperative Relay Communications in Mesh Networks},
author = {\'{A}ngel M Bravo-Santos and Petar M Djuric},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5161835},
isbn = {978-1-4244-3695-8},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 10th Workshop on Signal Processing Advances in Wireless Communications},
pages = {499--503},
publisher = {IEEE},
address = {Perugia},
abstract = {In previous literature on cooperative relay communications, the emphasis has been on the study of multi-hop networks. In this paper we address mesh wireless networks that use decode-and-forward relays for which we derive the optimal node decision rules in case of binary transmission. We also obtain the expression for the overall bit error probability. We compare the mesh networks with multi-hop networks and show the improvement in performance that can be achieved with them when both networks have the same number of nodes and equal number of hops.},
keywords = {binary transmission, bit error probability, Bit error rate, cooperative relay communications, decode-and-forward relays, Detectors, error statistics, Maximum likelihood decoding, maximum likelihood detection, Mesh networks, mesh wireless networks, multi-hop networks, Network topology, optimal node decision rules, Peer to peer computing, radio networks, Relays, spread spectrum communication, telecommunication network topology, Wireless Sensor Networks},
pubstate = {published},
tppubtype = {inproceedings}
}
Bugallo, Monica F; Maiz, Cristina S; Miguez, Joaquin; Djuric, Petar M
Cost-Reference Particle Filters and Fusion of Information Proceedings Article
In: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 286–291, IEEE, Marco Island, FL, 2009.
@inproceedings{Bugallo2009,
title = {Cost-Reference Particle Filters and Fusion of Information},
author = {Monica F Bugallo and Cristina S Maiz and Joaquin Miguez and Petar M Djuric},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785936},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
pages = {286--291},
publisher = {IEEE},
address = {Marco Island, FL},
abstract = {Cost-reference particle filtering is a methodology for tracking unknowns in a system without reliance on probabilistic information about the noises in the system. The methodology is based on analogous principles as the ones of standard particle filtering. Unlike the random measures of standard particle filters that are composed of particles and weights, the random measures of cost-reference particle filters contain particles and user-defined costs. In this paper, we discuss a few scenarios where we need to meld random measures of two or more cost-reference particle filters. The objective is to obtain a fused random measure that combines the information from the individual cost-reference particle filters.},
keywords = {costs, distributed processing, Electronic mail, fusion, Information filtering, Information filters, information fusion, Measurement standards, probabilistic information, random measures, sensor fusion, smoothing methods, Weight measurement},
pubstate = {published},
tppubtype = {inproceedings}
}
Djuric, Petar M; Miguez, Joaquin
Model Assessment with Kolmogorov-Smirnov Statistics Proceedings Article
In: 2009 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2973–2976, IEEE, Taipei, 2009, ISSN: 1520-6149.
@inproceedings{Djuric2009,
title = {Model Assessment with Kolmogorov-Smirnov Statistics},
author = {Petar M Djuric and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4960248},
issn = {1520-6149},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Conference on Acoustics, Speech and Signal Processing},
pages = {2973--2976},
publisher = {IEEE},
address = {Taipei},
abstract = {One of the most basic problems in science and engineering is the assessment of a considered model. The model should describe a set of observed data and the objective is to find ways of deciding if the model should be rejected. It seems that this is an ill-conditioned problem because we have to test the model against all the possible alternative models. In this paper we use the Kolmogorov-Smirnov statistic to develop a test that shows if the model should be kept or it should be rejected. We explain how this testing can be implemented in the context of particle filtering. We demonstrate the performance of the proposed method by computer simulations.},
keywords = {Bayesian methods, Computer Simulation, Context modeling, Electronic mail, Filtering, ill-conditioned problem, Kolmogorov-Smirnov statistics, model assessment, modelling, Predictive models, Probability, statistical analysis, statistics, Testing},
pubstate = {published},
tppubtype = {inproceedings}
}
Maiz, Cristina S; Miguez, Joaquin; Djuric, Petar M
Particle Filtering in the Presence of Outliers Proceedings Article
In: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 33–36, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
@inproceedings{Maiz2009,
title = {Particle Filtering in the Presence of Outliers},
author = {Cristina S Maiz and Joaquin Miguez and Petar M Djuric},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278645},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {33--36},
publisher = {IEEE},
address = {Cardiff},
abstract = {Particle filters have become very popular signal processing tools for problems that involve nonlinear tracking of an unobserved signal of interest given a series of related observations. In this paper we propose a new scheme for particle filtering when the observed data are possibly contaminated with outliers. An outlier is an observation that has been generated by some (unknown) mechanism different from the assumed model of the data. Therefore, when handled in the same way as regular observations, outliers may drastically degrade the performance of the particle filter. To address this problem, we introduce an auxiliary particle filtering scheme that incorporates an outlier detection step. We propose to implement it by means of a test involving statistics of the predictive distributions of the observations. Specifically, we investigate the use of a proposed statistic called spatial depth that can easily be applied to multidimensional random variates. The performance of the resulting algorithm is assessed by computer simulations of target tracking based on signal-power observations.},
keywords = {computer simulations, Degradation, Filtering, multidimensional random variates, Multidimensional signal processing, Multidimensional systems, Nonlinear tracking, Outlier detection, predictive distributions, Signal processing, signal processing tools, signal-power observations, spatial depth, statistical analysis, statistical distributions, statistics, Target tracking, Testing},
pubstate = {published},
tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
A Novel Rejection Sampling Scheme for Posterior Probability Distributions Proceedings Article
In: 2009 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2921–2924, IEEE, Taipei, 2009, ISSN: 1520-6149.
@inproceedings{Martino2009,
title = {A Novel Rejection Sampling Scheme for Posterior Probability Distributions},
author = {Luca Martino and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4960235},
issn = {1520-6149},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Conference on Acoustics, Speech and Signal Processing},
pages = {2921--2924},
publisher = {IEEE},
address = {Taipei},
abstract = {Rejection sampling (RS) is a well-known method to draw from arbitrary target probability distributions, which has important applications by itself or as a building block for more sophisticated Monte Carlo techniques. The main limitation to the use of RS is the need to find an adequate upper bound for the ratio of the target probability density function (pdf) over the proposal pdf from which the samples are generated. There are no general methods to analytically find this bound, except in the particular case in which the target pdf is log-concave. In this paper we adopt a Bayesian view of the problem and propose a general RS scheme to draw from the posterior pdf of a signal of interest using its prior density as a proposal function. The method enables the analytical calculation of the bound and can be applied to a large class of target densities. We illustrate its use with a simple numerical example.},
keywords = {Additive noise, arbitrary target probability distributions, Bayes methods, Bayesian methods, Monte Carlo integration, Monte Carlo methods, Monte Carlo techniques, Overbounding, posterior probability distributions, Probability density function, Probability distribution, Proposals, Rejection sampling, rejection sampling scheme, Sampling methods, Signal processing algorithms, signal sampling, Upper bound},
pubstate = {published},
tppubtype = {inproceedings}
}
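The key observation in the abstract above, namely that proposing from the prior lets the acceptance ratio be bounded through the likelihood alone, can be made concrete in one dimension. Everything below (the Gaussian prior and likelihood, the bound L_max) is an illustrative assumption rather than the paper's general construction: since p(x|y) is proportional to p(y|x)p(x), proposing from the prior gives acceptance probability p(y|x)/L_max, where L_max upper-bounds the likelihood.

import numpy as np

rng = np.random.default_rng(0)

# Illustrative model: x ~ N(0, 1) prior, observation y = x + noise, noise ~ N(0, 0.5^2).
sigma, y_obs = 0.5, 1.2
likelihood = lambda x: np.exp(-0.5 * ((y_obs - x) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
L_max = 1.0 / (sigma * np.sqrt(2 * np.pi))       # sup over x, attained at x = y_obs

def rs_posterior(n):
    """Draw n samples from p(x | y_obs) by proposing from the prior and
    accepting with probability likelihood(x) / L_max."""
    out = []
    while len(out) < n:
        x = rng.normal()                          # proposal = prior draw
        if rng.uniform() < likelihood(x) / L_max:
            out.append(x)
    return np.array(out)

samples = rs_posterior(5000)
# Closed-form posterior for this conjugate Gaussian case, for comparison.
post_var = 1.0 / (1.0 + 1.0 / sigma ** 2)
post_mean = post_var * y_obs / sigma ** 2
print(samples.mean(), post_mean, samples.var(), post_var)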
Achutegui, Katrin; Martino, Luca; Rodas, Javier; Escudero, Carlos J; Miguez, Joaquin
A Multi-Model Particle Filtering Algorithm for Indoor Tracking of Mobile Terminals Using RSS Data Proceedings Article
In: 2009 IEEE International Conference on Control Applications, pp. 1702–1707, IEEE, Saint Petersburg, 2009, ISBN: 978-1-4244-4601-8.
@inproceedings{Achutegui2009,
title = {A Multi-Model Particle Filtering Algorithm for Indoor Tracking of Mobile Terminals Using RSS Data},
author = {Katrin Achutegui and Luca Martino and Javier Rodas and Carlos J Escudero and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5280960},
isbn = {978-1-4244-4601-8},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Conference on Control Applications},
pages = {1702--1707},
publisher = {IEEE},
address = {Saint Petersburg},
abstract = {In this paper we address the problem of indoor tracking using received signal strength (RSS) as a position-dependent data measurement. This type of measurement is very appealing because it can be easily obtained with a variety of wireless technologies which are relatively inexpensive. The extraction of accurate location information from RSS in indoor scenarios is not an easy task, though. Since RSS is highly influenced by multipath propagation, it turns out to be very hard to adequately model the correspondence between the received power and the transmitter-to-receiver distance. The measurement models proposed in the literature are site-specific and require a great deal of information regarding the structure of the building where the tracking will be performed and therefore are not useful for a general application. For that reason we propose the use of a compound model that combines several sub-models, whose parameters are adjusted to specific and different propagation environments. This methodology, called interacting multiple models (IMM), has been used in the past for modeling the motion of maneuvering targets. Here, we extend its application to also handle the uncertainty in the RSS observations and we refer to the resulting state-space model as a generalized IMM (GIMM) system. The flexibility of the GIMM approach is attained at the expense of an increase in the number of random processes that must be accurately tracked. To overcome this difficulty, we introduce a Rao-Blackwellized sequential Monte Carlo tracking algorithm that exhibits good performance both with synthetic and experimental data.},
keywords = {Bayesian methods, Control systems, Filtering algorithms, generalized interacting multiple model, GIMM, indoor radio, Indoor tracking, mobile radio, mobile terminal, Monte Carlo methods, multipath propagation, position-dependent data measurement, random process, random processes, Rao-Blackwellized sequential Monte Carlo tracking, received signal strength, RSS data, Sliding mode control, State-space methods, state-space model, Target tracking, tracking, transmitter-to-receiver distance, wireless network, wireless technology},
pubstate = {published},
tppubtype = {inproceedings}
}
Djuric, Petar M; Bugallo, Monica F; Closas, Pau; Miguez, Joaquin
Measuring the Robustness of Sequential Methods Proceedings Article
In: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 29–32, IEEE, Aruba, Dutch Antilles, 2009, ISBN: 978-1-4244-5179-1.
@inproceedings{Djuric2009a,
title = {Measuring the Robustness of Sequential Methods},
author = {Petar M Djuric and Monica F Bugallo and Pau Closas and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5413275},
isbn = {978-1-4244-5179-1},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
pages = {29--32},
publisher = {IEEE},
address = {Aruba, Dutch Antilles},
abstract = {Whenever we apply methods for processing data, we make a number of model assumptions. In reality, these assumptions are not always correct. Robust methods can withstand model inaccuracies, that is, despite some incorrect assumptions they can still produce good results. We often want to know how robust employed methods are. To that end we need to have a yardstick for measuring robustness. In this paper, we propose an approach for constructing such metrics for sequential methods. These metrics are derived from the Kolmogorov-Smirnov distance between the cumulative distribution functions of the actual observations and the ones based on the assumed model. The use of the proposed metrics is demonstrated with simulation examples.},
keywords = {Additive noise, cumulative distribution functions, data processing method, extended Kalman filtering, Extraterrestrial measurements, Filtering, Gaussian distribution, Gaussian noise, Kalman filters, Kolmogorov-Smirnov distance, Least squares approximation, Noise robustness, nonlinear filters, robustness, sequential methods, statistical distributions, telecommunication computing},
pubstate = {published},
tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
New Accept/Reject Methods for Independent Sampling from Posterior Probability Distributions Proceedings Article
In: 17th European Signal Processing Conference (EUSIPCO 2009), Glasgow, 2009.
@inproceedings{Martino2009a,
title = {New Accept/Reject Methods for Independent Sampling from Posterior Probability Distributions},
author = {Luca Martino and Joaquin Miguez},
url = {http://www.academia.edu/2355641/NEW_ACCEPT_REJECT_METHODS_FOR_INDEPENDENT_SAMPLING_FROM_POSTERIOR_PROBABILITY_DISTRIBUTIONS},
year = {2009},
date = {2009-01-01},
booktitle = {17th European Signal Processing Conference (EUSIPCO 2009)},
address = {Glasgow},
abstract = {Rejection sampling (RS) is a well-known method to generate (pseudo-)random samples from arbitrary probability distributions that enjoys important applications, either by itself or as a tool in more sophisticated Monte Carlo techniques. Unfortunately, the use of RS techniques demands the calculation of tight upper bounds for the ratio of the target probability density function (pdf) over the proposal density from which candidate samples are drawn. Except for the class of log-concave target pdf's, for which an efficient algorithm exists, there are no general methods to analytically determine this bound, which has to be derived from scratch for each specific case. In this paper, we tackle the general problem of applying RS to draw from an arbitrary posterior pdf using the prior density as a proposal function. This is a scenario that appears frequently in Bayesian signal processing methods. We derive a general geometric procedure for the calculation of upper bounds that can be used with a broad class of target pdf's, including scenarios with correlated observations, multimodal and/or mixture measurement noises. We provide some simple numerical examples to illustrate the application of the proposed techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando; Kulkarni, S R
Distributed Least Square for Consensus Building in Sensor Networks Proceedings Article
In: 2009 IEEE International Symposium on Information Theory, pp. 2877–2881, IEEE, Seoul, 2009, ISBN: 978-1-4244-4312-3.
@inproceedings{Perez-Cruz2009,
title = {Distributed Least Square for Consensus Building in Sensor Networks},
author = {Fernando Perez-Cruz and S R Kulkarni},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5205336},
isbn = {978-1-4244-4312-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Symposium on Information Theory},
pages = {2877--2881},
publisher = {IEEE},
address = {Seoul},
abstract = {We present a novel mechanism for consensus building in sensor networks. The proposed algorithm has three main properties that make it suitable for general sensor-network learning. First, the proposed algorithm is based on robust nonparametric statistics and thereby needs little prior knowledge about the network and the function that needs to be estimated. Second, the algorithm uses only local information about the network and it communicates only with nearby sensors. Third, the algorithm is completely asynchronous and robust. It does not need to coordinate the sensors to estimate the underlying function and it is not affected if other sensors in the network stop working. Therefore, the proposed algorithm is an ideal candidate for sensor networks deployed in remote and inaccessible areas, which might need to change their objective once they have been set up.},
keywords = {Change detection algorithms, Channel Coding, Distributed computing, distributed least square method, graphical models, Inference algorithms, Kernel, Least squares methods, nonparametric statistics, Parametric statistics, robustness, sensor-network learning, statistical analysis, Telecommunication network reliability, Wireless sensor network, Wireless Sensor Networks},
pubstate = {published},
tppubtype = {inproceedings}
}
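The paper above builds consensus with a nonparametric, asynchronous scheme; the sketch below is not that algorithm, only a generic gossip-style illustration of asynchronous, neighbour-only averaging of local least-squares estimates. The ring topology, noise level and all variable names are assumptions made for the example.

import numpy as np

rng = np.random.default_rng(0)

# Each sensor observes a few noisy linear measurements of a common parameter w.
n_sensors, dim = 20, 3
w_true = rng.normal(size=dim)
local_est = []
for _ in range(n_sensors):
    A = rng.normal(size=(5, dim))
    b = A @ w_true + 0.3 * rng.normal(size=5)
    local_est.append(np.linalg.lstsq(A, b, rcond=None)[0])   # local LS estimate
est = np.array(local_est)

# Sensors placed on a ring; each one only talks to its two neighbours.
neighbours = {i: [(i - 1) % n_sensors, (i + 1) % n_sensors] for i in range(n_sensors)}

# Asynchronous gossip: a random sensor wakes up and averages with a random neighbour.
for _ in range(3000):
    i = rng.integers(n_sensors)
    j = rng.choice(neighbours[i])
    avg = 0.5 * (est[i] + est[j])
    est[i] = avg
    est[j] = avg

print("spread across sensors:", np.max(np.linalg.norm(est - est.mean(axis=0), axis=1)))
print("consensus error vs true w:", np.linalg.norm(est.mean(axis=0) - w_true))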
Fresia, Maria; Perez-Cruz, Fernando; Poor, Vincent H
Optimized Concatenated LDPC Codes for Joint Source-Channel Coding Proceedings Article
In: 2009 IEEE International Symposium on Information Theory, pp. 2131–2135, IEEE, Seoul, 2009, ISBN: 978-1-4244-4312-3.
@inproceedings{Fresia2009,
title = {Optimized Concatenated LDPC Codes for Joint Source-Channel Coding},
author = {Maria Fresia and Fernando Perez-Cruz and Vincent H Poor},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5205766},
isbn = {978-1-4244-4312-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Symposium on Information Theory},
pages = {2131--2135},
publisher = {IEEE},
address = {Seoul},
abstract = {In this paper a scheme for joint source-channel coding based on low-density-parity-check (LDPC) codes is investigated. Two concatenated independent LDPC codes are used in the transmitter: one for source coding and the other for channel coding, with a joint belief propagation decoder. The asymptotic behavior is analyzed using EXtrinsic Information Transfer (EXIT) charts and this approximation is corroborated with illustrative experiments. The optimization of the degree distributions for our sparse code to maximize the information transmission rate is also considered.},
keywords = {approximation theory, asymptotic behavior analysis, Channel Coding, combined source-channel coding, Concatenated codes, Decoding, Entropy, EXIT chart, extrinsic information transfer, H infinity control, Information analysis, joint belief propagation decoder, joint source-channel coding, low-density-parity-check code, optimized concatenated independent LDPC codes, parity check codes, Redundancy, source coding, transmitter, Transmitters},
pubstate = {published},
tppubtype = {inproceedings}
}
Martino, Luca; Miguez, Joaquin
An Adaptive Accept/Reject Sampling Algorithm for Posterior Probability Distributions Proceedings Article
In: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 45–48, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
@inproceedings{Martino2009b,
title = {An Adaptive Accept/Reject Sampling Algorithm for Posterior Probability Distributions},
author = {Luca Martino and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278644},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {45--48},
publisher = {IEEE},
address = {Cardiff},
abstract = {Accept/reject sampling is a well-known method to generate random samples from arbitrary target probability distributions. It demands the design of a suitable proposal probability density function (pdf) from which candidate samples can be drawn. These samples are either accepted or rejected depending on a test involving the ratio of the target and proposal densities. In this paper we introduce an adaptive method to build a sequence of proposal pdf's that approximate the target density and hence can ensure a high acceptance rate. In order to illustrate the application of the method we design an accept/reject particle filter and then assess its performance and sampling efficiency numerically, by means of computer simulations.},
keywords = {adaptive accept/reject sampling, Adaptive rejection sampling, arbitrary target probability distributions, Computer Simulation, Filtering, Monte Carlo integration, Monte Carlo methods, posterior probability distributions, Probability, Probability density function, Probability distribution, Proposals, Rejection sampling, Sampling methods, sensor networks, Signal processing algorithms, signal sampling, Testing},
pubstate = {published},
tppubtype = {inproceedings}
}
Vinuelas-Peris, Pablo; Artés-Rodríguez, Antonio
Sensing Matrix Optimization in Distributed Compressed Sensing Proceedings Article
In: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 638–641, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
@inproceedings{Vinuelas-Peris2009,
title = {Sensing Matrix Optimization in Distributed Compressed Sensing},
author = {Pablo Vinuelas-Peris and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5278496},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {638--641},
publisher = {IEEE},
address = {Cardiff},
abstract = {Distributed compressed sensing (DCS) seeks to simultaneously measure signals that are each individually sparse in some domain(s) and also mutually correlated. In this paper we consider the scenario in which the (overcomplete) bases for the common component and the innovations are different. We propose and analyze a distributed coding strategy for the common component, and also the use of the efficient projection (EP) method for optimizing the sensing matrices in this setting. We show the effectiveness of our approach by computer simulations using orthogonal matching pursuit (OMP) as the joint recovery method, and we discuss the configuration of the distribution strategy.},
keywords = {Compressed sensing, Computer Simulation, computer simulations, correlated signal, Correlated signals, correlation theory, Dictionaries, distributed coding strategy, distributed compressed sensing, Distributed control, efficient projection method, Encoding, joint recovery method, Matching pursuit algorithms, Optimization methods, orthogonal matching pursuit, Projection Matrix Optimization, sensing matrix optimization, Sensor Network, Sensor phenomena and characterization, Sensor systems, Signal processing, Sparse matrices, Technological innovation},
pubstate = {published},
tppubtype = {inproceedings}
}
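The joint recovery step mentioned in the abstract above relies on orthogonal matching pursuit. As a minimal, single-signal illustration (the common/innovation structure and the sensing-matrix optimization are not reproduced, and the problem sizes are arbitrary assumptions), the sketch below recovers a synthetic sparse vector from random Gaussian projections with scikit-learn's OMP implementation.

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.default_rng(0)
n, m, k = 128, 40, 5            # ambient dimension, measurements, sparsity (illustrative)

# Synthetic k-sparse signal and a random Gaussian sensing matrix.
x = np.zeros(n)
support = rng.choice(n, size=k, replace=False)
x[support] = rng.normal(size=k)
A = rng.normal(size=(m, n)) / np.sqrt(m)
y = A @ x + 0.01 * rng.normal(size=m)

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=k, fit_intercept=False)
omp.fit(A, y)                   # A plays the role of the dictionary
x_hat = omp.coef_

print("relative recovery error:", np.linalg.norm(x_hat - x) / np.linalg.norm(x))
print("recovered support matches:", set(np.flatnonzero(x_hat)) == set(support))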
Perez-Cruz, Fernando; Rodrigues, Miguel R D; Verdu, Sergio
Optimal Precoding for Multiple-Input Multiple-Output Gaussian Channels Proceedings Article
In: Seminar PIIRS, Princeton, 2009.
@inproceedings{Perez-Cruz2009a,
title = {Optimal Precoding for Multiple-Input Multiple-Output Gaussian Channels},
author = {Fernando Perez-Cruz and Miguel R D Rodrigues and Sergio Verdu},
url = {http://eprints.pascal-network.org/archive/00006754/},
year = {2009},
date = {2009-01-01},
booktitle = {Seminar PIIRS},
address = {Princeton},
abstract = {We investigate the linear precoding and power allocation policies that maximize the mutual information for general multiple-input multiple-output (MIMO) Gaussian channels with arbitrary input distributions, by capitalizing on the relationship between mutual information and minimum mean-square error. The optimal linear precoder satisfies a fixed-point equation as a function of the channel and the input constellation. For nonGaussian inputs, a nondiagonal precoding matrix in general increases the information transmission rate, even for parallel noninteracting channels. Whenever precoding is precluded, the optimal power allocation policy also satisfies a fixed-point equation; we put forth a generalization of the mercury/waterfilling algorithm, previously proposed for parallel noninterfering channels, in which the mercury level accounts not only for the nonGaussian input distributions, but also for the interference among inputs.},
keywords = {Theory \& Algorithms},
pubstate = {published},
tppubtype = {inproceedings}
}
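For Gaussian inputs, the power-allocation problem referenced in the abstract above reduces to classical waterfilling over parallel channels; the mercury/waterfilling generalization for arbitrary input constellations is the paper's subject and is not reproduced here. The sketch below covers only that baseline Gaussian-input case, with illustrative subchannel gains, solving for the water level by bisection.

import numpy as np

def waterfilling(gains, total_power, tol=1e-9):
    """Classical waterfilling: maximize sum(log(1 + p_i * g_i)) subject to
    sum(p_i) = total_power and p_i >= 0. Returns the optimal power vector."""
    g = np.asarray(gains, dtype=float)
    lo, hi = 0.0, total_power + 1.0 / g.min()     # bracket for the water level mu
    while hi - lo > tol:
        mu = 0.5 * (lo + hi)
        p = np.maximum(mu - 1.0 / g, 0.0)
        if p.sum() > total_power:
            hi = mu
        else:
            lo = mu
    return np.maximum(0.5 * (lo + hi) - 1.0 / g, 0.0)

gains = np.array([2.0, 1.0, 0.25, 0.05])          # illustrative subchannel gains
p = waterfilling(gains, total_power=2.0)
print("power allocation:", np.round(p, 4), "sum =", round(float(p.sum()), 4))
print("achievable rate (bits):", np.sum(np.log2(1.0 + p * gains)))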
Miguez, Joaquin; Maiz, Cristina S; Djuric, Petar M; Crisan, Dan
Sequential Monte Carlo Optimization Using Artificial State-Space Models Proceedings Article
In: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 268–273, IEEE, Marco Island, FL, 2009.
@inproceedings{Miguez2009,
title = {Sequential Monte Carlo Optimization Using Artificial State-Space Models},
author = {Joaquin Miguez and Cristina S Maiz and Petar M Djuric and Dan Crisan},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4785933},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
pages = {268--273},
publisher = {IEEE},
address = {Marco Island, FL},
abstract = {We introduce a method for sequential minimization of a certain class of (possibly non-convex) cost functions with respect to a high dimensional signal of interest. The proposed approach involves the transformation of the optimization problem into one of estimation in a discrete-time dynamical system. In particular, we describe a methodology for constructing an artificial state-space model which has the signal of interest as its unobserved dynamic state. The model is "adapted" to the cost function in the sense that the maximum a posteriori (MAP) estimate of the system state is also a global minimizer of the cost function. The advantage of the estimation framework is that we can draw from a pool of sequential Monte Carlo methods, for particle approximation of probability measures in dynamic systems, that enable the numerical computation of MAP estimates. We provide examples of how to apply the proposed methodology, including some illustrative simulation results.},
keywords = {Acceleration, Cost function, Design optimization, discrete-time dynamical system, Educational institutions, Mathematics, maximum a posteriori estimate, maximum likelihood estimation, minimisation, Monte Carlo methods, Optimization methods, Probability distribution, sequential Monte Carlo optimization, Sequential optimization, Signal design, State-space methods, state-space model, Stochastic optimization},
pubstate = {published},
tppubtype = {inproceedings}
}
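As a companion to the abstract above, the sketch below illustrates the particle-style search idea in Python: particles are propagated with random moves, weighted by exp(-cost), resampled, and the best particle is tracked as the running minimizer. This is only a generic sequential-Monte-Carlo-style minimizer under illustrative settings (cost function, jitter scale, particle count); the artificial state-space construction and the MAP argument of the paper are not reproduced.

    import numpy as np

    def smc_minimize(cost, dim, n_particles=500, n_iter=50, jitter=0.3, seed=0):
        """Sequential Monte Carlo style minimization of a (possibly non-convex) cost."""
        rng = np.random.default_rng(seed)
        particles = rng.uniform(-5.0, 5.0, size=(n_particles, dim))
        best_x, best_c = None, np.inf
        for _ in range(n_iter):
            # Propagate: random-walk move of every particle.
            particles = particles + jitter * rng.standard_normal(particles.shape)
            costs = np.apply_along_axis(cost, 1, particles)
            # Keep track of the running minimizer.
            i = int(np.argmin(costs))
            if costs[i] < best_c:
                best_x, best_c = particles[i].copy(), costs[i]
            # Weight by exp(-cost) (shifted for numerical stability) and resample.
            w = np.exp(-(costs - costs.min()))
            w /= w.sum()
            particles = particles[rng.choice(n_particles, size=n_particles, p=w)]
        return best_x, best_c

    # Toy non-convex cost in 2-D with global minimum (value 0) at the origin.
    cost = lambda x: float(np.sum(x ** 2 - 3.0 * np.cos(2.0 * np.pi * x) + 3.0))
    x_min, c_min = smc_minimize(cost, dim=2)
    print(x_min, c_min)  # should land close to the origin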
Fresia, Maria; Perez-Cruz, Fernando; Poor, Vincent H; Verdu, Sergio
Joint Source-Channel Coding with Concatenated LDPC Codes Artículo en actas
En: Information Theory and Applications (ITA), San Diego, 2009.
Resumen | Enlaces | BibTeX | Etiquetas: Learning/Statistics & Optimisation
@inproceedings{Fresia2009a,
title = {Joint Source-Channel Coding with Concatenated LDPC Codes},
author = {Maria Fresia and Fernando Perez-Cruz and Vincent H Poor and Sergio Verdu},
url = {http://eprints.pascal-network.org/archive/00004905/},
year = {2009},
date = {2009-01-01},
booktitle = {Information Theory and Applications (ITA)},
address = {San Diego},
abstract = {The separation principle, a milestone in information theory, establishes that for stationary sources and channels there is no loss of optimality when a channel-independent source encoder followed by a source-independent channel encoder is used to transmit the data, as the code length tends to infinity. Hence, source and channel encoding have typically been treated as independent problems. For finite-length codes, the separation principle does not hold and a joint encoder and decoder can potentially increase the achieved information transmission rate. In this paper, a scheme for joint source-channel coding based on low-density parity-check (LDPC) codes is presented. The source is compressed and protected with two concatenated LDPC codes, and a joint belief propagation decoder is implemented. The EXIT chart performance of the proposed schemes is studied. The results are verified with some illustrative experiments.},
keywords = {Learning/Statistics \& Optimisation},
pubstate = {published},
tppubtype = {inproceedings}
}
Goez, Roger; Lazaro, Marcelino
Training of Neural Classifiers by Separating Distributions at the Hidden Layer Artículo en actas
En: 2009 IEEE International Workshop on Machine Learning for Signal Processing, pp. 1–6, IEEE, Grenoble, 2009, ISBN: 978-1-4244-4947-7.
Resumen | Enlaces | BibTeX | Etiquetas: Artificial neural networks, Bayesian methods, Cost function, Curve fitting, Databases, Function approximation, Neural networks, Speech recognition, Support vector machine classification, Support vector machines
@inproceedings{Goez2009,
title = {Training of Neural Classifiers by Separating Distributions at the Hidden Layer},
author = {Roger Goez and Marcelino Lazaro},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5306240},
isbn = {978-1-4244-4947-7},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE International Workshop on Machine Learning for Signal Processing},
pages = {1--6},
publisher = {IEEE},
address = {Grenoble},
abstract = {A new cost function for training binary classifiers based on neural networks is proposed. This cost function aims at separating the distributions of the patterns of each class at the output of the hidden layer of the network. It has been implemented in a Generalized Radial Basis Function (GRBF) network and its performance has been evaluated on three different databases, showing advantages with respect to the conventional Mean Squared Error (MSE) cost function. With respect to the Support Vector Machine (SVM) classifier, the proposed method also has advantages in terms of both performance and complexity.},
keywords = {Artificial neural networks, Bayesian methods, Cost function, Curve fitting, Databases, Function approximation, Neural networks, Speech recognition, Support vector machine classification, Support vector machines},
pubstate = {published},
tppubtype = {inproceedings}
}
Plata-Chaves, Jorge; Lazaro, Marcelino
Closed-Form Error Exponent for the Neyman-Pearson Fusion of Markov Local Decisions Artículo en actas
En: 2009 IEEE/SP 15th Workshop on Statistical Signal Processing, pp. 533–536, IEEE, Cardiff, 2009, ISBN: 978-1-4244-2709-3.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Plata-Chaves2009,
title = {Closed-Form Error Exponent for the Neyman-Pearson Fusion of Markov Local Decisions},
author = {Jorge Plata-Chaves and Marcelino Lazaro},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=5278522},
isbn = {978-1-4244-2709-3},
year = {2009},
date = {2009-01-01},
booktitle = {2009 IEEE/SP 15th Workshop on Statistical Signal Processing},
pages = {533--536},
publisher = {IEEE},
address = {Cardiff},
abstract = {In this correspondence, we derive a closed-form expression of the error exponent associated with the binary Neyman-Pearson test performed at the fusion center of a distributed detection system where a large number of local detectors take dependent binary decisions regarding a specific phenomenon. We assume that the sensors are equally spaced along a straight line, that their local decisions are taken without any cooperation, and that they are transmitted to the fusion center over an error-free parallel access channel. Under each of the two possible hypotheses, H0 and H1, the correlation structure of the local binary decisions is modelled with a first-order binary Markov chain whose transition probabilities are linked to different physical parameters of the network. Through simulations based on the error exponent and on a deterministic physical model of the aforementioned transition probabilities, we study the effect of network density on the overall detection performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
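A minimal simulation sketch of the setting summarized above, in Python: local binary decisions along the line of sensors follow a first-order binary Markov chain under each hypothesis, and the fusion center evaluates the normalized log-likelihood ratio of the received decision vector. The transition probabilities below are illustrative values, not the physical model of the paper, and the closed-form error exponent itself is not reproduced.

    import numpy as np

    def simulate_decisions(n, p_stay, p1, rng):
        """First-order binary Markov chain: P(u_k = u_{k-1}) = p_stay, P(u_1 = 1) = p1."""
        u = np.empty(n, dtype=int)
        u[0] = int(rng.random() < p1)
        flips = (rng.random(n - 1) >= p_stay).astype(int)
        for k in range(1, n):
            u[k] = u[k - 1] ^ flips[k - 1]
        return u

    def log_likelihood(u, p_stay, p1):
        """Log-probability of a decision vector under the binary Markov model above."""
        ll = np.log(p1 if u[0] == 1 else 1.0 - p1)
        stays = (u[1:] == u[:-1])
        return ll + stays.sum() * np.log(p_stay) + (~stays).sum() * np.log(1.0 - p_stay)

    rng = np.random.default_rng(0)
    n = 200
    # Illustrative models: local decisions are "stickier" and more often 1 under H1.
    h0 = dict(p_stay=0.6, p1=0.1)
    h1 = dict(p_stay=0.8, p1=0.6)
    u = simulate_decisions(n, **h1, rng=rng)
    llr = (log_likelihood(u, **h1) - log_likelihood(u, **h0)) / n
    print(llr)  # positive on average when the data follow H1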
Alvarez, Mauricio; Luengo, David; Lawrence, Neil D
Latent Force Models Artículo en actas
En: Conf. on Artificial Intelligence and Statistics, Clearwater Beach, 2009.
BibTeX | Etiquetas:
@inproceedings{Alvarez2009,
title = {Latent Force Models},
author = {Mauricio Alvarez and David Luengo and Neil D Lawrence},
year = {2009},
date = {2009-01-01},
booktitle = {Conf. on Artificial Intelligence and Statistics},
address = {Clearwater Beach},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Vazquez-Vilar, Gonzalo; Majjigi, Vinay; Sezgin, Aydin; Paulraj, Arogyaswami
Mobility Dependent Feedback Scheme for point-to-point MIMO Systems Artículo en actas
En: Asilomar Conference on Signals, Systems, and Computers (Asilomar SSC 2008), Pacific Grove, CA, U.S.A., 2008.
BibTeX | Etiquetas:
@inproceedings{asilomar2008,
title = {Mobility Dependent Feedback Scheme for point-to-point MIMO Systems},
author = {Gonzalo Vazquez-Vilar and Vinay Majjigi and Aydin Sezgin and Arogyaswami Paulraj},
year = {2008},
date = {2008-10-01},
booktitle = {Asilomar Conference on Signals, Systems, and Computers (Asilomar SSC 2008)},
address = {Pacific Grove, CA, U.S.A.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koch, Tobias; Lapidoth, Amos
On Multipath Fading Channels at High SNR Artículo en actas
En: 2008 IEEE International Symposium on Information Theory, pp. 1572–1576, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.
Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters
@inproceedings{Koch2008,
title = {On Multipath Fading Channels at High SNR},
author = {Tobias Koch and Amos Lapidoth},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4595252},
isbn = {978-1-4244-2256-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Symposium on Information Theory},
pages = {1572--1576},
publisher = {IEEE},
address = {Toronto},
abstract = {This paper studies the capacity of discrete-time multipath fading channels. It is assumed that the number of paths is finite, i.e., that the channel output is influenced by the present and by the L previous channel inputs. A noncoherent channel model is considered where neither the transmitter nor the receiver is cognizant of the fading's realization, but both are aware of its statistics. The focus is on capacity at high signal-to-noise ratios (SNR). In particular, the capacity pre-loglog, defined as the limiting ratio of the capacity to loglog(SNR) as the SNR tends to infinity, is studied. It is shown that, irrespective of the number of paths L, the capacity pre-loglog is 1.},
keywords = {channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters},
pubstate = {published},
tppubtype = {inproceedings}
}
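In the notation of the abstract above, the quantity studied can be restated in LaTeX as the capacity pre-loglog

    \Lambda \;\triangleq\; \lim_{\mathrm{SNR}\to\infty} \frac{C(\mathrm{SNR})}{\log\log(\mathrm{SNR})},

where C(SNR) denotes the channel capacity; the reported result is that \Lambda = 1 for every finite number of paths L.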
Vazquez, Manuel A; Miguez, Joaquin
A Per-Survivor Processing Algorithm for Maximum Likelihood Equalization of MIMO Channels with Unknown Order Artículo en actas
En: 2008 International ITG Workshop on Smart Antennas, pp. 387–391, IEEE, Vienna, 2008, ISBN: 978-1-4244-1756-8.
Resumen | Enlaces | BibTeX | Etiquetas: Channel estimation, channel impulse response, computational complexity, Computer science education, Computer Simulation, Degradation, Frequency, frequency-selective multiple-input multiple-output, maximum likelihood detection, maximum likelihood equalization, maximum likelihood estimation, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO channels, MIMO communication, per-survivor processing algorithm, time-selective channels, Transmitting antennas
@inproceedings{Vazquez2008,
title = {A Per-Survivor Processing Algorithm for Maximum Likelihood Equalization of MIMO Channels with Unknown Order},
author = {Manuel A Vazquez and Joaquin Miguez},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4475587},
isbn = {978-1-4244-1756-8},
year = {2008},
date = {2008-01-01},
booktitle = {2008 International ITG Workshop on Smart Antennas},
pages = {387--391},
publisher = {IEEE},
address = {Vienna},
abstract = {In the equalization of frequency-selective multiple-input multiple-output (MIMO) channels it is usually assumed that the length of the channel impulse response (CIR), also referred to as the channel order, is known. However, this is not true in most practical situations and, in order to avoid the serious performance degradation that occurs when the CIR length is underestimated, a channel with "more than enough" taps is usually considered. This possibly means overestimating the channel order, and is not desirable since the computational complexity of maximum likelihood sequence detection (MLSD) in frequency-selective channels grows exponentially with the channel order. In addition, the higher the channel order considered, the larger the number of channel coefficients that need to be estimated from the same set of observations. In this paper, we introduce an algorithm for MLSD that incorporates the full estimation of the MIMO CIR parameters, including its order. The proposed technique is based on the per-survivor processing (PSP) methodology; it admits both blind and semiblind implementations, depending on the availability of pilot data, and is designed to work with time-selective channels. Besides the analytical derivation of the algorithm, we provide computer simulation results that illustrate the effectiveness of the resulting receiver.},
keywords = {Channel estimation, channel impulse response, computational complexity, Computer science education, Computer Simulation, Degradation, Frequency, frequency-selective multiple-input multiple-output, maximum likelihood detection, maximum likelihood equalization, maximum likelihood estimation, maximum likelihood sequence detection, maximum likelihood sequence estimation, MIMO, MIMO channels, MIMO communication, per-survivor processing algorithm, time-selective channels, Transmitting antennas},
pubstate = {published},
tppubtype = {inproceedings}
}
Miguez, Joaquin
Analysis of a Sequential Monte Carlo Optimization Methodology Artículo en actas
En: 16th European Signal Processing Conference (EUSIPCO 2008), Lausanne, 2008.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Miguez2008,
title = {Analysis of a Sequential Monte Carlo Optimization Methodology},
author = {Joaquin Miguez},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2008/papers/1569105254.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {16th European Signal Processing Conference (EUSIPCO 2008)},
address = {Lausanne},
abstract = {We investigate a family of stochastic exploration methods that has been recently proposed to carry out estimation and prediction in discrete-time random dynamical systems. The key to the novel approach is to identify a cost function whose minima provide valid estimates of the system state at successive time instants. This function is recursively optimized using a sequential Monte Carlo minimization (SMCM) procedure which is similar to standard particle filtering algorithms but does not require an explicit probabilistic model to be imposed on the system. In this paper, we analyze the asymptotic convergence of SMCM methods and show that a properly designed algorithm produces a sequence of system-state estimates with individually minimal contributions to the cost function. We apply the SMCM method to a target tracking problem in order to illustrate how convergence is achieved in the way predicted by the theory.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Perez-Cruz, Fernando
Kullback-Leibler Divergence Estimation of Continuous Distributions Artículo en actas
En: 2008 IEEE International Symposium on Information Theory, pp. 1666–1670, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2.
Resumen | Enlaces | BibTeX | Etiquetas: Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions
@inproceedings{Perez-Cruz2008,
title = {Kullback-Leibler Divergence Estimation of Continuous Distributions},
author = {Fernando Perez-Cruz},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4595271},
isbn = {978-1-4244-2256-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Symposium on Information Theory},
pages = {1666--1670},
publisher = {IEEE},
address = {Toronto},
abstract = {We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved by estimating the densities first. Our main result shows that this intermediate step is unnecessary and that the divergence can be estimated using either the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, such as the exponential or Erlang. We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem.},
keywords = {Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions},
pubstate = {published},
tppubtype = {inproceedings}
}
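A minimal sketch of a k-nearest-neighbour Kullback-Leibler divergence estimator of the kind analyzed above, in Python with SciPy, assuming i.i.d. samples x ~ P and y ~ Q in d dimensions. This is the commonly used k-NN form with its standard bias-correction term; the waiting-times-based convergence analysis of the paper is not reproduced here.

    import numpy as np
    from scipy.spatial import cKDTree

    def knn_kl_divergence(x, y, k=1):
        """Estimate D(P||Q) in nats from samples x ~ P (n x d) and y ~ Q (m x d)."""
        n, d = x.shape
        m = y.shape[0]
        # Distance from each x_i to its k-th nearest neighbour within x (self excluded)...
        rho = cKDTree(x).query(x, k=k + 1)[0][:, -1]
        # ...and to its k-th nearest neighbour within y.
        nu = cKDTree(y).query(x, k=k)[0]
        if nu.ndim > 1:
            nu = nu[:, -1]
        return d * np.mean(np.log(nu / rho)) + np.log(m / (n - 1.0))

    # Sanity check against the closed form for two unit-variance Gaussians (true value 0.5 nats).
    rng = np.random.default_rng(0)
    x = rng.normal(0.0, 1.0, size=(5000, 1))
    y = rng.normal(1.0, 1.0, size=(5000, 1))
    print(knn_kl_divergence(x, y))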
Perez-Cruz, Fernando; Rodrigues, Miguel R D; Verdu, Sergio
Optimal Precoding for Digital Subscriber Lines Artículo en actas
En: 2008 IEEE International Conference on Communications, pp. 1200–1204, IEEE, Beijing, 2008, ISBN: 978-1-4244-2075-9.
Resumen | Enlaces | BibTeX | Etiquetas: Bit error rate, channel matrix diagonalization, Communications Society, Computer science, digital subscriber lines, DSL, Equations, fixed-point equation, Gaussian channels, least mean squares methods, linear codes, matrix algebra, MIMO, MIMO communication, MIMO Gaussian channel, minimum mean squared error method, MMSE, multiple-input multiple-output communication, Mutual information, optimal linear precoder, precoding, Telecommunications, Telephony
@inproceedings{Perez-Cruz2008a,
title = {Optimal Precoding for Digital Subscriber Lines},
author = {Fernando Perez-Cruz and Miguel R D Rodrigues and Sergio Verdu},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4533270},
isbn = {978-1-4244-2075-9},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE International Conference on Communications},
pages = {1200--1204},
publisher = {IEEE},
address = {Beijing},
abstract = {We determine the linear precoding policy that maximizes the mutual information for general multiple-input multiple-output (MIMO) Gaussian channels with arbitrary input distributions, by capitalizing on the relationship between mutual information and minimum mean squared error (MMSE). The optimal linear precoder can be computed by means of a fixed-point equation as a function of the channel and the input constellation. We show that diagonalizing the channel matrix does not maximize the information transmission rate for nonGaussian inputs. A full precoding matrix may significantly increase the information transmission rate, even for parallel non-interacting channels. We illustrate the application of our results to typical Gigabit DSL systems.},
keywords = {Bit error rate, channel matrix diagonalization, Communications Society, Computer science, digital subscriber lines, DSL, Equations, fixed-point equation, Gaussian channels, least mean squares methods, linear codes, matrix algebra, MIMO, MIMO communication, MIMO Gaussian channel, minimum mean squared error method, MMSE, multiple-input multiple-output communication, Mutual information, optimal linear precoder, precoding, Telecommunications, Telephony},
pubstate = {published},
tppubtype = {inproceedings}
}
Koch, Tobias; Lapidoth, Amos
Multipath Channels of Bounded Capacity Artículo en actas
En: 2008 IEEE Information Theory Workshop, pp. 6–10, IEEE, Oporto, 2008, ISBN: 978-1-4244-2269-2.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Koch2008a,
title = {Multipath Channels of Bounded Capacity},
author = {Tobias Koch and Amos Lapidoth},
url = {http://www.researchgate.net/publication/4353168_Multipath_channels_of_bounded_capacity},
isbn = {978-1-4244-2269-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE Information Theory Workshop},
pages = {6--10},
publisher = {IEEE},
address = {Oporto},
abstract = {The capacity of discrete-time, non-coherent, multi-path fading channels is considered. It is shown that if the delay spread is large in the sense that the variances of the path gains do not decay faster than geometrically, then capacity is bounded in the signal-to-noise ratio.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio
Linear Dimensionality Reduction With Gaussian Mixture Models Artículo en actas
En: Cognitive Information Processing (CIP) 2008, Santorini, 2008.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{JoseM.Leiva-murillo2008,
title = {Linear Dimensionality Reduction With Gaussian Mixture Models},
author = {Jose M Leiva-Murillo and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.798},
year = {2008},
date = {2008-01-01},
booktitle = {Cognitive Information Processing (CIP) 2008},
address = {Santorini},
abstract = {In this paper, we explore the application of several information-theoretic criteria to the problem of reducing the dimension in pattern recognition. We consider the use of Gaussian mixture models for estimating the distribution of the data. Three algorithms are proposed for linear feature extraction by maximization of the mutual information, the likelihood, or the hypothesis test, respectively. The experiments show that the proposed methods outperform the classical methods based on parametric Gaussian models, and avoid the high computational complexity of nonparametric kernel density estimators.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koch, Tobias; Lapidoth, Amos
Multipath Channels of Unbounded Capacity Artículo en actas
En: 2008 IEEE 25th Convention of Electrical and Electronics Engineers in Israel, pp. 640–644, IEEE, Eilat, 2008, ISBN: 978-1-4244-2481-8.
Resumen | Enlaces | BibTeX | Etiquetas: channel capacity, discrete-time capacity, Entropy, Fading, fading channels, Frequency, H infinity control, Information rates, multipath channels, multipath fading channels, noncoherent, noncoherent capacity, path gains decay, Signal to noise ratio, statistics, Transmitters, unbounded capacity
@inproceedings{Koch2008b,
title = {Multipath Channels of Unbounded Capacity},
author = {Tobias Koch and Amos Lapidoth},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4736611},
isbn = {978-1-4244-2481-8},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE 25th Convention of Electrical and Electronics Engineers in Israel},
pages = {640--644},
publisher = {IEEE},
address = {Eilat},
abstract = {The capacity of discrete-time, noncoherent, multipath fading channels is considered. It is shown that if the variances of the path gains decay faster than exponentially, then capacity is unbounded in the transmit power.},
keywords = {channel capacity, discrete-time capacity, Entropy, Fading, fading channels, Frequency, H infinity control, Information rates, multipath channels, multipath fading channels, noncoherent, noncoherent capacity, path gains decay, Signal to noise ratio, statistics, Transmitters, unbounded capacity},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodrigues, Miguel R D; Perez-Cruz, Fernando; Verdu, Sergio
Multiple-Input Multiple-Output Gaussian Channels: Optimal Covariance for Non-Gaussian Inputs Artículo en actas
En: 2008 IEEE Information Theory Workshop, pp. 445–449, IEEE, Porto, 2008, ISBN: 978-1-4244-2269-2.
Resumen | Enlaces | BibTeX | Etiquetas: Binary phase shift keying, covariance matrices, Covariance matrix, deterministic MIMO Gaussian channel, fixed-point equation, Gaussian channels, Gaussian noise, Information rates, intersymbol interference, least mean squares methods, Magnetic recording, mercury-waterfilling power allocation policy, MIMO, MIMO communication, minimum mean-squared error, MMSE, MMSE matrix, multiple-input multiple-output system, Multiple-Input Multiple-Output Systems, Mutual information, Optimal Input Covariance, Optimization, Telecommunications
@inproceedings{Rodrigues2008,
title = {Multiple-Input Multiple-Output Gaussian Channels: Optimal Covariance for Non-Gaussian Inputs},
author = {Miguel R D Rodrigues and Fernando Perez-Cruz and Sergio Verdu},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4578704},
isbn = {978-1-4244-2269-2},
year = {2008},
date = {2008-01-01},
booktitle = {2008 IEEE Information Theory Workshop},
pages = {445--449},
publisher = {IEEE},
address = {Porto},
abstract = {We investigate the input covariance that maximizes the mutual information of deterministic multiple-input multiple-output (MIMO) Gaussian channels with arbitrary (not necessarily Gaussian) input distributions, by capitalizing on the relationship between the gradient of the mutual information and the minimum mean-squared error (MMSE) matrix. We show that the optimal input covariance satisfies a simple fixed-point equation involving key system quantities, including the MMSE matrix. We also specialize the form of the optimal input covariance to the asymptotic regimes of low and high snr. We demonstrate that in the low-snr regime the optimal covariance fully correlates the inputs to better combat noise. In contrast, in the high-snr regime the optimal covariance is diagonal with diagonal elements obeying the generalized mercury/waterfilling power allocation policy. Numerical results illustrate that covariance optimization may lead to significant gains with respect to conventional strategies based on channel diagonalization followed by mercury/waterfilling or waterfilling power allocation, particularly in the regimes of medium and high snr.},
keywords = {Binary phase shift keying, covariance matrices, Covariance matrix, deterministic MIMO Gaussian channel, fixed-point equation, Gaussian channels, Gaussian noise, Information rates, intersymbol interference, least mean squares methods, Magnetic recording, mercury-waterfilling power allocation policy, MIMO, MIMO communication, minimum mean-squared error, MMSE, MMSE matrix, multiple-input multiple-output system, Multiple-Input Multiple-Output Systems, Mutual information, Optimal Input Covariance, Optimization, Telecommunications},
pubstate = {published},
tppubtype = {inproceedings}
}
Leiva-Murillo, Jose M; Artés-Rodríguez, Antonio
Algorithms for Gaussian Bandwidth Selection in Kernel Density Estimators Artículo en actas
En: NIPS 2008, Workshop on Optimization for Machine Learning Vancouver, Vancouver, 2008.
Resumen | Enlaces | BibTeX | Etiquetas:
@inproceedings{Leiva-Murillo2008a,
title = {Algorithms for Gaussian Bandwidth Selection in Kernel Density Estimators},
author = {Jose M Leiva-Murillo and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://www.researchgate.net/publication/228859873_Algorithms_for_gaussian_bandwidth_selection_in_kernel_density_estimators},
year = {2008},
date = {2008-01-01},
booktitle = {NIPS 2008, Workshop on Optimization for Machine Learning Vancouver},
address = {Vancouver},
abstract = {In this paper we study the classical statistical problem of choosing an appropriate bandwidth for Kernel Density Estimators. For the special case of the Gaussian kernel, two algorithms are proposed for the spherical covariance matrix and for the general case, respectively. These methods avoid the unsatisfactory procedure of tuning the bandwidth while evaluating the likelihood, which is impractical with multivariate data in the general case. The convergence conditions are provided together with the proposed algorithms. We measure the accuracy of the obtained models with a set of classification experiments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
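For context on the abstract above, the sketch below implements the baseline that the paper argues is unsatisfactory: selecting a single spherical Gaussian bandwidth by maximizing the leave-one-out log-likelihood over a grid. The data and the bandwidth grid are illustrative choices, not the algorithms proposed in the paper.

    import numpy as np

    def loo_log_likelihood(x, h):
        """Leave-one-out log-likelihood of a spherical Gaussian KDE with bandwidth h."""
        n, d = x.shape
        sq_dists = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)
        k = np.exp(-sq_dists / (2.0 * h * h)) / ((2.0 * np.pi * h * h) ** (d / 2.0))
        np.fill_diagonal(k, 0.0)               # exclude each point from its own estimate
        return np.sum(np.log(k.sum(axis=1) / (n - 1)))

    rng = np.random.default_rng(0)
    x = rng.normal(size=(300, 2))              # illustrative 2-D data
    grid = np.linspace(0.05, 1.0, 40)
    scores = [loo_log_likelihood(x, h) for h in grid]
    print("selected bandwidth:", grid[int(np.argmax(scores))])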
de-Prado-Cumplido, Mario; Artés-Rodríguez, Antonio
SVM Discovery of Causation Direction by Machine Learning Techniques Artículo en actas
En: NIPS’08, Workshop on Causality, Vancouver, 2008.
BibTeX | Etiquetas:
@inproceedings{Mariode-Prado-Cumplido2008,
title = {SVM Discovery of Causation Direction by Machine Learning Techniques},
author = {Mario de-Prado-Cumplido and Antonio Art\'{e}s-Rodr\'{i}guez},
year = {2008},
date = {2008-01-01},
booktitle = {NIPS’08, Workshop on Causality},
address = {Vancouver},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ruiz, Manuel Martinez; Artés-Rodríguez, Antonio; Sabatini, R
Progressive Still Image Transmission over a Tactical Data Link Network Artículo en actas
En: RTO 2008 Information Systems Technology Panel (IST) Symposium, Prague, 2008.
Resumen | BibTeX | Etiquetas:
@inproceedings{MartinezRuiz2008,
title = {Progressive Still Image Transmission over a Tactical Data Link Network},
author = {Manuel Martinez Ruiz and Antonio Art\'{e}s-Rodr\'{i}guez and R Sabatini},
year = {2008},
date = {2008-01-01},
booktitle = {RTO 2008 Information Systems Technology Panel (IST) Symposium},
address = {Prague},
abstract = {Future military communications will be required to provide higher data capacity and wider bandwidth in real time, greater flexibility, reliability, robustness and seamless networking capabilities. The next generation of communication systems and standards should be able to perform well in a littoral combat environment with a high density of civilian emissions and “ad-hoc” spot jammers. In this operational context it is extremely important to ensure the proper performance of the information grid and to provide not all the available but only the required information in real time, either by broadcasting or upon demand, with the best possible “quality of service”. Existing tactical data link systems and standards have been designed to convey mainly textual information such as surveillance and identification data, electronic warfare parameters, aircraft control information and coded voice. The future tactical data link systems and standards should take into consideration the multimedia nature of most of the dispersed and “fuzzy” information available in the battlefield, so as to correlate the ISR components in a way that better contributes to Network Centric Operations. For this to be accomplished, new wideband coalition waveforms should be developed and new coding and image compression standards should be taken into account, such as MPEG-7 (Multimedia Content Description Interface), MPEG-21, JPEG2000 and many others. In the meantime, it is important to find new applications for the current tactical data links in order to better exploit their capabilities and to overcome or minimize their limitations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bravo-Santos, Ángel M
Multireception Systems in Mobile Environments Artículo en actas
En: 2008 International Workshop on Advances in Communications, Victoria BC, 2008.
BibTeX | Etiquetas:
@inproceedings{Bravo-Santos2008,
title = {Multireception Systems in Mobile Environments},
author = {\'{A}ngel M Bravo-Santos},
year = {2008},
date = {2008-01-01},
booktitle = {2008 International Workshop on Advances in Communications},
address = {Victoria BC},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Plata-Chaves, Jorge; Lázaro, Marcelino; Artés-Rodríguez, Antonio
Decentralized Detection in a Dense Wireless Sensor Network with Correlated Observations Artículo en actas
En: International Workshop on Information Theory for Sensor Networks (WITS 2008), Santorini, 2008.
Enlaces | BibTeX | Etiquetas:
@inproceedings{Plata-Chaves2008,
title = {Decentralized Detection in a Dense Wireless Sensor Network with Correlated Observations},
author = {Jorge Plata-Chaves and Marcelino L\'{a}zaro and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://www.dcc.fc.up.pt/wits08/wits-advance-program.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {International Workshop on Information Theory for Sensor Networks (WITS 2008)},
address = {Santorini},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Santiago-Mozos, Ricardo; Fernandez-Lorenzana, R; Perez-Cruz, Fernando; Artés-Rodríguez, Antonio
On the Uncertainty in Sequential Hypothesis Testing Artículo en actas
En: 2008 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro, pp. 1223–1226, IEEE, Paris, 2008, ISBN: 978-1-4244-2002-5.
Resumen | Enlaces | BibTeX | Etiquetas: binary hypothesis test, Biomedical imaging, Detectors, H infinity control, likelihood ratio, Medical diagnostic imaging, medical image application, medical image processing, Medical tests, patient diagnosis, Probability, Random variables, Sequential analysis, sequential hypothesis testing, sequential probability ratio test, Signal processing, Testing, tuberculosis diagnosis, Uncertainty
@inproceedings{Santiago-Mozos2008,
title = {On the Uncertainty in Sequential Hypothesis Testing},
author = {Ricardo Santiago-Mozos and R Fernandez-Lorenzana and Fernando Perez-Cruz and Antonio Art\'{e}s-Rodr\'{i}guez},
url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4541223},
isbn = {978-1-4244-2002-5},
year = {2008},
date = {2008-01-01},
booktitle = {2008 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro},
pages = {1223--1226},
publisher = {IEEE},
address = {Paris},
abstract = {We consider the problem of sequential hypothesis testing when the exact pdfs are not known but, instead, a set of iid samples is used to describe the hypotheses. We modify the classical test by introducing a likelihood ratio interval which accommodates the uncertainty in the pdfs. The test finishes when the whole likelihood ratio interval crosses one of the thresholds, and it reduces to the classical test as the number of samples used to describe the hypotheses tends to infinity. We illustrate the performance of this test in a medical image application related to tuberculosis diagnosis. We show in this example how the test confidence level can be accurately determined.},
keywords = {binary hypothesis test, Biomedical imaging, Detectors, H infinity control, likelihood ratio, Medical diagnostic imaging, medical image application, medical image processing, Medical tests, patient diagnosis, Probability, Random variables, Sequential analysis, sequential hypothesis testing, sequential probability ratio test, Signal processing, Testing, tuberculosis diagnosis, Uncertainty},
pubstate = {published},
tppubtype = {inproceedings}
}
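A minimal sketch of the classical sequential probability ratio test that the abstract above modifies, in Python: the log-likelihood ratio is accumulated sample by sample and the test stops when it crosses Wald's thresholds. The likelihood-ratio interval of the paper, which accounts for hypotheses described only by samples, is not reproduced; the two Gaussian hypotheses and the error levels below are illustrative.

    import numpy as np
    from scipy.stats import norm

    def sprt(samples, pdf0, pdf1, alpha=0.05, beta=0.05):
        """Classical Wald SPRT; returns the decision and the number of samples used."""
        upper = np.log((1.0 - beta) / alpha)   # cross this -> accept H1
        lower = np.log(beta / (1.0 - alpha))   # cross this -> accept H0
        llr = 0.0
        for t, z in enumerate(samples, start=1):
            llr += np.log(pdf1(z)) - np.log(pdf0(z))
            if llr >= upper:
                return "H1", t
            if llr <= lower:
                return "H0", t
        return "undecided", len(samples)

    rng = np.random.default_rng(0)
    data = rng.normal(1.0, 1.0, size=1000)     # observations actually drawn from H1
    decision, n_used = sprt(data, norm(0.0, 1.0).pdf, norm(1.0, 1.0).pdf)
    print(decision, n_used)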
Vila-Forcen, J E; Artés-Rodríguez, Antonio; Garcia-Frias, J
Compressive Sensing Detection of Stochastic Signals Artículo en actas
En: 2008 42nd Annual Conference on Information Sciences and Systems, pp. 956–960, IEEE, Princeton, 2008, ISBN: 978-1-4244-2246-3.
Resumen | Enlaces | BibTeX | Etiquetas: Additive white noise, AWGN, compressive sensing detection, dimensionality reduction techniques, Distortion measurement, Gaussian noise, matrix algebra, Mutual information, optimized projections, projection matrix, signal detection, Signal processing, signal reconstruction, Stochastic processes, stochastic signals, Support vector machine classification, Support vector machines, SVM
@inproceedings{Vila-Forcen2008,
title = {Compressive Sensing Detection of Stochastic Signals},
author = {J E Vila-Forcen and Antonio Art\'{e}s-Rodr\'{i}guez and J Garcia-Frias},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4558656},
isbn = {978-1-4244-2246-3},
year = {2008},
date = {2008-01-01},
booktitle = {2008 42nd Annual Conference on Information Sciences and Systems},
pages = {956--960},
publisher = {IEEE},
address = {Princeton},
abstract = {Inspired by recent work in compressive sensing, we propose a framework for the detection of stochastic signals from optimized projections. In order to generate a good projection matrix, we use dimensionality reduction techniques based on the maximization of the mutual information between the projected signals and their corresponding class labels. In addition, classification techniques based on support vector machines (SVMs) are applied for the final decision process. Simulation results show that the realizations of the stochastic process are detected with higher accuracy and lower complexity than a scheme performing signal reconstruction first, followed by detection based on the reconstructed signal.},
keywords = {Additive white noise, AWGN, compressive sensing detection, dimensionality reduction techniques, Distortion measurement, Gaussian noise, matrix algebra, Mutual information, optimized projections, projection matrix, signal detection, Signal processing, signal reconstruction, Stochastic processes, stochastic signals, Support vector machine classification, Support vector machines, SVM},
pubstate = {published},
tppubtype = {inproceedings}
}
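A minimal sketch of the detection pipeline described above, assuming random Gaussian projections in place of the mutual-information-optimized projection matrix of the paper and scikit-learn for the SVM stage: stochastic signals from two illustrative classes are projected to a lower dimension and classified directly, with no intermediate signal reconstruction.

    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.svm import SVC

    rng = np.random.default_rng(0)
    n_per_class, dim, proj_dim = 500, 128, 16

    # Two classes of stochastic signals: white noise vs. a correlated (smoother) process.
    h0 = rng.standard_normal((n_per_class, dim))
    h1 = np.cumsum(rng.standard_normal((n_per_class, dim)), axis=1) / np.sqrt(np.arange(1, dim + 1))
    x = np.vstack([h0, h1])
    y = np.concatenate([np.zeros(n_per_class), np.ones(n_per_class)])

    # Random projection matrix standing in for the optimized one described in the paper.
    phi = rng.standard_normal((dim, proj_dim)) / np.sqrt(proj_dim)
    x_proj = x @ phi

    x_tr, x_te, y_tr, y_te = train_test_split(x_proj, y, test_size=0.3, random_state=0)
    clf = SVC(kernel="rbf", gamma="scale").fit(x_tr, y_tr)
    print("detection accuracy:", clf.score(x_te, y_te))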