### 2014

Koch, Tobias

On the Dither-Quantized Gaussian Channel at Low SNR (Inproceedings)

In: 2014 IEEE International Symposium on Information Theory, pp. 186–190, IEEE, Honolulu, 2014, ISBN: 978-1-4799-5186-4.

Abstract | Links | BibTeX | Tags: Additive noise, channel capacity, dither quantized Gaussian channel, Entropy, Gaussian channels, low signal-to-noise-ratio, low-SNR asymptotic capacity, peak power constraint, peak-and-average-power-limited Gaussian channel, Quantization (signal), Signal to noise ratio

@inproceedings{Koch2014,
  title     = {On the Dither-Quantized {Gaussian} Channel at Low {SNR}},
  author    = {Koch, Tobias},
  url       = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6874820},
  isbn      = {978-1-4799-5186-4},
  year      = {2014},
  date      = {2014-01-01},
  booktitle = {2014 IEEE International Symposium on Information Theory},
  pages     = {186--190},
  publisher = {IEEE},
  address   = {Honolulu},
  abstract  = {We study the capacity of the peak-and-average-power-limited Gaussian channel when its output is quantized using a dithered, infinite-level, uniform quantizer of step size $\Delta$. We focus on the low signal-to-noise-ratio (SNR) regime, where communication at low spectral efficiencies takes place. We show that, when the peak-power constraint is absent, the low-SNR asymptotic capacity is equal to that of the unquantized channel irrespective of $\Delta$. We further derive an expression for the low-SNR asymptotic capacity for finite peak-to-average-power ratios and evaluate it in the low- and high-resolution limit. We demonstrate that, in this case, the low-SNR asymptotic capacity converges to that of the unquantized channel when $\Delta$ tends to zero, and it tends to zero when $\Delta$ tends to infinity.},
  keywords  = {Additive noise, channel capacity, dither quantized Gaussian channel, Entropy, Gaussian channels, low signal-to-noise-ratio, low-SNR asymptotic capacity, peak power constraint, peak-and-average-power-limited Gaussian channel, Quantization (signal), Signal to noise ratio},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

### 2009

Martino, Luca; Miguez, Joaquin

A Novel Rejection Sampling Scheme for Posterior Probability Distributions (Inproceedings)

In: 2009 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2921–2924, IEEE, Taipei, 2009, ISSN: 1520-6149.

Abstract | Links | BibTeX | Tags: Additive noise, arbitrary target probability distributions, Bayes methods, Bayesian methods, Monte Carlo integration, Monte Carlo methods, Monte Carlo techniques, Overbounding, posterior probability distributions, Probability density function, Probability distribution, Proposals, Rejection sampling, rejection sampling scheme, Sampling methods, Signal processing algorithms, signal sampling, Upper bound

@inproceedings{Martino2009,
  title     = {A Novel Rejection Sampling Scheme for Posterior Probability Distributions},
  author    = {Martino, Luca and Miguez, Joaquin},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4960235},
  issn      = {1520-6149},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {2009 IEEE International Conference on Acoustics, Speech and Signal Processing},
  pages     = {2921--2924},
  publisher = {IEEE},
  address   = {Taipei},
  abstract  = {Rejection sampling (RS) is a well-known method to draw from arbitrary target probability distributions, which has important applications by itself or as a building block for more sophisticated Monte Carlo techniques. The main limitation to the use of RS is the need to find an adequate upper bound for the ratio of the target probability density function (pdf) over the proposal pdf from which the samples are generated. There are no general methods to analytically find this bound, except in the particular case in which the target pdf is log-concave. In this paper we adopt a Bayesian view of the problem and propose a general RS scheme to draw from the posterior pdf of a signal of interest using its prior density as a proposal function. The method enables the analytical calculation of the bound and can be applied to a large class of target densities. We illustrate its use with a simple numerical example.},
  keywords  = {Additive noise, arbitrary target probability distributions, Bayes methods, Bayesian methods, Monte Carlo integration, Monte Carlo methods, Monte Carlo techniques, Overbounding, posterior probability distributions, Probability density function, Probability distribution, Proposals, Rejection sampling, rejection sampling scheme, Sampling methods, Signal processing algorithms, signal sampling, Upper bound},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Djuric, Petar M; Bugallo, Monica F; Closas, Pau; Miguez, Joaquin

Measuring the Robustness of Sequential Methods (Inproceedings)

In: 2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop, pp. 29–32, IEEE, Aruba, Dutch Antilles, 2009, ISBN: 978-1-4244-5179-1.

Abstract | Links | BibTeX | Tags: Additive noise, cumulative distribution functions, data processing method, extended Kalman filtering, Extraterrestrial measurements, Filtering, Gaussian distribution, Gaussian noise, Kalman filters, Kolmogorov-Smirnov distance, Least squares approximation, Noise robustness, nonlinear filters, robustness, sequential methods, statistical distributions, telecommunication computing

@inproceedings{Djuric2009a,
  title     = {Measuring the Robustness of Sequential Methods},
  author    = {Djuric, Petar M and Bugallo, Monica F and Closas, Pau and Miguez, Joaquin},
  url       = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5413275},
  isbn      = {978-1-4244-5179-1},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {2009 IEEE 13th Digital Signal Processing Workshop and 5th IEEE Signal Processing Education Workshop},
  pages     = {29--32},
  publisher = {IEEE},
  address   = {Aruba, Dutch Antilles},
  abstract  = {Whenever we apply methods for processing data, we make a number of model assumptions. In reality, these assumptions are not always correct. Robust methods can withstand model inaccuracies, that is, despite some incorrect assumptions they can still produce good results. We often want to know how robust employed methods are. To that end we need to have a yardstick for measuring robustness. In this paper, we propose an approach for constructing such metrics for sequential methods. These metrics are derived from the Kolmogorov-Smirnov distance between the cumulative distribution functions of the actual observations and the ones based on the assumed model. The use of the proposed metrics is demonstrated with simulation examples.},
  keywords  = {Additive noise, cumulative distribution functions, data processing method, extended Kalman filtering, Extraterrestrial measurements, Filtering, Gaussian distribution, Gaussian noise, Kalman filters, Kolmogorov-Smirnov distance, Least squares approximation, Noise robustness, nonlinear filters, robustness, sequential methods, statistical distributions, telecommunication computing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}