## 2016 |

Vazquez-Vilar, Gonzalo; Campo, Adria Tauste; i Fabregas, Albert Guillen; Martinez, Alfonso Bayesian M-Ary Hypothesis Testing: The Meta-Converse and Verdú-Han Bounds Are Tight Journal Article IEEE Transactions on Information Theory, 62 (5), pp. 2324–2333, 2016, ISSN: 0018-9448. Abstract | Links | BibTeX | Tags: Bayes methods, Channel Coding, Electronic mail, error probability, Journal, Random variables, Testing @article{Vazquez-Vilar2016, title = {Bayesian M-Ary Hypothesis Testing: The Meta-Converse and Verdú-Han Bounds Are Tight}, author = {Gonzalo Vazquez-Vilar and Adria Tauste Campo and Albert Guillen i Fabregas and Alfonso Martinez}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7434042}, doi = {10.1109/TIT.2016.2542080}, issn = {0018-9448}, year = {2016}, date = {2016-05-01}, journal = {IEEE Transactions on Information Theory}, volume = {62}, number = {5}, pages = {2324--2333}, abstract = {Two alternative exact characterizations of the minimum error probability of Bayesian M-ary hypothesis testing are derived. The first expression corresponds to the error probability of an induced binary hypothesis test and implies the tightness of the meta-converse bound by Polyanskiy et al.; the second expression is a function of an information-spectrum measure and implies the tightness of a generalized Verdú-Han lower bound. The formulas characterize the minimum error probability of several problems in information theory and help to identify the steps where existing converse bounds are loose.}, keywords = {Bayes methods, Channel Coding, Electronic mail, error probability, Journal, Random variables, Testing}, pubstate = {published}, tppubtype = {article} } Two alternative exact characterizations of the minimum error probability of Bayesian M-ary hypothesis testing are derived. 
The first expression corresponds to the error probability of an induced binary hypothesis test and implies the tightness of the meta-converse bound by Polyanskiy et al.; the second expression is a function of an information-spectrum measure and implies the tightness of a generalized Verdú-Han lower bound. The formulas characterize the minimum error probability of several problems in information theory and help to identify the steps where existing converse bounds are loose. |

## 2014 |

Pastore, Adriano; Koch, Tobias; Fonollosa, Javier Rodriguez A Rate-Splitting Approach to Fading Channels With Imperfect Channel-State Information Journal Article IEEE Transactions on Information Theory, 60 (7), pp. 4266–4285, 2014, ISSN: 0018-9448. Abstract | Links | BibTeX | Tags: channel capacity, COMONSENS, DEIPRO, Entropy, Fading, fading channels, flat fading, imperfect channel-state information, MobileNET, Mutual information, OTOSiS, Random variables, Receivers, Signal to noise ratio, Upper bound @article{Pastore2014a, title = {A Rate-Splitting Approach to Fading Channels With Imperfect Channel-State Information}, author = {Adriano Pastore and Tobias Koch and Javier Rodriguez Fonollosa}, url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6832779 http://www.tsc.uc3m.es/~koch/files/IEEE_TIT_60(7).pdf http://arxiv.org/pdf/1301.6120.pdf}, issn = {0018-9448}, year = {2014}, date = {2014-01-01}, journal = {IEEE Transactions on Information Theory}, volume = {60}, number = {7}, pages = {4266--4285}, publisher = {IEEE}, abstract = {As shown by Médard, the capacity of fading channels with imperfect channel-state information can be lower-bounded by assuming a Gaussian channel input (X) with power (P) and by upper-bounding the conditional entropy (h(X|Y,hat H)) by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating (X) from ((Y,hat H)) . We demonstrate that, using a rate-splitting approach, this lower bound can be sharpened: by expressing the Gaussian input (X) as the sum of two independent Gaussian variables (X_1) and (X_2) and by applying Médard's lower bound first to bound the mutual information between (X_1) and (Y) while treating (X_2) as noise, and by applying it a second time to the mutual information between (X_2) and (Y) while assuming (X_1) to be known, we obtain a capacity lower bound that is strictly larger than Médard's lower bound. 
We then generalize this approach to an arbitrary number (L) of layers, where (X) is expressed as the sum of (L) independent Gaussian random variables of respective variances (P_ell ) , (ell = 1,dotsc ,L) summing up to (P) . Among all such rate-splitting bounds, we determine the supremum over power allocations (P_ell ) and total number of layers (L) . This supremum is achieved for (L rightarrow infty ) and gives rise to an analytically expressible capacity lower bound. For Gaussian fading, this novel bound is shown to converge to the Gaussian-input mutual information as the signal-to-noise ratio (SNR) grows, provided that the variance of the channel estimation error (H-hat H) tends to zero as the SNR tends to infinity.}, keywords = {channel capacity, COMONSENS, DEIPRO, Entropy, Fading, fading channels, flat fading, imperfect channel-state information, MobileNET, Mutual information, OTOSiS, Random variables, Receivers, Signal to noise ratio, Upper bound}, pubstate = {published}, tppubtype = {article} } As shown by Médard, the capacity of fading channels with imperfect channel-state information can be lower-bounded by assuming a Gaussian channel input (X) with power (P) and by upper-bounding the conditional entropy (h(X|Y,hat H)) by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating (X) from ((Y,hat H)) . We demonstrate that, using a rate-splitting approach, this lower bound can be sharpened: by expressing the Gaussian input (X) as the sum of two independent Gaussian variables (X_1) and (X_2) and by applying Médard's lower bound first to bound the mutual information between (X_1) and (Y) while treating (X_2) as noise, and by applying it a second time to the mutual information between (X_2) and (Y) while assuming (X_1) to be known, we obtain a capacity lower bound that is strictly larger than Médard's lower bound. 
We then generalize this approach to an arbitrary number (L) of layers, where (X) is expressed as the sum of (L) independent Gaussian random variables of respective variances (P_ell ) , (ell = 1,dotsc ,L) summing up to (P) . Among all such rate-splitting bounds, we determine the supremum over power allocations (P_ell ) and total number of layers (L) . This supremum is achieved for (L rightarrow infty ) and gives rise to an analytically expressible capacity lower bound. For Gaussian fading, this novel bound is shown to converge to the Gaussian-input mutual information as the signal-to-noise ratio (SNR) grows, provided that the variance of the channel estimation error (H-hat H) tends to zero as the SNR tends to infinity. |

Yang, Wei; Durisi, Giuseppe; Koch, Tobias; Polyanskiy, Yury Dispersion of Quasi-Static MIMO Fading Channels via Stokes' Theorem Inproceedings 2014 IEEE International Symposium on Information Theory, pp. 2072–2076, IEEE, Honolulu, 2014, ISBN: 978-1-4799-5186-4. Abstract | Links | BibTeX | Tags: channel capacity, differential form integration, Dispersion, Fading, fading channels, fading distribution, integration, Manifolds, Measurement, MIMO, MIMO communication, quasistatic MIMO fading channels dispersion, quasistatic multiple-input multiple-output fading, radio transmitters, Random variables, Stoke Theorem, transmitter @inproceedings{Yang2014b, title = {Dispersion of Quasi-Static MIMO Fading Channels via Stokes' Theorem}, author = {Wei Yang and Giuseppe Durisi and Tobias Koch and Yury Polyanskiy}, url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=6875198}, isbn = {978-1-4799-5186-4}, year = {2014}, date = {2014-01-01}, booktitle = {2014 IEEE International Symposium on Information Theory}, pages = {2072--2076}, publisher = {IEEE}, address = {Honolulu}, abstract = {This paper analyzes the channel dispersion of quasi-static multiple-input multiple-output fading channels with no channel state information at the transmitter. We show that the channel dispersion is zero under mild conditions on the fading distribution. 
The proof of our result is based on Stokes' theorem, which deals with the integration of differential forms on manifolds with boundary.}, keywords = {channel capacity, differential form integration, Dispersion, Fading, fading channels, fading distribution, integration, Manifolds, Measurement, MIMO, MIMO communication, quasistatic MIMO fading channels dispersion, quasistatic multiple-input multiple-output fading, radio transmitters, Random variables, Stokes' Theorem, transmitter}, pubstate = {published}, tppubtype = {inproceedings} } This paper analyzes the channel dispersion of quasi-static multiple-input multiple-output fading channels with no channel state information at the transmitter. We show that the channel dispersion is zero under mild conditions on the fading distribution. The proof of our result is based on Stokes' theorem, which deals with the integration of differential forms on manifolds with boundary. |

## 2013 |

Yang, Wei; Durisi, Giuseppe; Koch, Tobias; Polyanskiy, Yury Quasi-Static SIMO Fading Channels at Finite Blocklength Inproceedings 2013 IEEE International Symposium on Information Theory, pp. 1531–1535, IEEE, Istanbul, 2013, ISSN: 2157-8095. Abstract | Links | BibTeX | Tags: achievability bounds, AWGN channel, AWGN channels, channel capacity, channel dispersion, channel gains, Dispersion, error probability, error statistics, Fading, fading channels, fading realizations, fast convergence, finite blocklength, maximal achievable rate, numerical evaluation, outage capacity, quasistatic SIMO fading channels, Random variables, Receivers, SIMO Rician channel, single-input multiple-output, Transmitters, zero dispersion @inproceedings{Yang2013a, title = {Quasi-Static SIMO Fading Channels at Finite Blocklength}, author = {Wei Yang and Giuseppe Durisi and Tobias Koch and Yury Polyanskiy}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6620483}, issn = {2157-8095}, year = {2013}, date = {2013-01-01}, booktitle = {2013 IEEE International Symposium on Information Theory}, pages = {1531--1535}, publisher = {IEEE}, address = {Istanbul}, abstract = {We investigate the maximal achievable rate for a given blocklength and error probability over quasi-static single-input multiple-output (SIMO) fading channels. Under mild conditions on the channel gains, it is shown that the channel dispersion is zero regardless of whether the fading realizations are available at the transmitter and/or the receiver. The result follows from computationally and analytically tractable converse and achievability bounds. Through numerical evaluation, we verify that, in some scenarios, zero dispersion indeed entails fast convergence to outage capacity as the blocklength increases. 
In the example of a particular 1×2 SIMO Rician channel, the blocklength required to achieve 90% of capacity is about an order of magnitude smaller compared to the blocklength required for an AWGN channel with the same capacity.}, keywords = {achievability bounds, AWGN channel, AWGN channels, channel capacity, channel dispersion, channel gains, Dispersion, error probability, error statistics, Fading, fading channels, fading realizations, fast convergence, finite blocklength, maximal achievable rate, numerical evaluation, outage capacity, quasistatic SIMO fading channels, Random variables, Receivers, SIMO Rician channel, single-input multiple-output, Transmitters, zero dispersion}, pubstate = {published}, tppubtype = {inproceedings} } We investigate the maximal achievable rate for a given blocklength and error probability over quasi-static single-input multiple-output (SIMO) fading channels. Under mild conditions on the channel gains, it is shown that the channel dispersion is zero regardless of whether the fading realizations are available at the transmitter and/or the receiver. The result follows from computationally and analytically tractable converse and achievability bounds. Through numerical evaluation, we verify that, in some scenarios, zero dispersion indeed entails fast convergence to outage capacity as the blocklength increases. In the example of a particular 1×2 SIMO Rician channel, the blocklength required to achieve 90% of capacity is about an order of magnitude smaller compared to the blocklength required for an AWGN channel with the same capacity. |

## 2012 |

Koch, Tobias; Martinez, Alfonso; i Fabregas, Albert Guillen The Capacity Loss of Dense Constellations Inproceedings 2012 IEEE International Symposium on Information Theory Proceedings, pp. 572–576, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095. Abstract | Links | BibTeX | Tags: capacity loss, channel capacity, Constellation diagram, dense constellations, Entropy, general complex-valued additive-noise channels, high signal-to-noise ratio, loss 1.53 dB, power loss, Quadrature amplitude modulation, Random variables, signal constellations, Signal processing, Signal to noise ratio, square signal constellations, Upper bound @inproceedings{Koch2012, title = {The Capacity Loss of Dense Constellations}, author = {Tobias Koch and Alfonso Martinez and Albert Guillen i Fabregas}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6283482}, issn = {2157-8095}, year = {2012}, date = {2012-01-01}, booktitle = {2012 IEEE International Symposium on Information Theory Proceedings}, pages = {572--576}, publisher = {IEEE}, address = {Cambridge, MA}, abstract = {We determine the loss in capacity incurred by using signal constellations with a bounded support over general complex-valued additive-noise channels for suitably high signal-to-noise ratio. Our expression for the capacity loss recovers the power loss of 1.53 dB for square signal constellations.}, keywords = {capacity loss, channel capacity, Constellation diagram, dense constellations, Entropy, general complex-valued additive-noise channels, high signal-to-noise ratio, loss 1.53 dB, power loss, Quadrature amplitude modulation, Random variables, signal constellations, Signal processing, Signal to noise ratio, square signal constellations, Upper bound}, pubstate = {published}, tppubtype = {inproceedings} } We determine the loss in capacity incurred by using signal constellations with a bounded support over general complex-valued additive-noise channels for suitably high signal-to-noise ratio. 
Our expression for the capacity loss recovers the power loss of 1.53 dB for square signal constellations. |

Taborda, Camilo G; Perez-Cruz, Fernando Derivative of the Relative Entropy over the Poisson and Binomial Channel Inproceedings 2012 IEEE Information Theory Workshop, pp. 386–390, IEEE, Lausanne, 2012, ISBN: 978-1-4673-0223-4. Abstract | Links | BibTeX | Tags: binomial channel, binomial distribution, Channel estimation, conditional distribution, Entropy, Estimation, function expectation, Mutual information, mutual information concept, Poisson channel, Poisson distribution, Random variables, relative entropy derivative, similar expression @inproceedings{Taborda2012, title = {Derivative of the Relative Entropy over the Poisson and Binomial Channel}, author = {Camilo G Taborda and Fernando Perez-Cruz}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6404699}, isbn = {978-1-4673-0223-4}, year = {2012}, date = {2012-01-01}, booktitle = {2012 IEEE Information Theory Workshop}, pages = {386--390}, publisher = {IEEE}, address = {Lausanne}, abstract = {In this paper it is found that, regardless of the statistics of the input, the derivative of the relative entropy over the Binomial channel can be seen as the expectation of a function that has as argument the mean of the conditional distribution that models the channel. Based on this relationship we formulate a similar expression for the mutual information concept. In addition to this, using the connection between the Binomial and Poisson distribution we develop similar results for the Poisson channel. 
Novelty of the results presented here lies on the fact that, expressions obtained can be applied to a wide range of scenarios.}, keywords = {binomial channel, binomial distribution, Channel estimation, conditional distribution, Entropy, Estimation, function expectation, Mutual information, mutual information concept, Poisson channel, Poisson distribution, Random variables, relative entropy derivative, similar expression}, pubstate = {published}, tppubtype = {inproceedings} } In this paper it is found that, regardless of the statistics of the input, the derivative of the relative entropy over the Binomial channel can be seen as the expectation of a function that has as argument the mean of the conditional distribution that models the channel. Based on this relationship we formulate a similar expression for the mutual information concept. In addition to this, using the connection between the Binomial and Poisson distribution we develop similar results for the Poisson channel. Novelty of the results presented here lies on the fact that, expressions obtained can be applied to a wide range of scenarios. |

Pastore, Adriano; Koch, Tobias; Fonollosa, Javier Rodriguez Improved Capacity Lower Bounds for Fading Channels with Imperfect CSI Using Rate Splitting Inproceedings 2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel, pp. 1–5, IEEE, Eilat, 2012, ISBN: 978-1-4673-4681-8. Abstract | Links | BibTeX | Tags: channel capacity, channel capacity lower bounds, conditional entropy, Decoding, Entropy, Fading, fading channels, Gaussian channel, Gaussian channels, Gaussian random variable, imperfect channel-state information, imperfect CSI, independent Gaussian variables, linear minimum mean-square error, mean square error methods, Medard lower bound, Mutual information, Random variables, rate splitting approach, Resource management, Upper bound, wireless communications @inproceedings{Pastore2012, title = {Improved Capacity Lower Bounds for Fading Channels with Imperfect CSI Using Rate Splitting}, author = {Adriano Pastore and Tobias Koch and Javier Rodriguez Fonollosa}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6377031}, isbn = {978-1-4673-4681-8}, year = {2012}, date = {2012-01-01}, booktitle = {2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel}, pages = {1--5}, publisher = {IEEE}, address = {Eilat}, abstract = {As shown by Medard (“The effect upon channel capacity in wireless communications of perfect and imperfect knowledge of the channel,” IEEE Trans. Inform. Theory, May 2000), the capacity of fading channels with imperfect channel-state information (CSI) can be lower-bounded by assuming a Gaussian channel input X, and by upper-bounding the conditional entropy h(X|Y, Ĥ), conditioned on the channel output Y and the CSI Ĥ, by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating X from (Y, Ĥ). 
We demonstrate that, by using a rate-splitting approach, this lower bound can be sharpened: we show that by expressing the Gaussian input X as the sum of two independent Gaussian variables X(1) and X(2), and by applying Medard's lower bound first to analyze the mutual information between X(1) and Y conditioned on Ĥ while treating X(2) as noise, and by applying the lower bound then to analyze the mutual information between X(2) and Y conditioned on (X(1), Ĥ), we obtain a lower bound on the capacity that is larger than Medard's lower bound.}, keywords = {channel capacity, channel capacity lower bounds, conditional entropy, Decoding, Entropy, Fading, fading channels, Gaussian channel, Gaussian channels, Gaussian random variable, imperfect channel-state information, imperfect CSI, independent Gaussian variables, linear minimum mean-square error, mean square error methods, Medard lower bound, Mutual information, Random variables, rate splitting approach, Resource management, Upper bound, wireless communications}, pubstate = {published}, tppubtype = {inproceedings} } As shown by Medard (“The effect upon channel capacity in wireless communications of perfect and imperfect knowledge of the channel,” IEEE Trans. Inform. Theory, May 2000), the capacity of fading channels with imperfect channel-state information (CSI) can be lower-bounded by assuming a Gaussian channel input X, and by upper-bounding the conditional entropy h(X|Y, Ĥ), conditioned on the channel output Y and the CSI Ĥ, by the entropy of a Gaussian random variable with variance equal to the linear minimum mean-square error in estimating X from (Y, Ĥ). 
We demonstrate that, by using a rate-splitting approach, this lower bound can be sharpened: we show that by expressing the Gaussian input X as the sum of two independent Gaussian variables X(1) and X(2), and by applying Medard's lower bound first to analyze the mutual information between X(1) and Y conditioned on Ĥ while treating X(2) as noise, and by applying the lower bound then to analyze the mutual information between X(2) and Y conditioned on (X(1), Ĥ), we obtain a lower bound on the capacity that is larger than Medard's lower bound. |

Taborda, Camilo G; Perez-Cruz, Fernando Mutual Information and Relative Entropy over the Binomial and Negative Binomial Channels Inproceedings 2012 IEEE International Symposium on Information Theory Proceedings, pp. 696–700, IEEE, Cambridge, MA, 2012, ISSN: 2157-8095. Abstract | Links | BibTeX | Tags: Channel estimation, conditional mean estimation, Entropy, Estimation, estimation theoretical quantity, estimation theory, Gaussian channel, Gaussian channels, information theory concept, loss function, mean square error methods, Mutual information, negative binomial channel, Poisson channel, Random variables, relative entropy @inproceedings{Taborda2012a, title = {Mutual Information and Relative Entropy over the Binomial and Negative Binomial Channels}, author = {Camilo G Taborda and Fernando Perez-Cruz}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6284304}, issn = {2157-8095}, year = {2012}, date = {2012-01-01}, booktitle = {2012 IEEE International Symposium on Information Theory Proceedings}, pages = {696--700}, publisher = {IEEE}, address = {Cambridge, MA}, abstract = {We study the relation of the mutual information and relative entropy over the Binomial and Negative Binomial channels with estimation theoretical quantities, in which we extend already known results for Gaussian and Poisson channels. 
We establish general expressions for these information theory concepts with a direct connection with estimation theory through the conditional mean estimation and a particular loss function.}, keywords = {Channel estimation, conditional mean estimation, Entropy, Estimation, estimation theoretical quantity, estimation theory, Gaussian channel, Gaussian channels, information theory concept, loss function, mean square error methods, Mutual information, negative binomial channel, Poisson channel, Random variables, relative entropy}, pubstate = {published}, tppubtype = {inproceedings} } We study the relation of the mutual information and relative entropy over the Binomial and Negative Binomial channels with estimation theoretical quantities, in which we extend already known results for Gaussian and Poisson channels. We establish general expressions for these information theory concepts with a direct connection with estimation theory through the conditional mean estimation and a particular loss function. |

## 2010 |

Djuric, Petar M; Closas, Pau; Bugallo, Monica F; Miguez, Joaquin Evaluation of a Method's Robustness Inproceedings 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 3598–3601, IEEE, Dallas, 2010, ISSN: 1520-6149. Abstract | Links | BibTeX | Tags: Electronic mail, Extraterrestrial measurements, Filtering, Gaussian processes, method's robustness, Random variables, robustness, sequential methods, Signal processing, statistical distributions, Telecommunications, uniform distribution, Wireless communication @inproceedings{Djuric2010, title = {Evaluation of a Method's Robustness}, author = {Petar M Djuric and Pau Closas and Monica F Bugallo and Joaquin Miguez}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5495921}, issn = {1520-6149}, year = {2010}, date = {2010-01-01}, booktitle = {2010 IEEE International Conference on Acoustics, Speech and Signal Processing}, pages = {3598--3601}, publisher = {IEEE}, address = {Dallas}, abstract = {In signal processing, it is typical to develop or use a method based on a given model. In practice, however, we almost never know the actual model and we hope that the assumed model is in the neighborhood of the true one. If deviations exist, the method may be more or less sensitive to them. Therefore, it is important to know more about this sensitivity, or in other words, how robust the method is to model deviations. To that end, it is useful to have a metric that can quantify the robustness of the method. In this paper we propose a procedure for developing a variety of metrics for measuring robustness. They are based on a discrete random variable that is generated from observed data and data generated according to past data and the adopted model. This random variable is uniform if the model is correct. When the model deviates from the true one, the distribution of the random variable deviates from the uniform distribution. 
One can then employ measures for differences between distributions in order to quantify robustness. In this paper we describe the proposed methodology and demonstrate it with simulated data.}, keywords = {Electronic mail, Extraterrestrial measurements, Filtering, Gaussian processes, method's robustness, Random variables, robustness, sequential methods, Signal processing, statistical distributions, Telecommunications, uniform distribution, Wireless communication}, pubstate = {published}, tppubtype = {inproceedings} } In signal processing, it is typical to develop or use a method based on a given model. In practice, however, we almost never know the actual model and we hope that the assumed model is in the neighborhood of the true one. If deviations exist, the method may be more or less sensitive to them. Therefore, it is important to know more about this sensitivity, or in other words, how robust the method is to model deviations. To that end, it is useful to have a metric that can quantify the robustness of the method. In this paper we propose a procedure for developing a variety of metrics for measuring robustness. They are based on a discrete random variable that is generated from observed data and data generated according to past data and the adopted model. This random variable is uniform if the model is correct. When the model deviates from the true one, the distribution of the random variable deviates from the uniform distribution. One can then employ measures for differences between distributions in order to quantify robustness. In this paper we describe the proposed methodology and demonstrate it with simulated data. |

Djuric, Petar M; Miguez, Joaquin Assessment of Nonlinear Dynamic Models by Kolmogorov–Smirnov Statistics Journal Article IEEE Transactions on Signal Processing, 58 (10), pp. 5069–5079, 2010, ISSN: 1053-587X. Abstract | Links | BibTeX | Tags: Cumulative distributions, discrete random variables, dynamic nonlinear models, Electrical capacitance tomography, Filtering, filtering theory, Iron, Kolmogorov-Smirnov statistics, Kolomogorov–Smirnov statistics, model assessment, nonlinear dynamic models, nonlinear dynamical systems, Permission, predictive cumulative distributions, predictive distributions, Predictive models, Random variables, Robots, statistical analysis, statistical distributions, statistics, Telecommunication control @article{Djuric2010a, title = {Assessment of Nonlinear Dynamic Models by Kolmogorov–Smirnov Statistics}, author = {Petar M Djuric and Joaquin Miguez}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5491124}, issn = {1053-587X}, year = {2010}, date = {2010-01-01}, journal = {IEEE Transactions on Signal Processing}, volume = {58}, number = {10}, pages = {5069--5079}, abstract = {Model assessment is a fundamental problem in science and engineering and it addresses the question of the validity of a model in the light of empirical evidence. In this paper, we propose a method for the assessment of dynamic nonlinear models based on empirical and predictive cumulative distributions of data and the Kolmogorov-Smirnov statistics. The technique is based on the generation of discrete random variables that come from a known discrete distribution if the entertained model is correct. 
We provide simulation examples that demonstrate the performance of the proposed method.}, keywords = {Cumulative distributions, discrete random variables, dynamic nonlinear models, Electrical capacitance tomography, Filtering, filtering theory, Iron, Kolmogorov-Smirnov statistics, Kolmogorov–Smirnov statistics, model assessment, nonlinear dynamic models, nonlinear dynamical systems, Permission, predictive cumulative distributions, predictive distributions, Predictive models, Random variables, Robots, statistical analysis, statistical distributions, statistics, Telecommunication control}, pubstate = {published}, tppubtype = {article} } Model assessment is a fundamental problem in science and engineering and it addresses the question of the validity of a model in the light of empirical evidence. In this paper, we propose a method for the assessment of dynamic nonlinear models based on empirical and predictive cumulative distributions of data and the Kolmogorov-Smirnov statistics. The technique is based on the generation of discrete random variables that come from a known discrete distribution if the entertained model is correct. We provide simulation examples that demonstrate the performance of the proposed method. |

## 2008 |

Koch, Tobias; Lapidoth, Amos On Multipath Fading Channels at High SNR Inproceedings 2008 IEEE International Symposium on Information Theory, pp. 1572–1576, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2. Abstract | Links | BibTeX | Tags: channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters @inproceedings{Koch2008, title = {On Multipath Fading Channels at High SNR}, author = {Tobias Koch and Amos Lapidoth}, url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4595252}, isbn = {978-1-4244-2256-2}, year = {2008}, date = {2008-01-01}, booktitle = {2008 IEEE International Symposium on Information Theory}, pages = {1572--1576}, publisher = {IEEE}, address = {Toronto}, abstract = {This paper studies the capacity of discrete-time multipath fading channels. It is assumed that the number of paths is finite, i.e., that the channel output is influenced by the present and by the L previous channel inputs. A noncoherent channel model is considered where neither transmitter nor receiver are cognizant of the fading's realization, but both are aware of its statistic. The focus is on capacity at high signal-to-noise ratios (SNR). In particular, the capacity pre-loglog-defined as the limiting ratio of the capacity to loglog(SNR) as SNR tends to infinity-is studied. 
It is shown that, irrespective of the number of paths L, the capacity pre-loglog is 1.}, keywords = {channel capacity, Delay, discrete time systems, discrete-time channels, Entropy, Fading, fading channels, Frequency, Mathematical model, multipath channels, multipath fading channels, noncoherent channel model, Random variables, Signal to noise ratio, signal-to-noise ratios, SNR, statistics, Transmitters}, pubstate = {published}, tppubtype = {inproceedings} } This paper studies the capacity of discrete-time multipath fading channels. It is assumed that the number of paths is finite, i.e., that the channel output is influenced by the present and by the L previous channel inputs. A noncoherent channel model is considered where neither transmitter nor receiver are cognizant of the fading's realization, but both are aware of its statistic. The focus is on capacity at high signal-to-noise ratios (SNR). In particular, the capacity pre-loglog-defined as the limiting ratio of the capacity to loglog(SNR) as SNR tends to infinity-is studied. It is shown that, irrespective of the number of paths L, the capacity pre-loglog is 1. |

Perez-Cruz, Fernando Kullback-Leibler Divergence Estimation of Continuous Distributions Inproceedings 2008 IEEE International Symposium on Information Theory, pp. 1666–1670, IEEE, Toronto, 2008, ISBN: 978-1-4244-2256-2. Abstract | Links | BibTeX | Tags: Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions @inproceedings{Perez-Cruz2008, title = {Kullback-Leibler Divergence Estimation of Continuous Distributions}, author = {Fernando Perez-Cruz}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4595271}, isbn = {978-1-4244-2256-2}, year = {2008}, date = {2008-01-01}, booktitle = {2008 IEEE International Symposium on Information Theory}, pages = {1666--1670}, publisher = {IEEE}, address = {Toronto}, abstract = {We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved estimating the densities first. Our main result shows this intermediate step is unnecessary and that the divergence can be either estimated using the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, such as the exponential or Erlang. 
We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem.}, keywords = {Convergence, density estimation, Density measurement, Entropy, Frequency estimation, H infinity control, information theory, k-nearest-neighbour density estimation, Kullback-Leibler divergence estimation, Machine learning, Mutual information, neuroscience, Random variables, statistical distributions, waiting-times distributions}, pubstate = {published}, tppubtype = {inproceedings} } We present a method for estimating the KL divergence between continuous densities and we prove it converges almost surely. Divergence estimation is typically solved estimating the densities first. Our main result shows this intermediate step is unnecessary and that the divergence can be either estimated using the empirical cdf or k-nearest-neighbour density estimation, which does not converge to the true measure for finite k. The convergence proof is based on describing the statistics of our estimator using waiting-times distributions, such as the exponential or Erlang. We illustrate the proposed estimators and show how they compare to existing methods based on density estimation, and we also outline how our divergence estimators can be used for solving the two-sample problem. |

Santiago-Mozos, Ricardo; Fernandez-Lorenzana, R; Perez-Cruz, Fernando; Artés-Rodríguez, Antonio On the Uncertainty in Sequential Hypothesis Testing Inproceedings 2008 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro, pp. 1223–1226, IEEE, Paris, 2008, ISBN: 978-1-4244-2002-5. Abstract | Links | BibTeX | Tags: binary hypothesis test, Biomedical imaging, Detectors, H infinity control, likelihood ratio, Medical diagnostic imaging, medical image application, medical image processing, Medical tests, patient diagnosis, Probability, Random variables, Sequential analysis, sequential hypothesis testing, sequential probability ratio test, Signal processing, Testing, tuberculosis diagnosis, Uncertainty @inproceedings{Santiago-Mozos2008, title = {On the Uncertainty in Sequential Hypothesis Testing}, author = {Ricardo Santiago-Mozos and R Fernandez-Lorenzana and Fernando Perez-Cruz and Antonio Artés-Rodríguez}, url = {http://ieeexplore.ieee.org/articleDetails.jsp?arnumber=4541223}, isbn = {978-1-4244-2002-5}, year = {2008}, date = {2008-01-01}, booktitle = {2008 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro}, pages = {1223--1226}, publisher = {IEEE}, address = {Paris}, abstract = {We consider the problem of sequential hypothesis testing when the exact pdfs are not known but instead a set of iid samples are used to describe the hypotheses. We modify the classical test by introducing a likelihood ratio interval which accommodates the uncertainty in the pdfs. The test finishes when the whole likelihood ratio interval crosses one of the thresholds and reduces to the classical test as the number of samples to describe the hypotheses tends to infinity. We illustrate the performance of this test in a medical image application related to tuberculosis diagnosis. 
We show in this example how the test confidence level can be accurately determined.}, keywords = {binary hypothesis test, Biomedical imaging, Detectors, H infinity control, likelihood ratio, Medical diagnostic imaging, medical image application, medical image processing, Medical tests, patient diagnosis, Probability, Random variables, Sequential analysis, sequential hypothesis testing, sequential probability ratio test, Signal processing, Testing, tuberculosis diagnosis, Uncertainty}, pubstate = {published}, tppubtype = {inproceedings} } We consider the problem of sequential hypothesis testing when the exact pdfs are not known but instead a set of iid samples are used to describe the hypotheses. We modify the classical test by introducing a likelihood ratio interval which accommodates the uncertainty in the pdfs. The test finishes when the whole likelihood ratio interval crosses one of the thresholds and reduces to the classical test as the number of samples to describe the hypotheses tends to infinity. We illustrate the performance of this test in a medical image application related to tuberculosis diagnosis. We show in this example how the test confidence level can be accurately determined. |