2023
Moreno-Pino, Fernando; Olmos, Pablo M; Artés-Rodríguez, Antonio
Deep Autoregressive Models with Spectral Attention Artículo de revista
En: Pattern Recognition, pp. 109014, 2023, ISSN: 0031-3203.
Resumen | Enlaces | BibTeX | Etiquetas: Attention models, Deep learning, Filtering, global-local contexts, Signal processing, spectral domain attention, time series forecasting
@article{MORENOPINO2022109014,
  title     = {Deep Autoregressive Models with Spectral Attention},
  author    = {Moreno-Pino, Fernando and Olmos, Pablo M. and Art{\'e}s-Rodr{\'i}guez, Antonio},
  url       = {https://www.sciencedirect.com/science/article/pii/S0031320322004940},
  doi       = {10.1016/j.patcog.2022.109014},
  issn      = {0031-3203},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2022-01-01},
  journal   = {Pattern Recognition},
  pages     = {109014},
  abstract  = {Time series forecasting is an important problem across many domains, playing a crucial role in multiple real-world applications. In this paper, we propose a forecasting architecture that combines deep autoregressive models with a Spectral Attention (SA) module, which merges global and local frequency domain information in the model's embedded space. By characterizing in the spectral domain the embedding of the time series as occurrences of a random process, our method can identify global trends and seasonality patterns. Two spectral attention models, global and local to the time series, integrate this information within the forecast and perform spectral filtering to remove time series's noise. The proposed architecture has a number of useful properties: it can be effectively incorporated into well-known forecast architectures, requiring a low number of parameters and producing explainable results that improve forecasting accuracy. We test the Spectral Attention Autoregressive Model (SAAM) on several well-known forecast datasets, consistently demonstrating that our model compares favorably to state-of-the-art approaches.},
  keywords  = {Attention models, Deep learning, Filtering, global-local contexts, Signal processing, spectral domain attention, time series forecasting},
  pubstate  = {published},
  tppubtype = {article}
}
Aguilera, Aurora Cobo; Olmos, Pablo M; Artés-Rodríguez, Antonio; Pérez-Cruz, Fernando
Regularizing transformers with deep probabilistic layers Artículo de revista
En: Neural Networks, 2023, ISSN: 0893-6080.
Resumen | Enlaces | BibTeX | Etiquetas: Deep learning, Missing data, Natural language processing, Regularization, Transformers, Variational auto-encoder
@article{AGUILERA2023,
  title     = {Regularizing transformers with deep probabilistic layers},
  author    = {Cobo Aguilera, Aurora and Olmos, Pablo M. and Art{\'e}s-Rodr{\'i}guez, Antonio and P{\'e}rez-Cruz, Fernando},
  url       = {https://www.sciencedirect.com/science/article/pii/S0893608023000448},
  doi       = {10.1016/j.neunet.2023.01.032},
  issn      = {0893-6080},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  journal   = {Neural Networks},
  abstract  = {Language models (LM) have grown non-stop in the last decade, from sequence-to-sequence architectures to attention-based Transformers. However, regularization is not deeply studied in those structures. In this work, we use a Gaussian Mixture Variational Autoencoder (GMVAE) as a regularizer layer. We study its advantages regarding the depth where it is placed and prove its effectiveness in several scenarios. Experimental result demonstrates that the inclusion of deep generative models within Transformer-based architectures such as BERT, RoBERTa, or XLM-R can bring more versatile models, able to generalize better and achieve improved imputation score in tasks such as SST-2 and TREC or even impute missing/noisy words with richer text.},
  keywords  = {Deep learning, Missing data, Natural language processing, Regularization, Transformers, Variational auto-encoder},
  pubstate  = {published},
  tppubtype = {article}
}
2019
Peis, Ignacio; Olmos, Pablo M; Vera-Varela, Constanza; Barrigón, María Luisa; Courtet, Philippe; Baca-García, Enrique; Artés-Rodríguez, Antonio
Deep Sequential Models for Suicidal Ideation From Multiple Source Data Artículo de revista
En: IEEE Journal of Biomedical and Health Informatics, vol. 23, no. 6, pp. 2286–2293, 2019.
Enlaces | BibTeX | Etiquetas: attention, Deep learning, EMA, RNN, Suicide
@article{AArtes19,
  title     = {Deep Sequential Models for Suicidal Ideation From Multiple Source Data},
  author    = {Peis, Ignacio and Olmos, Pablo M. and Vera-Varela, Constanza and Barrig{\'o}n, Mar{\'i}a Luisa and Courtet, Philippe and Baca-Garc{\'i}a, Enrique and Art{\'e}s-Rodr{\'i}guez, Antonio},
  doi       = {10.1109/JBHI.2019.2919270},
  year      = {2019},
  date      = {2019-05-27},
  journal   = {IEEE Journal of Biomedical and Health Informatics},
  volume    = {23},
  number    = {6},
  pages     = {2286--2293},
  keywords  = {attention, Deep learning, EMA, RNN, Suicide},
  pubstate  = {published},
  tppubtype = {article}
}