@inproceedings{Wang2020a,
  title     = {A {{Comparative Study}} on {{Word Embeddings}} in {{Deep Learning}} for {{Text Classification}}},
  booktitle = {Proceedings of the 4th {{International Conference}} on {{Natural Language Processing}} and {{Information Retrieval}} ({{NLPIR}} 2020)},
  author    = {Wang, Congcong and Lillis, David},
  year      = {2020},
  month     = {December},
  address   = {Seoul, South Korea},
  doi       = {10.1145/3443279.3443304},
  isbn      = {978-1-4503-7760-7},
  abstract  = {Word embeddings act as an important component of deep models for providing input features in downstream language tasks, such as sequence labelling and text classification. In the last decade, a substantial number of word embedding methods have been proposed for this purpose, mainly falling into the categories of classic and context-based word embeddings. In this paper, we conduct controlled experiments to systematically examine both classic and contextualised word embeddings for the purposes of text classification. To encode a sequence from word representations, we apply two encoders, namely CNN and BiLSTM, in the downstream network architecture. To study the impact of word embeddings on different datasets, we select four benchmarking classification datasets with varying average sample length, comprising both single-label and multi-label classification tasks. The evaluation results with confidence intervals indicate that CNN as the downstream encoder outperforms BiLSTM in most situations, especially for document context-insensitive datasets. This study recommends choosing CNN over BiLSTM for document classification datasets where the context in sequence is not as indicative of class membership as sentence datasets. For word embeddings, concatenation of multiple classic embeddings or increasing their size does not lead to a statistically significant difference in performance despite a slight improvement in some cases. For context-based embeddings, we studied both ELMo and BERT. The results show that BERT overall outperforms ELMo, especially for long document datasets. Compared with classic embeddings, both achieve an improved performance for short datasets while the improvement is not observed in longer datasets.},
}