@inproceedings{mao-etal-2022-contrastive,
  title     = {When do Contrastive Word Alignments Improve Many-to-many Neural Machine Translation?},
  author    = {Mao, Zhuoyuan and Chu, Chenhui and Dabre, Raj and Song, Haiyue and Wan, Zhen and Kurohashi, Sadao},
  editor    = {Carpuat, Marine and de Marneffe, Marie-Catherine and Meza Ruiz, Ivan Vladimir},
  booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2022},
  month     = jul,
  year      = {2022},
  address   = {Seattle, United States},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.findings-naacl.134},
  doi       = {10.18653/v1/2022.findings-naacl.134},
  pages     = {1766--1775},
  abstract  = {Word alignment has proven to benefit many-to-many neural machine translation (NMT). However, high-quality ground-truth bilingual dictionaries were used for pre-editing in previous methods, which are unavailable for most language pairs. Meanwhile, the contrastive objective can implicitly utilize automatically learned word alignment, which has not been explored in many-to-many NMT. This work proposes a word-level contrastive objective to leverage word alignments for many-to-many NMT. Empirical results show that this leads to 0.8 BLEU gains for several language pairs. Analyses reveal that in many-to-many NMT, the encoder{'}s sentence retrieval performance highly correlates with the translation quality, which explains when the proposed method impacts translation. This motivates future exploration for many-to-many NMT to improve the encoder{'}s sentence retrieval performance.},
}