@conference{multimodal-gsi-conference-2017,
  author    = "S{\'a}nchez-Rada, J. Fernando and Iglesias, Carlos A. and Sagha, Hesam and Schuller, Bj{\"o}rn and Wood, Ian and Buitelaar, Paul",
  abstract  = "The lack of a standard emotion representation model hinders emotion analysis due to the incompatibility of annotation formats and models from different sources, tools and annotation services. This is also a limiting factor for multimodal analysis, since recognition services from different modalities (audio, video, text) tend to have different representation models (e.g., continuous vs. discrete emotions). This work presents a multi-disciplinary effort to alleviate this problem by formalizing conversion between emotion models. The specific contributions are: i) a semantic representation of emotion conversion; ii) an API proposal for services that perform automatic conversion; iii) a reference implementation of such a service; and iv) validation of the proposal through use cases that integrate different emotion models and service providers.",
  address   = "San Antonio, Texas, USA",
  booktitle = "Proceedings of ACII 2017",
  keywords  = "emotion analysis;linked data;social networks",
  month     = "October",
  title     = "{M}ultimodal {M}ultimodel {E}motion {A}nalysis as {L}inked {D}ata",
  year      = "2017",
}