<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mickaëlla Grondin-Verdon</style></author><author><style face="normal" font="default" size="100%">Nezih Younsi</style></author><author><style face="normal" font="default" size="100%">Michele Grimaldi</style></author><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author><author><style face="normal" font="default" size="100%">Laurence Chaby</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Induction of the being-seen-feeling by an embodied conversational agent in a socially interactive context</style></title><secondary-title><style face="normal" font="default" size="100%">21st ACM International Conference on Intelligent Virtual Agents</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://hal.archives-ouvertes.fr/hal-03342893/document</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://hal.archives-ouvertes.fr/hal-03342893/document&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Wang, Weiyi</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, 
Georgios</style></author><author><style face="normal" font="default" size="100%">Yilmazyildiz, Selma</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Verhelst, Werner</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Natural Emotion Elicitation for Emotion Modeling in Child-Robot Interactions</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 4th Workshop on Child Computer Interaction (WOCCI 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.isca-speech.org/archive/wocci_2014/wc14_051.html</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ISCA</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><pages><style face="normal" font="default" size="100%">51–56</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Obtaining spontaneous emotional expressions is the very first and vital step in affective computing studies, for both psychologists and computer scientists. However, it is quite challenging to record them in real life, especially when certain modalities are required (e.g.  
3D representation of the body).  Traditional elicitation and capturing protocols either introduce the awareness of the recording, which may impair the naturalness of the behaviors, or cause too much information loss.  In this paper, we  present  natural  emotion  elicitation  and  recording  experiments, which were set in child-robot interaction scenarios. Several state-of-the-art technologies were employed to acquire the multi-modal expressive data that will be further used for emotion modeling and recognition studies. The obtained recordings exhibit the expected emotional expressions.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.isca-speech.org/archive/wocci_2014/wc14_051.html&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">Frédéric Kaplan</style></author><author><style face="normal" font="default" size="100%">Hideki Kozima</style></author><author><style face="normal" font="default" size="100%">Hiroyuki Yano</style></author><author><style face="normal" font="default" size="100%">Jürgen Konczak</style></author><author><style face="normal" font="default" size="100%">Giorgio Metta</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Georgi Stojanov</style></author><author><style face="normal" font="default" size="100%">Christian 
Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">From Imprinting to Adaptation: Building a History of Affective Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Fifth International Workshop on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob2005)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></publisher><pages><style face="normal" font="default" size="100%">23–30</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-4-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a Perception-Action architecture and experiments to simulate imprinting—the establishment of strong attachment links with a &quot;caregiver&quot;—in a robot. Following recent theories, we do not consider imprinting as rigidly timed and irreversible, but as a more flexible phenomenon that allows for further adaptation as a result of reward-based learning through experience. Our architecture reconciles these two types of perceptual learning traditionally considered as different and even incompatible. After the initial imprinting, adaptation is achieved in the context of a history of &quot;affective&quot; interactions between the robot and a human, driven by &quot;distress&quot; and &quot;comfort&quot; responses in the robot.</style></abstract></record></records></xml>