<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mickaëlla Grondin-Verdon</style></author><author><style face="normal" font="default" size="100%">Nezih Younsi</style></author><author><style face="normal" font="default" size="100%">Michele Grimaldi</style></author><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author><author><style face="normal" font="default" size="100%">Laurence Chaby</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Induction of the being-seen-feeling by an embodied conversational agent in a socially interactive context</style></title><secondary-title><style face="normal" font="default" size="100%">21st ACM International Conference on Intelligent Virtual Agents</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://hal.archives-ouvertes.fr/hal-03342893/document</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://hal.archives-ouvertes.fr/hal-03342893/document&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretation of Emotional Body Language Displayed by a Humanoid Robot: A Case Study with Children</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Social Robotics</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">emotion</style></keyword><keyword><style  face="normal" font="default" size="100%">emotional body language</style></keyword><keyword><style  face="normal" font="default" size="100%">perception</style></keyword><keyword><style  face="normal" font="default" size="100%">Social robotics</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/article/10.1007/s12369-013-0193-z</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">5</style></volume><pages><style face="normal" font="default" size="100%">325–334</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The work reported in this paper focuses on giving humanoid robots the capacity to express emotions with their body. 
Previous results show that adults are able to interpret different key poses displayed by a humanoid robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy) and valence (positive or negative emotion) whereas moving the head up produces an increase along these dimensions. Hence, changing the head position during an interaction should send intuitive signals. The study reported in this paper tested children’s ability to recognize the emotional body language displayed by a humanoid robot. The results suggest that body postures and head position can be used to convey emotions during child-robot interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/article/10.1007/s12369-013-0193-z&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Alexandre Mazel</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretation of Emotional Body Language Displayed by Robots</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 3rd International Workshop on Affective Interaction in Natural Environments, AFFINE'10</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">ACM</style></publisher><pub-location><style face="normal" font="default" size="100%">Firenze, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">37–42</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4503-0170-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In order for robots to be socially accepted and generate empathy they must display emotions. For robots such as Nao, body language is the best medium available, as they do not have the ability to display facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should greatly improve its acceptance. This research investigates the creation of an &quot;Affect Space&quot; for the generation of emotional body language that could be displayed by robots. An Affect Space is generated by &quot;blending&quot; (i.e. interpolating between) different emotional expressions to create new ones. An Affect Space for body language based on the Circumplex Model of emotions has been created. The experiment reported in this paper investigated the perception of specific key poses from the Affect Space. The results suggest that this Affect Space for body expressions can be used to improve the expressiveness of humanoid robots. In addition, early results of a pilot study are described. 
It revealed that the context helps human subjects improve their recognition rate during a human-robot imitation game, and in turn this recognition leads to better outcome of the interactions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">O'Bryne, Claire</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">John C Murray</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The Importance of the Body in Affect-Modulated Action Selection: A Case Study Comparing Proximal Versus Distal Perception in a Prey-Predator Scenario</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 3rd Intl. Conference on Affective Computing and Intelligent Interaction (ACII 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">1–6</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In the context of the animat approach, we investigate the effect of an emotion-like hormonal mechanism, as a modulator of perception - and second order controller to an underlying motivation-based action selection architecture - on brain-body-environment interactions within a prey-predator scenario. 
We are particularly interested in the effects that affective modulation of different perceptual capabilities has on the dynamics of interactions between predator and prey, as part of a broader study of the adaptive value of emotional states such as &quot;fear&quot; and &quot;aggression&quot; in the context of action selection. In this paper we present experiments where we modulated the architecture of a prey robot using two different types of sensory capabilities, proximal and distal, effectively creating combinations of different prey &quot;brains&quot; and &quot;bodies&quot;.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kim A. Bard</style></author><author><style face="normal" font="default" size="100%">Ross, Marina Davila</style></author><author><style face="normal" font="default" size="100%">Thorsteinsson, Kate</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kim, Jong-Hwan</style></author><author><style face="normal" font="default" size="100%">Ge, Shuzhi Sam</style></author><author><style face="normal" font="default" size="100%">Vadakkepat, Prahlad</style></author><author><style face="normal" font="default" size="100%">Jesse, Norbert</style></author><author><style face="normal" font="default" size="100%">Al Manum, Abdullah</style></author><author><style face="normal" font="default" size="100%">Puthusserypady K, Sadasivan</style></author><author><style face="normal" font="default" size="100%">Rückert, Ulrich</style></author><author><style face="normal" font="default" size="100%">Sitte, Joaquin</style></author><author><style face="normal" font="default" size="100%">Witkowski, Ulf</style></author><author><style face="normal" font="default" size="100%">Nakatsu, Ryohei</style></author><author><style face="normal" font="default" size="100%">Braunl, Thomas</style></author><author><style face="normal" font="default" size="100%">Baltes, Jacky</style></author><author><style face="normal" font="default" size="100%">Anderson, John</style></author><author><style face="normal" font="default" size="100%">Wong, Ching-Chang</style></author><author><style face="normal" font="default" size="100%">Verner, Igor</style></author><author><style face="normal" font="default" size="100%">Ahlgren, David</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">The Influence of Social Interaction on the Perception of Emotional Expression: A Case Study with a Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Robotics: Proc. FIRA RoboWorld Congress 2009</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-03983-6_10</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Incheon, Korea</style></pub-location><volume><style face="normal" font="default" size="100%">5744</style></volume><pages><style face="normal" font="default" size="100%">63–72</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-03983-6</style></isbn><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we focus primarily on the influence that socio-emotional interaction has on the perception of emotional expression by a robot. We also investigate and discuss the importance of emotion expression in socially interactive situations involving human robot interaction (HRI), and show the importance of utilising emotion expression when dealing with interactive robots, that are to learn and develop in socially situated environments. We discuss early expressional development and the function of emotion in communication in humans and how this can improve HRI communications. Finally we provide experimental results showing how emotion-rich interaction via emotion expression can affect the HRI process by providing additional information.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">French, Richard L B</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Introducing Neuromodulation to a Braitenberg Vehicle</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2005 IEEE Int. Conf. on Robotics and Automation: Robots get Closer to Humans (ICRA'05)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2005</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/abstract/document/1570763/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Barcelona, Spain</style></pub-location><pages><style face="normal" font="default" size="100%">4199–4204</style></pages><isbn><style face="normal" font="default" size="100%">0-7803-8914-X</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Artificial neural networks are often used as the control systems for mobile robots. However, although these models usually claim inspiration from biology, they often lack an analogue of the biological phenomenon called neuromodulation. 
In this paper, we describe our initial work exploring a simple model of neuromodulation, used to provide a mobile robot with foraging behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Campana, Fabio</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Di Vincenzo, Sarah</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Intelligenza artificiale in medicina: progetto di una piattaforma mobile inserita in un ambiente intelligente per l'assistenza ai disabili e agli anziani</style></title><secondary-title><style face="normal" font="default" size="100%">Recenti Progressi in Medicina</style></secondary-title><translated-title><style face="normal" font="default" size="100%">Artificial intelligence in medicine: project of a mobile platform in an intelligent environment for the care of disabled and elderly people</style></translated-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">Pensiero scientifico</style></publisher><volume><style face="normal" font="default" size="100%">95</style></volume><pages><style face="normal" font="default" size="100%">190–195</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Viene presentato un progetto basato sull'integrazione di nuove tecnologie e di Intelligenza artificiale per sviluppare uno strumento – e-tool – indirizzato alle persone disabili ed agli anziani. Una piattaforma mobile inserita all'interno di ambienti intelligenti (strutture di assistenza o abitazioni), controllata e gestita attraverso un'architettura multilivello, viene proposta come supporto sia per i pazienti che per i caregiver al fine di aumentare l'autonomia nella vita quotidiana.

A project based on the integration of new technologies and artificial intelligence to develop a device – e-tool – for disabled patients and elderly people is presented. A mobile platform in intelligent environments (skilled-care facilities and home-care), controlled and managed by a multi-level architecture, is proposed to support patients and caregivers to increase self-dependency in activities of daily living.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Fredslund, Jakob</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">I Show You How I Like You—Can You Read it in My Face?</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Systems, Man and Cybernetics, Part A: Systems and Humans</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2001</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/952719/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">31</style></volume><pages><style face="normal" font="default" size="100%">454–459</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We report work on a LEGO robot that displays different emotional expressions in response to physical stimulation, for the purpose of social 
interaction with humans. This is a first step toward our longer-term goal of exploring believable emotional exchanges to achieve plausible interaction with a simple robot. Drawing inspiration from theories of human basic emotions, we have implemented several prototypical expressions in the robot’s caricaturized face and conducted experiments to assess the recognizability of these expressions.</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue><accession-num><style face="normal" font="default" size="100%">7064042</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Fredslund, Jakob</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">I Show You How I Like You: Human-Robot Interaction through Emotional Expression and Tactile Stimulation</style></title><secondary-title><style face="normal" font="default" size="100%">Dept. of Computer Science Technical Report DAIMI PB 544</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ojs.statsbiblioteket.dk/index.php/daimipb/article/view/7078</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">University of Aarhus, Denmark</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We report work on a LEGO robot capable of displaying several emotional expressions in response to physical contact. Our motivation has been to explore believable emotional exchanges to achieve plausible interaction with a simple robot. 
We have worked toward this goal in two ways. First, acknowledging the importance of physical manipulation in children's interactions, interaction with the robot is through tactile stimulation; the various kinds of stimulation that can elicit the robot's emotions are grounded in a model of emotion activation based on different stimulation patterns. Second, emotional states need to be clearly conveyed. We have drawn inspiration from theories of human basic emotions with associated universal facial expressions, which we have implemented in a caricaturized face. We have conducted experiments on both children and adults to assess the recognizability of these expressions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Josep Lluís Arcos</style></author><author><style face="normal" font="default" size="100%">Ramon López de Mántaras</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Imitating Human Performances to Automatically Generate Expressive Jazz Ballads</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. AISB'99 Symposium on Imitation in Animals and Artifacts</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">115–20</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">One of the main problems with the automatic generation of expressive musical performances is to grasp the way in which human performers use musical knowledge that is not explicitly noted in musical scores. Moreover, this knowledge is tacit, difficult to verbalize, and therefore it must be acquired through a process of observation, imitation, and experimentation. For this reason, AI approaches based on declarative knowledge representations have serious limitations. An alternative approach is that of directly using the implicit knowledge that is in examples from recordings of human performances. In this paper, we describe a case-based reasoning system that generates expressive musical performances imitating examples of expressive human performances.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Issues in the Design of Emotional Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional and Intelligent: The Tangled Knot of Cognition. Papers from the 1998 AAAI Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pages><style face="normal" font="default" size="100%">49–54</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record></records></xml>