<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Markelius, A.</style></author><author><style face="normal" font="default" size="100%">Sjöberg, S.</style></author><author><style face="normal" font="default" size="100%">Lemhaouri, Z.</style></author><author><style face="normal" font="default" size="100%">Cohen, L.</style></author><author><style face="normal" font="default" size="100%">Lowe, R.</style></author><author><style face="normal" font="default" size="100%">Cañamero, L.</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Abdulaziz Al Ali</style></author><author><style face="normal" font="default" size="100%">Nader Meskin</style></author><author><style face="normal" font="default" size="100%">Wanyue Jiang</style></author><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Silvia Rossi</style></author><author><style face="normal" font="default" size="100%">Hongsheng He</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Human-Robot Mutual Learning System with Affect-Grounded Language Acquisition and Differential Outcomes Training</style></title><secondary-title><style face="normal" font="default" size="100%">Social Robotics. 
15th International Conference, ICSR 2023, Proceedings Part II</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-981-99-8718-4</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Doha, Qatar, December 3–7, 2023</style></pub-location><volume><style face="normal" font="default" size="100%">LNAI 14454</style></volume><pages><style face="normal" font="default" size="100%">108–122</style></pages><isbn><style face="normal" font="default" size="100%">978-981-99-8717-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">L. Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">M. Wilson</style></author><author><style face="normal" font="default" size="100%">Sofiane Boucenna</style></author><author><style face="normal" font="default" size="100%">N. Cuperlier</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">From Animals to Animats 16. Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 16. 
Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-3-031-16770-6</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer, LNAI, LNCS </style></publisher><pub-location><style face="normal" font="default" size="100%">CY Cergy Paris University, Cergy-Pontoise, France, September 20–23, 2022</style></pub-location><volume><style face="normal" font="default" size="100%">volume 13499</style></volume><isbn><style face="normal" font="default" size="100%">978-3-031-16769-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, L.</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">Wilson, M.</style></author><author><style face="normal" font="default" size="100%">Sofiane Boucenna</style></author><author><style face="normal" font="default" size="100%">Cuperlier, N.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Preface</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 16. 
Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-3-031-16770-6</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">LNAI, LNCS, volume 13499</style></number><publisher><style face="normal" font="default" size="100%">Springer, LNAI, LNCS</style></publisher><pages><style face="normal" font="default" size="100%">v - x</style></pages><isbn><style face="normal" font="default" size="100%">978-3-031-16769-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mickaëlla Grondin-Verdon</style></author><author><style face="normal" font="default" size="100%">Nezih Younsi</style></author><author><style face="normal" font="default" size="100%">Michele Grimaldi</style></author><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author><author><style face="normal" font="default" size="100%">Laurence Chaby</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Induction of the being-seen-feeling by an embodied conversational agent in a socially interactive context</style></title><secondary-title><style face="normal" font="default" size="100%">21st ACM International Conference on Intelligent Virtual Agents</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" 
size="100%">09/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://hal.archives-ouvertes.fr/hal-03342893/document</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://hal.archives-ouvertes.fr/hal-03342893/document&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hickton, Luke</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Gao, Yang</style></author><author><style face="normal" font="default" size="100%">Fallah, Saber</style></author><author><style face="normal" font="default" size="100%">Jin, Yaochu</style></author><author><style face="normal" font="default" size="100%">Lekakou, Constantina</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Flexible Component-Based Robot Control Architecture for Hormonal Modulation of Behaviour and Affect</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Towards Autonomous Robotic Systems 18th Annual Conference, TAROS 2017</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">LNCS</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2017</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2017</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007/978-3-319-64107-2_36</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer International</style></publisher><pub-location><style face="normal" font="default" size="100%">Guildford, UK</style></pub-location><volume><style face="normal" font="default" size="100%">10454</style></volume><pages><style face="normal" font="default" size="100%">464–474</style></pages><isbn><style face="normal" font="default" size="100%">978-3-319-64106-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present the foundations of an architecture that will support the wider context of our work, which is to explore the link between affect, perception and behaviour from an embodied perspective and assess their relevance to Human Robot Interaction (HRI). 
Our approach builds upon existing affect-based architectures by combining artificial hormones with discrete abstract components that are designed with the explicit consideration of influencing, and being receptive to, the wider affective state of the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007/978-3-319-64107-2_36&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ignasi Cos</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Gillies, Andrew</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Hedonic Value: Enhancing Adaptation for Motivated Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Adaptive Behavior</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actor-Critic</style></keyword><keyword><style  face="normal" font="default" size="100%">Grounding</style></keyword><keyword><style  face="normal" font="default" size="100%">Hedonic Value</style></keyword><keyword><style  face="normal" font="default" size="100%">Motivation</style></keyword><keyword><style  face="normal" font="default" size="100%">Reinforcement Learning</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://journals.sagepub.com/doi/10.1177/1059712313486817</style></url></web-urls></urls><publisher><style face="normal" font="default" 
size="100%">SAGE</style></publisher><volume><style face="normal" font="default" size="100%">21</style></volume><pages><style face="normal" font="default" size="100%">465–483</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Reinforcement learning (RL) in the context of artificial agents is typically used to produce behavioural responses as a function of the reward obtained by interaction with the environment. When the problem consists of learning the shortest path to a goal, it is common to use reward functions yielding a fixed value after each decision, for example a positive value if the target location has been attained and a negative one at each intermediate step. However, this fixed strategy may be overly simplistic for agents to adapt to dynamic environments, in which resources may vary from time to time. By contrast, there is significant evidence that most living beings internally modulate reward value as a function of their context to expand their range of adaptivity. Inspired by the potential of this operation, we present a review of its underlying processes and we introduce a simplified formalisation for artificial agents. The performance of this formalism is tested by monitoring the adaptation of an agent endowed with a model of motivated actor-critic, embedded with our formalisation of value and constrained by physiological stability, to environments with different resource distribution. 
Our main result shows that the manner in which reward is internally processed as a function of the agent’s motivational state, strongly influences adaptivity of the behavioural cycles generated and the agent’s physiological stability.</style></abstract><issue><style face="normal" font="default" size="100%">6</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://journals.sagepub.com/doi/10.1177/1059712313486817&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Tom Lenaerts</style></author><author><style face="normal" font="default" size="100%">Mario Giacobini</style></author><author><style face="normal" font="default" size="100%">Hugues Bersini</style></author><author><style face="normal" font="default" size="100%">Paul Bourgine</style></author><author><style face="normal" font="default" size="100%">Marco Dorigo</style></author><author><style face="normal" font="default" size="100%">René Doursat</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding Synthetic Knowledge: An Epistemological Framework and Criteria of Relevance for the Scientific Exploration of Life, Affect and Social Cognition</style></title><secondary-title><style face="normal" font="default" size="100%">Advances In Artificial Life, ECAL 2011 (Proc. 
11th European Conference on Artificial Life)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262297140chap33.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><pages><style face="normal" font="default" size="100%">200–207</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-29714-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In what ways can artificial life contribute to the scientific exploration of cognitive, affective and social processes? In what sense can synthetic models be relevant for the advancement of behavioral and cognitive sciences? This article addresses these questions by way of a case study — an interdisciplinary cooperation between developmental robotics and developmental psychology in the exploration of attachment bonds. Its main aim is to show how the synthetic study of cognition, as well as the synthetic study of life, can find in autopoietic cognitive biology more than a theory useful to inspire the synthetic modelling of the processes under inquiry. We argue that autopoiesis offers, not only to artificial life, but also to the behavioural and social sciences, an epistemological framework able to generate general criteria of relevance for synthetic models of living and cognitive processes. 
By “criteria of relevance” we mean criteria (a) valuable for the three main branches of artificial life (soft, hard, and wet) and (b) useful for determining the significance of the models each branch produces for the scientific exploration of life and cognition. On the basis of these criteria and their application to the case study presented, this article defines a range of different ways that synthetic, and particularly autopoiesis-based models, can be relevant to the inquiries of biological, behavioural and cognitive sciences.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262297140chap33.pdf&quot;&gt;Download&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">C Hasson</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion et cognition: les robots comme outils et modèles</style></title><secondary-title><style face="normal" font="default" size="100%">Systèmes d'interaction émotionnelle</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Lavoisier Hermes Science</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><isbn><style face="normal" font="default" 
size="100%">978-2-7462-2115-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">9</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre Andry</style></author><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">Haizhou Li</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Yeow Kee Tan</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using the Interaction Rhythm as a Natural Reinforcement Signal for Social Robots: A Matter of Belief</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
International Conference on Social Robotics, ICSR 2010</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><volume><style face="normal" font="default" size="100%">6414</style></volume><pages><style face="normal" font="default" size="100%">81–89</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-17247-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we present the results of a pilot study of a human robot interaction experiment where the rhythm of the interaction is used as a reinforcement signal to learn sensorimotor associations. The algorithm uses breaks and variations in the rhythm at which the human is producing actions. The concept is based on the hypothesis that a constant rhythm is an intrinsic property of a positive interaction whereas a break reflects a negative event. Subjects from various backgrounds interacted with a NAO robot where they had to teach the robot to mirror their actions by learning the correct sensorimotor associations. The results show that in order for the rhythm to be a useful reinforcement signal, the subjects have to be convinced that the robot is an agent with which they can act naturally, using their voice and facial expressions as cues to help it understand the correct behaviour to learn. When the subjects do behave naturally, the rhythm and its variations truly reflects how well the interaction is going and helps the robot learn efficiently. 
These results mean that non-expert users can interact naturally and fruitfully with an autonomous robot if the interaction is believed to be natural, without any technical knowledge of the cognitive capacities of the robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kim A. Bard</style></author><author><style face="normal" font="default" size="100%">Ross, Marina Davila</style></author><author><style face="normal" font="default" size="100%">Thorsteinsson, Kate</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kim, Jong-Hwan</style></author><author><style face="normal" font="default" size="100%">Ge, Shuzhi Sam</style></author><author><style face="normal" font="default" size="100%">Vadakkepat, Prahlad</style></author><author><style face="normal" font="default" size="100%">Jesse, Norbert</style></author><author><style face="normal" font="default" size="100%">Al Mamun, Abdullah</style></author><author><style face="normal" font="default" size="100%">Puthusserypady K, Sadasivan</style></author><author><style face="normal" font="default" size="100%">Rückert, Ulrich</style></author><author><style face="normal" font="default" size="100%">Sitte, Joaquin</style></author><author><style face="normal" font="default" size="100%">Witkowski, Ulf</style></author><author><style face="normal" font="default" size="100%">Nakatsu, Ryohei</style></author><author><style face="normal" font="default" size="100%">Braunl, Thomas</style></author><author><style face="normal" font="default" size="100%">Baltes, Jacky</style></author><author><style face="normal" font="default" size="100%">Anderson, 
John</style></author><author><style face="normal" font="default" size="100%">Wong, Ching-Chang</style></author><author><style face="normal" font="default" size="100%">Verner, Igor</style></author><author><style face="normal" font="default" size="100%">Ahlgren, David</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">The Influence of Social Interaction on the Perception of Emotional Expression: A Case Study with a Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Robotics: Proc. FIRA RoboWorld Congress 2009</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-03983-6_10</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Incheon, Korea</style></pub-location><volume><style face="normal" font="default" size="100%">5744</style></volume><pages><style face="normal" font="default" size="100%">63–72</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-03983-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we focus primarily on the influence that socio-emotional interaction has on the perception of emotional expression by a robot. 
We also investigate and discuss the importance of emotion expression in socially interactive situations involving human robot interaction (HRI), and show the importance of utilising emotion expression when dealing with interactive robots, that are to learn and develop in socially situated environments. We discuss early expressional development and the function of emotion in communication in humans and how this can improve HRI communications. Finally we provide experimental results showing how emotion-rich interaction via emotion expression can affect the HRI process by providing additional information.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>19</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Reconnaissance et résonance émotionnelle face à un humain et à un robot chez des enfants typiques et des enfants avec autisme de haut niveau</style></title><secondary-title><style face="normal" font="default" size="100%">Bulletin scientifique de l’Arapi</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" 
size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">P Blancard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Human Responses to an Expressive Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Sixth International Workshop on Epigenetic Robotics</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/128/Nadeletal.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><volume><style face="normal" font="default" size="100%">128</style></volume><pages><style face="normal" font="default" size="100%">79–86</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-6-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper reports the results of the first study comparing subjects' responses to robotic emotional facial displays and human emotional facial displays.
It describes step by step the building of believable emotional expressions in a robotic head, the problems raised by a comparative approach of robotic and human expressions, and the solutions found in order to ensure a valid comparison. Twenty adults and 15 children aged 3 were presented static (photos) and dynamic (2-D videoclips, or 3-D live) displays of emotional expressions presented by a robot or a person.
The study compares two dependent variables: emotional resonance (automatic facial feed-back during an emotional display) and emotion recognition (emotion labeling) according to partners (robot or person) and to the nature of the display (static or dynamic). Results for emotional resonance were similar with young children and with adults. Both groups resonated significantly more to dynamic displays than to static displays, be they robotic expressions or human expressions. In both groups, emotion recognition was easier for human expressions than for robotic ones.
Unlike children that recognized more easily emotional expressions dynamically displayed, adults scored higher with static displays thus reflecting a cognitive strategy independent from emotional resonance. Results are discussed in the perspective of the therapeutic use of this comparative approach with children with autism that are described as impaired in emotion sharing and communication.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Gillies, Andrew</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joanna J Bryson</style></author><author><style face="normal" font="default" size="100%">Tony J Prescott</style></author><author><style face="normal" font="default" size="100%">Anil K Seth</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Ecological Integration of Affordances and Drives for Behaviour Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
IJCAI 2005 Workshop on Modeling Natural Action Selection</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">225–228</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-40-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper shows a study of the integration of physiology and perception in a biologically inspired robotic architecture that learns behavioural patterns by interaction with the environment. This implements a hierarchical view of learning and behaviour selection which bases adaptation on a relationship between reinforcement and the agent’s inner motivations. This view ingrains together the basic principles necessary to explain the underlying processes of learning behavioural patterns and the way these change via interaction with the environment. 
These principles have been experimentally tested and the results are presented and discussed throughout the paper.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Darwin Muir</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion Understanding: Robots as Tools and Models</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional Development: Recent Research Advances</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Oxford University Press</style></publisher><pages><style face="normal" font="default" size="100%">235–258</style></pages><isbn><style face="normal" font="default" size="100%">0-19-85-2883-3 (Hbk) 0-19-85-2884-1 (Pbk)</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">9</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Garijo, Francisco J</style></author><author><style face="normal" font="default" size="100%">Riquelme, José 
C</style></author><author><style face="normal" font="default" size="100%">Toro, Miguel</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Comparing a Voting-Based Policy with Winner-Takes-All to Perform Action Selection in Motivational Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Artificial Intelligence – IBERAMIA 2002; Proc. 8th Ibero-American Conference on AI</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Seville, Spain</style></pub-location><volume><style face="normal" font="default" size="100%">2527</style></volume><pages><style face="normal" font="default" size="100%">855–864</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-00131-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Embodied autonomous agents are systems that inhabit dynamic, unpredictable environments in which they try to satisfy a set of time-dependent goals or motivations in order to survive. One of the problems that this implies is action selection, the task of resolving conflicts between competing behavioral alternatives. We present an experimental comparison of two action selection mechanisms (ASM), implementing &quot;winner-takes-all&quot; (WTA) and &quot;voting-based&quot; (VB) policies respectively, modeled using a motivational behavior-based approach. 
This research shows the adequacy of these two ASM with respect to different sources of environmental complexity and the tendency of each of them to show different behavioral phenomena.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Eduard Giménez</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">First Proposal for an Agent Architecture for Team and Multiple Task Coordination: A Case Study in Robotic Soccer</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2nd Catalan Conference on Artificial Intelligence (CCIA'99)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/1999</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Girona, Spain</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we propose a general agent architecture from which heterogeneous agents can be easily derived. Based on roles and ploy patterns, each agent is capable of performing its own individual duty while cooperating with other agents. We also describe a mechanism for real-time coordination of multiple behaviors.</style></abstract></record></records></xml>