<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Robots to Model Mental Disorders</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. UK-RAS Conference: 'Robots Working For &amp; Among Us', 2017</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><pub-location><style face="normal" font="default" size="100%">Bristol, UK</style></pub-location><pages><style face="normal" font="default" size="100%">121–123</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We are currently at a point where the use of robots to model human mental disorders is possible, and this capability will only increase. 
By considering the lessons learned from animal models, we argue that robot models of human mental disorders can complement existing approaches in mental health research.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/UK-RAS_2017_Robot_Models_proceedings.pdf&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Perlin Noise to Generate Emotional Expressions in a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Annual Meeting of the Cognitive Science Society (CogSci 2013)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mindmodeling.org/cogsci2013/papers/0343/index.html</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Cognitive Science Society</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin, Germany</style></pub-location><pages><style face="normal" font="default" size="100%">1845–1850</style></pages><isbn><style face="normal" font="default" size="100%">978-0-9768318-9-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The development of social robots that convey emotion with their bodies---instead of or in conjunction with their faces---is an increasingly active research topic in the field of human-robot interaction (HRI). Rather than focusing either on postural or on dynamics aspects of bodily expression in isolation, we present a model and an empirical study where we combine both elements and produce expressive behaviors by adding dynamic elements (in the form of Perlin noise) to a subset of static postures prototypical of basic emotions, with the aim of creating expressions easily understandable by children and at the same time lively and flexible enough to be believable and engaging. 
Results show that the noise increases the recognition rate of the emotions portrayed by the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mindmodeling.org/cogsci2013/papers/0343/index.html&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre Andry</style></author><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">Haizhou Li</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Yeow Kee Tan</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using the Interaction Rhythm as a Natural Reinforcement Signal for Social Robots: A Matter of Belief</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
International Conference on Social Robotics, ICSR 2010</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><volume><style face="normal" font="default" size="100%">6414</style></volume><pages><style face="normal" font="default" size="100%">81–89</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-17247-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we present the results of a pilot study of a human robot interaction experiment where the rhythm of the interaction is used as a reinforcement signal to learn sensorimotor associations. The algorithm uses breaks and variations in the rhythm at which the human is producing actions. The concept is based on the hypothesis that a constant rhythm is an intrinsic property of a positive interaction whereas a break reflects a negative event. Subjects from various backgrounds interacted with a NAO robot where they had to teach the robot to mirror their actions by learning the correct sensorimotor associations. The results show that in order for the rhythm to be a useful reinforcement signal, the subjects have to be convinced that the robot is an agent with which they can act naturally, using their voice and facial expressions as cues to help it understand the correct behaviour to learn. When the subjects do behave naturally, the rhythm and its variations truly reflects how well the interaction is going and helps the robot learn efficiently. 
These results mean that non-expert users can interact naturally and fruitfully with an autonomous robot if the interaction is believed to be natural, without any technical knowledge of the cognitive capacities of the robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Demiris, Y</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Visual Velocity Detection to Achieve Synchronization in Imitation</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 3rd Int. Symposium on Imitation in Animals and Artifacts</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aisb.org.uk/publications/proceedings/aisb2005/3_Imitation_Final.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Hatfield, UK</style></pub-location><pages><style face="normal" font="default" size="100%">26–29</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-42-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Synchronization and coordination are important mechanisms involved in imitation and social interaction. In this paper, we study different methods to improve the reactivity of agents to changes in their environment in different coordination tasks. 
In a robot synchronization task, we compare the differences between using only position detection or velocity detection. We first test an existing position detection approach, and then we compare the results with those obtained using a novel method that takes advantage of visual detection of velocity. We test and discuss the applicability of these two methods in several coordination scenarios, to conclude by seeing how to combine the advantages of both methods.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using a SOFM to learn Object Affordances</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 5th Workshop of Physical Agents (WAF'04)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/handle/2299/9905</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">University of Edinburgh</style></publisher><pub-location><style face="normal" font="default" size="100%">Girona, Spain</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Learning affordances can be defined as learning action potentials, i.e., learning that an object exhibiting certain regularities offers the possibility of performing a particular action. 
We propose a method to endow an agent with the capability of acquiring this knowledge by relating the object invariants with the potentiality of performing an action via interaction episodes with each object. We introduce a biologically inspired model to test this learning hypothesis and a set of experiments to check its validity in a Webots simulator with a Khepera robot in a simple environment. The experiment set aims to show the use of a GWR network to cluster the sensory input of the agent; furthermore, that the aforementioned algorithm for neural clustering can be used as a starting point to build agents that learn the relevant functional bindings between the cues in the environment and the internal needs of an agent.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://hdl.handle.net/2299/9905&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Stefan Schaal</style></author><author><style face="normal" font="default" size="100%">Auke Jan Ijspeert</style></author><author><style face="normal" font="default" size="100%">Aude Billard</style></author><author><style face="normal" font="default" size="100%">Sethu Vijayakumar</style></author><author><style face="normal" font="default" size="100%">John Hallam</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Hormonal Feedback to Modulate Action Selection in a Competitive Scenario</style></title><secondary-title><style 
face="normal" font="default" size="100%">From Animals to Animats 8: Proc. 8th Intl. Conf. on Simulation of Adaptive Behavior (SAB'04)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.researchgate.net/profile/Orlando_Avila-Garcia/publication/228958663_Using_Hormonal_Feedback_to_Modulate_Action_Selection_in_a_Competitive_Scenario/links/0deec533c8411ebe0c000000.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Los Angeles, USA</style></pub-location><pages><style face="normal" font="default" size="100%">243–252</style></pages><isbn><style face="normal" font="default" size="100%">9780262693417</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we investigate the use of hormonal feedback as a mechanism to modulate a &quot;motivation-based,&quot; homeostatic action selection mechanism (ASM) in a robot. We have framed our study in the context of a dynamic, multirobot, competitive &quot;two-resource&quot; action selection problem. The introduction of competitors has important consequences for action selection. We first show how the interaction between robots introduces new forms of environmental complexity that affect their viability. Secondly, we propose a &quot;hormone-like&quot; mechanism that, modulating the input of the ASM, tackles these new sources of complexity.</style></abstract></record></records></xml>