@article{cos2013hedonic,
  author    = {Cos, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M. and Gillies, Andrew},
  title     = {Hedonic Value: Enhancing Adaptation for Motivated Agents},
  journal   = {Adaptive Behavior},
  volume    = {21},
  pages     = {465--483},
  year      = {2013},
  publisher = {SAGE},
  issn      = {1059-7123},
  doi       = {10.1177/1059712313486817},
  url       = {https://journals.sagepub.com/doi/10.1177/1059712313486817},
  keywords  = {Actor-Critic, Grounding, Hedonic Value, Motivation, Reinforcement Learning},
  abstract  = {Reinforcement learning (RL) in the context of artificial agents is typically used to produce behavioural responses as a function of the reward obtained by interaction with the environment. When the problem consists of learning the shortest path to a goal, it is common to use reward functions yielding a fixed value after each decision, for example a positive value if the target location has been attained and a negative one at each intermediate step. However, this fixed strategy may be overly simplistic for agents to adapt to dynamic environments, in which resources may vary from time to time. By contrast, there is significant evidence that most living beings internally modulate reward value as a function of their context to expand their range of adaptivity. Inspired by the potential of this operation, we present a review of its underlying processes and we introduce a simplified formalisation for artificial agents. The performance of this formalism is tested by monitoring the adaptation of an agent endowed with a model of motivated actor-critic, embedded with our formalisation of value and constrained by physiological stability, to environments with different resource distribution. Our main result shows that the manner in which reward is internally processed as a function of the agent's motivational state, strongly influences adaptivity of the behavioural cycles generated and the agent's physiological stability.},
}

@article{cos2010affordances,
  author    = {Cos, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M.},
  title     = {Learning Affordances of Consummatory Behaviors: Motivation-Driven Adaptive Perception},
  journal   = {Adaptive Behavior},
  volume    = {18},
  pages     = {285--314},
  year      = {2010},
  publisher = {SAGE},
  doi       = {10.1177/1059712310375471},
  url       = {https://journals.sagepub.com/doi/10.1177/1059712310375471},
  abstract  = {This article introduces a formalization of the dynamics between sensorimotor interaction and homeostasis, integrated in a single architecture to learn object affordances of consummatory behaviors. We also describe the principles necessary to learn grounded knowledge in the context of an agent and its surrounding environment, which we use to investigate the constraints imposed by the agent's internal dynamics and the environment. This is tested with an embodied, situated robot, in a simulated environment, yielding results that support this formalization. Furthermore, we show that this methodology allows learned affordances to be dynamically redefined, depending on object similarity, resource availability, and the rhythms of the agent's internal physiology. For example, if a resource becomes increasingly scarce, the value assigned by the agent to its related effect increases accordingly, encouraging a more active behavioral strategy to maintain physiological stability. Experimental results also suggest that a combination of motivation-driven and affordance learning in a single architecture should simplify its overall complexity while increasing its adaptivity.},
}

@inproceedings{cos2005ecological,
  author    = {Cos-Aguilera, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M. and Gillies, Andrew},
  title     = {Ecological Integration of Affordances and Drives for Behaviour Selection},
  booktitle = {Proc. {IJCAI} 2005 Workshop on Modeling Natural Action Selection},
  editor    = {Bryson, Joanna J. and Prescott, Tony J. and Seth, Anil K.},
  pages     = {225--228},
  address   = {Edinburgh, Scotland},
  year      = {2005},
  isbn      = {1-902956-40-9},
  abstract  = {This paper shows a study of the integration of physiology and perception in a biologically inspired robotic architecture that learns behavioural patterns by interaction with the environment. This implements a hierarchical view of learning and behaviour selection which bases adaptation on a relationship between reinforcement and the agent's inner motivations. This view ingrains together the basic principles necessary to explain the underlying processes of learning behavioural patterns and the way these change via interaction with the environment. These principles have been experimentally tested and the results are presented and discussed throughout the paper.},
}

@inproceedings{cos2005motivation,
  author       = {Cos-Aguilera, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M.},
  title        = {Motivation Driven Learning of Action Affordances},
  booktitle    = {Proceedings of the Symposium on Agents that Want and Like: Motivational and Emotional Roots of Cognition and Action ({SSAISB}'05)},
  editor       = {Ca{\~n}amero, Lola},
  pages        = {33--36},
  publisher    = {AISB},
  organization = {AISB},
  address      = {Hatfield, UK},
  year         = {2005},
  isbn         = {1-902956-41-7},
  url          = {https://aisb.org.uk/wp-content/uploads/2019/12/2_Agents_Final.pdf},
  abstract     = {Survival in the animal realm often depends on the ability to elucidate the potentialities for action offered by every situation. This paper argues that affordance learning is a powerful ability for adaptive, embodied, situated agents, and presents a motivation-driven method for their learning. The method proposed considers the agent and its environment as a single unit, thus intrinsically relating agent's interactions to fluctuations of the agent's internal motivation. Being that the motivational state is an expression of the agent's physiology, the existing causality of interactions and their effect on the motivational state is exploited as a principle to learn object affordances. The hypothesis is tested in a Webots 4.0 simulator with a Khepera robot.},
}

@inproceedings{cos2004sofm,
  author       = {Cos-Aguilera, Ignasi and Hayes, Gillian M. and Ca{\~n}amero, Lola},
  title        = {Using a {SOFM} to Learn Object Affordances},
  booktitle    = {Proc. 5th Workshop of Physical Agents ({WAF}'04)},
  publisher    = {University of Edinburgh},
  organization = {University of Edinburgh},
  address      = {Girona, Spain},
  year         = {2004},
  url          = {https://uhra.herts.ac.uk/handle/2299/9905},
  abstract     = {Learning affordances can be defined as learning action potentials, i.e., learning that an object exhibiting certain regularities offers the possibility of performing a particular action. We propose a method to endow an agent with the capability of acquiring this knowledge by relating the object invariants with the potentiality of performing an action via interaction episodes with each object. We introduce a biologically inspired model to test this learning hypothesis and a set of experiments to check its validity in a Webots simulator with a Khepera robot in a simple environment. The experiment set aims to show the use of a GWR network to cluster the sensory input of the agent; furthermore, that the aforementioned algorithm for neural clustering can be used as a starting point to build agents that learn the relevant functional bindings between the cues in the environment and the internal needs of an agent.},
}

@inproceedings{cos2003functionalities,
  author    = {Cos-Aguilera, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M.},
  title     = {Learning Object Functionalities in the Context of Action Selection},
  booktitle = {Towards Intelligent Mobile Robots, {TIMR}'03: 4th British Conference on Mobile Robotics},
  editor    = {Nehmzow, U. and Melhuish, C.},
  address   = {University of the West of England, Bristol},
  year      = {2003},
}

@inproceedings{cos2003motivation,
  author    = {Cos-Aguilera, Ignasi and Ca{\~n}amero, Lola and Hayes, Gillian M.},
  title     = {Motivation-Driven Learning of Object Affordances: First Experiments Using a Simulated {Khepera} Robot},
  booktitle = {Proc. 5th International Conference on Cognitive Modelling ({ICCM}'03)},
  editor    = {Detje, Frank and D{\"o}rner, Dietrich and Schaub, Harald},
  pages     = {57--62},
  address   = {Bamberg, Germany},
  year      = {2003},
}