<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-27T12:58:04Z</responseDate><request verb="GetRecord" identifier="oai:uvadoc.uva.es:10324/21061" metadataPrefix="edm">https://uvadoc.uva.es/oai/request</request><GetRecord><record><header><identifier>oai:uvadoc.uva.es:10324/21061</identifier><datestamp>2021-06-23T11:20:23Z</datestamp><setSpec>com_10324_1168</setSpec><setSpec>com_10324_931</setSpec><setSpec>com_10324_894</setSpec><setSpec>col_10324_1302</setSpec></header><metadata><rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:doc="http://www.lyncode.com/xoai" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ore="http://www.openarchives.org/ore/terms/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:ds="http://dspace.org/ds/elements/1.1/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:edm="http://www.europeana.eu/schemas/edm/" xsi:schemaLocation="http://www.w3.org/1999/02/22-rdf-syntax-ns# http://www.europeana.eu/schemas/edm/EDM.xsd">
<edm:ProvidedCHO rdf:about="http://uvadoc.uva.es/handle/10324/21061">
<dc:creator>Marcos Pablos, Samuel</dc:creator>
<dc:creator>Gómez García-Bermejo, Jaime</dc:creator>
<dc:creator>Zalama Casanova, Eduardo</dc:creator>
<dc:creator>López, Joaquín</dc:creator>
<dc:date>2015</dc:date>
<dc:description>Producción Científica</dc:description>
<dc:description>As part of a multimodal animated interface previously presented in [38], in this paper we describe a method for dynamic recognition of displayed facial emotions on low resolution streaming images. First, we address the detection of Action Units of the Facial Action Coding System upon Active Shape Models and Gabor filters. Normalized outputs of the Action Unit recognition step are then used as inputs for a neural network which is based on real cognitive systems architecture, and consists of a habituation network plus a competitive network. Both the competitive and the habituation layer use differential equations thus taking into account the dynamic information of facial expressions through time. Experimental results carried out on live video sequences and on the Cohn-Kanade face database show that the proposed method provides high recognition hit rates.</dc:description>
<dc:format>application/pdf</dc:format>
<dc:identifier>http://uvadoc.uva.es/handle/10324/21061</dc:identifier>
<dc:language>eng</dc:language>
<dc:publisher>Oxford University Press</dc:publisher>
<dc:subject>Robots</dc:subject>
<dc:subject>Realidad virtual</dc:subject>
<dc:title>Dynamic Facial Emotion Recognition Oriented to HCI Applications</dc:title>
<dc:type>info:eu-repo/semantics/article</dc:type>
<edm:type>TEXT</edm:type>
</edm:ProvidedCHO>
<ore:Aggregation rdf:about="http://uvadoc.uva.es/handle/10324/21061#aggregation">
<edm:aggregatedCHO rdf:resource="http://uvadoc.uva.es/handle/10324/21061"/>
<edm:dataProvider>UVaDOC. Repositorio Documental de la Universidad de Valladolid</edm:dataProvider>
<edm:isShownAt rdf:resource="http://uvadoc.uva.es/handle/10324/21061"/>
<edm:isShownBy rdf:resource="https://uvadoc.uva.es/bitstream/10324/21061/1/Dynamic-facial-emotion-recognition-preprint.pdf"/>
<edm:provider>Hispana</edm:provider>
<edm:rights rdf:resource="http://creativecommons.org/licenses/by-nc-nd/4.0/"/>
</ore:Aggregation>
<edm:WebResource rdf:about="https://uvadoc.uva.es/bitstream/10324/21061/1/Dynamic-facial-emotion-recognition-preprint.pdf">
<edm:rights rdf:resource="http://creativecommons.org/licenses/by-nc-nd/4.0/"/>
</edm:WebResource>
</rdf:RDF></metadata></record></GetRecord></OAI-PMH>