<?xml version="1.0" encoding="UTF-8"?><?xml-stylesheet type="text/xsl" href="static/style.xsl"?><OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2026-04-27T20:15:23Z</responseDate><request verb="GetRecord" identifier="oai:uvadoc.uva.es:10324/80154" metadataPrefix="dim">https://uvadoc.uva.es/oai/request</request><GetRecord><record><header><identifier>oai:uvadoc.uva.es:10324/80154</identifier><datestamp>2025-12-19T13:37:11Z</datestamp><setSpec>com_10324_1191</setSpec><setSpec>com_10324_931</setSpec><setSpec>com_10324_894</setSpec><setSpec>col_10324_1379</setSpec></header><metadata><dim:dim xmlns:dim="http://www.dspace.org/xmlns/dspace/dim" xmlns:doc="http://www.lyncode.com/xoai" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.dspace.org/xmlns/dspace/dim http://www.dspace.org/schema/dim.xsd">
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="da4025d3-342c-4066-9d75-05f28b66027d" confidence="500" orcid_id="">Alvarez, Marcos Lazaro</dim:field>
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="9fdfd9ec47984d2e" confidence="600" orcid_id="0000-0003-3370-3338">Bahillo Martínez, Alfonso</dim:field>
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="ff728a09-eb94-4fbc-8616-3864866f8ae6">Arjona, Laura</dim:field>
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="3e614374-94dd-44f2-98f0-1aa9499a9891">Marcelo Nogueira, Diogo</dim:field>
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="a90a8e70-663d-417a-861d-9db9c9e5aca1">Ferreira Gomes, Elsa</dim:field>
<dim:field mdschema="dc" element="contributor" qualifier="author" authority="03208f00-a1c8-476f-bee7-c83de8256a0b">Jorge, Alípio M.</dim:field>
<dim:field mdschema="dc" element="date" qualifier="accessioned">2025-11-28T12:26:42Z</dim:field>
<dim:field mdschema="dc" element="date" qualifier="available">2025-11-28T12:26:42Z</dim:field>
<dim:field mdschema="dc" element="date" qualifier="issued">2025</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="citation" lang="es">IEEE Access, 2025, vol. 13, pp. 127240-127251</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="issn" lang="es">2169-3536</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="uri">https://uvadoc.uva.es/handle/10324/80154</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="doi" lang="es">10.1109/ACCESS.2025.3590626</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="publicationfirstpage" lang="es">127240</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="publicationlastpage" lang="es">127251</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="publicationtitle" lang="es">IEEE Access</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="publicationvolume" lang="es">13</dim:field>
<dim:field mdschema="dc" element="identifier" qualifier="essn" lang="es">2169-3536</dim:field>
<dim:field mdschema="dc" element="description" lang="es">Producción Científica</dim:field>
<dim:field mdschema="dc" element="description" qualifier="abstract" lang="es">Sound-based uroflowmetry (SU) is a non-invasive technique emerging as an alternative to traditional uroflowmetry (UF) to calculate the voiding flow rate based on the sound generated by the urine impacting the water in a toilet, enabling remote monitoring and reducing the patient burden and clinical costs. This study trains four different machine learning (ML) models (random forest, gradient boosting, support vector machine and convolutional neural network) using both regression and classification approaches to predict and categorize the voiding flow rate from sound events. The models were trained with a dataset that contains sounds from synthetic void events generated with a high precision peristaltic pump and a traditional toilet. Sound was simultaneously recorded with three devices: Ultramic384k, Mi A1 smartphone and Oppo Smartwatch. To extract the audio features, our analysis showed that segmenting the audio signals into 1000 ms segments with frequencies up to 16 kHz provided the best results. Results show that random forest achieved the best performance in both regression and classification tasks, with a mean absolute error (MAE) of 0.9, 0.7 and 0.9 ml/s and quadratic weighted kappa (QWK) of 0.99, 1.0 and 1.0 for the three devices. To evaluate the models in a real environment and assess the effectiveness of training with synthetic data, the best-performing models were retrained and validated using a real voiding sounds dataset. The results reported an MAE below 2.5 ml/s and a QWK above 0.86 for regression and classification tasks, respectively.</dim:field>
<dim:field mdschema="dc" element="description" qualifier="project" lang="es">Ministerio de Ciencia, Innovación y Universidades (MICIU) a través del proyecto SWALU CPP2022-010045</dim:field>
<dim:field mdschema="dc" element="description" qualifier="project" lang="es">2020 “Ayuda para contratos predoctorales” financiada por MICIU y la Agencia Estatal de Investigación (AEI), 10.13039/501100011033 y cofinanciada por el Fondo Social Europeo (FSE) bajo el lema “FSE invierte en tu futuro”, proyecto PRE2020-095612</dim:field>
<dim:field mdschema="dc" element="description" qualifier="project" lang="es">Gobierno Vasco a través del Hazitek Program bajo el proyecto BATHMIC ZL-2024/00481</dim:field>
<dim:field mdschema="dc" element="description" qualifier="project" lang="es">Ministerio a través del proyecto Aginplace financiado por MICIU, AEI/10.13039/501100011033 y por la Unión Europea (UE) a través del Fondo Europeo de Desarrollo Regional (FEDER), proyecto PID2023-146254OB-C41 y PID2023-146254OA-C44</dim:field>
<dim:field mdschema="dc" element="format" qualifier="mimetype" lang="es">application/pdf</dim:field>
<dim:field mdschema="dc" element="language" qualifier="iso" lang="es">eng</dim:field>
<dim:field mdschema="dc" element="publisher" lang="es">IEEE</dim:field>
<dim:field mdschema="dc" element="rights" qualifier="accessRights" lang="es">info:eu-repo/semantics/openAccess</dim:field>
<dim:field mdschema="dc" element="rights" qualifier="uri" lang="*">http://creativecommons.org/licenses/by/4.0/</dim:field>
<dim:field mdschema="dc" element="rights" qualifier="holder">© 2025 The Authors</dim:field>
<dim:field mdschema="dc" element="rights" lang="*">Atribución 4.0 Internacional</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="classification" lang="es">Machine learning</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="classification" lang="es">non-invasive voiding monitoring</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="classification" lang="es">sound-based uroflowmetry</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="classification" lang="es">sound voiding signals</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="classification" lang="es">voiding flow estimation</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="unesco">3311.10 Instrumentos Médicos</dim:field>
<dim:field mdschema="dc" element="subject" qualifier="unesco">1203 Ciencia de Los Ordenadores</dim:field>
<dim:field mdschema="dc" element="title" lang="es">Leveraging Synthetic Data to Develop a Machine Learning Model for Voiding Flow Rate Prediction From Audio Signals</dim:field>
<dim:field mdschema="dc" element="type" lang="es">info:eu-repo/semantics/article</dim:field>
<dim:field mdschema="dc" element="type" qualifier="hasVersion" lang="es">info:eu-repo/semantics/publishedVersion</dim:field>
<dim:field mdschema="dc" element="relation" qualifier="publisherversion" lang="es">https://ieeexplore.ieee.org/document/11084787</dim:field>
<dim:field mdschema="dc" element="peerreviewed" lang="es">SI</dim:field>
</dim:dim></metadata></record></GetRecord></OAI-PMH>