@article{oai:muroran-it.repo.nii.ac.jp:02000063,
  author        = {Ishimaru, Momoko and Okada, Yoshifumi and Uchiyama, Ryunosuke and Horiguchi, Ryo and Toyoshima, Itsuki},
  title         = {A New Regression Model for Depression Severity Prediction Based on Correlation among Audio Features Using a Graph Convolutional Neural Network},
  journal       = {Diagnostics},
  volume        = {13},
  number        = {4},
  month         = feb,
  year          = {2023},
  abstract      = {Recent studies have revealed mutually correlated audio features in the voices of depressed patients. Thus, the voices of these patients can be characterized based on the combinatorial relationships among the audio features. To date, many deep learning--based methods have been proposed to predict the depression severity using audio data. However, existing methods have assumed that the individual audio features are independent. Hence, in this paper, we propose a new deep learning--based regression model that allows for the prediction of depression severity on the basis of the correlation among audio features. The proposed model was developed using a graph convolutional neural network. This model trains the voice characteristics using graph-structured data generated to express the correlation among audio features. We conducted prediction experiments on depression severity using the DAIC-WOZ dataset employed in several previous studies. The experimental results showed that the proposed model achieved a root mean square error (RMSE) of 2.15, a mean absolute error (MAE) of 1.25, and a symmetric mean absolute percentage error of 50.96\%. Notably, RMSE and MAE significantly outperformed the existing state-of-the-art prediction methods. From these results, we conclude that the proposed model can be a promising tool for depression diagnosis.},
  internal-note = {Native-script author names from the repository export: 石丸, 桃子 (Ishimaru); 岡田, 吉史 (Okada); 内山, 竜之介 (Uchiyama); 堀口, 凌 (Horiguchi); 豊島, 依槻 (Toyoshima). Field ignored by standard styles; kept for provenance.},
}