<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e72110</article-id><article-id pub-id-type="doi">10.2196/72110</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Development of a Deep Learning&#x2013;Based Feedback Model to Assist Medical Students Learning Renal Ultrasound Acquisition: Mixed Methods Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Hwang</surname><given-names>Andy Cheuk Nam</given-names></name><degrees>MMedSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Singh</surname><given-names>Rahul</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Barrett</surname><given-names>Elizabeth Ann</given-names></name><degrees>PhD</degrees><xref ref-type="aff" 
rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Cao</surname><given-names>Peng</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Vardhanabhuti</surname><given-names>Varut</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ng</surname><given-names>Pauline Yeung</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wong</surname><given-names>Gordon Tin Chun</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Co</surname><given-names>Michael Tiong Hong</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Lee</surname><given-names>Elaine Yuen-Phin</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Diagnostic Radiology, School of Clinical Medicine, Li Ka Shing Faculty of Medicine, The University of Hong Kong</institution><addr-line>21 Sassoon Rd, Pok Fu Lam</addr-line><addr-line>Hong Kong</addr-line><country>China</country></aff><aff id="aff2"><institution>Academic Unit of Human Communication, Learning, and Development, Faculty of Education, The University of Hong Kong</institution><addr-line>Hong Kong</addr-line><country>China</country></aff><aff id="aff3"><institution>Critical Care Medicine Unit, School of Clinical Medicine, Li Ka Shing Faculty of Medicine, The University of Hong Kong</institution><addr-line>Hong Kong</addr-line><country>China</country></aff><aff 
id="aff4"><institution>Department of Anaesthesiology, School of Clinical Medicine, Li Ka Shing Faculty of Medicine, The University of Hong Kong</institution><addr-line>Hong Kong</addr-line><country>China</country></aff><aff id="aff5"><institution>Department of Surgery, School of Clinical Medicine, Li Ka Shing Faculty of Medicine, The University of Hong Kong</institution><addr-line>Hong Kong</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Leung</surname><given-names>Tiffany</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Jenq</surname><given-names>Chang-Chyi</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Ma</surname><given-names>Chunwei</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Meuwly</surname><given-names>Jean-Yves</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Arockiasamy</surname><given-names>Jesu Marcus Immanuvel</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Elaine Yuen-Phin Lee, MD, Department of Diagnostic Radiology, School of Clinical Medicine, Li Ka Shing Faculty of Medicine, The University of Hong Kong, 21 Sassoon Rd, Pok Fu Lam, Hong Kong, China, +852 22553307; <email>eyplee77@hku.hk</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>9</day><month>3</month><year>2026</year></pub-date><volume>12</volume><elocation-id>e72110</elocation-id><history><date date-type="received"><day>04</day><month>02</month><year>2025</year></date><date 
date-type="accepted"><day>31</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Andy Cheuk Nam Hwang, Rahul Singh, Elizabeth Ann Barrett, Peng Cao, Varut Vardhanabhuti, Pauline Yeung Ng, Gordon Tin Chun Wong, Michael Tiong Hong Co, Elaine Yuen-Phin Lee. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 9.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2026/1/e72110"/><abstract><sec><title>Background</title><p>Point-of-care ultrasound training is being increasingly integrated into undergraduate medical education, leading to a substantial demand for trained faculty to provide instruction and feedback.</p></sec><sec><title>Objective</title><p>This study aimed to develop an adjunct tool, a deep learning&#x2013;based feedback model, to facilitate student learning.</p></sec><sec sec-type="methods"><title>Methods</title><p>Renal ultrasound images (N=2807) were used to train a cascaded deep learning&#x2013;based feedback model that classified images into three categories: optimal, suboptimal, and incorrect. Suboptimal images were further subcategorized as images with artifact, incorrect gain, and/or incorrect positioning. The model was deployed among year 5 medical students receiving bedside ultrasound training, who were invited to upload renal ultrasound images to an online platform for automated image quality grading and feedback. A mixed methods analysis was used to evaluate students&#x2019; learning experience. Focus group interviews were organized to qualitatively analyze the successes and challenges of implementation. Quantitative analysis was based on responses to a 5-point Likert scale questionnaire and performance on the objective structured clinical examination (OSCE). 
Objective structured clinical examination scores were compared with mean OSCE scores from the 2 years preceding implementation of the deep learning&#x2013;based feedback model.</p></sec><sec sec-type="results"><title>Results</title><p>Focus group interviews identified that the deep learning&#x2013;based feedback model encouraged self-regulated learning but also recognized that discordant curricular design and hardware limitations impeded its use. The 11-item online questionnaire had a response rate of 42.4% (98/231 students). Among respondents, 32% (31/98) to 48% (47/98) found the model helpful in assisting ultrasound training (Likert score of 4&#x2010;5 for items 1-3), while 49% (48/98) to 76% (74/98) were satisfied with its usability and their interaction with the model (Likert score of 4&#x2010;5 for items 4-11). The mean OSCE score was 9.73 (SD 0.76) out of 10, compared with mean scores of 9.35 (SD 1.03; <italic>P</italic>=.06) and 9.45 (SD 0.97; <italic>P</italic>=.15) out of 10 in the 2 individual years preceding implementation of the model.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>A cascaded deep learning&#x2013;based feedback model was developed and deployed among year 5 medical students receiving bedside ultrasound training, with positive learner responses and enhanced self-regulated learning. The innovation was associated with increased student engagement and improved ultrasound skill acquisition among novice learners.</p></sec></abstract><kwd-group><kwd>point-of-care ultrasound</kwd><kwd>renal ultrasound learning</kwd><kwd>deep learning</kwd><kwd>convolutional neural networks</kwd><kwd>automated feedback system</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Point-of-care ultrasound (POCUS) refers to the use of ultrasound imaging to facilitate clinical diagnosis and management while patients are being treated. 
Substantial evidence supports POCUS in aiding diagnosis and improving bedside procedures and clinical management [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. To address this clinical need, the next generation of clinicians involved in acute care should master skills such as image acquisition, interpretation, and clinical integration of POCUS findings. Therefore, ultrasound training is increasingly being introduced and incorporated into undergraduate medical education (UME). In 2019, a survey conducted in the United States reported that 72.6% of 168 accredited medical schools included an ultrasound curriculum in their UME [<xref ref-type="bibr" rid="ref4">4</xref>]. Cross-specialty POCUS training has been shown to augment physical examination skills among undergraduate learners and to lay the foundation for future postgraduate training [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. However, the demand for trained faculty and tutors remains a major barrier to implementing ultrasound curricula in UME; 63% of medical schools in the United States reported that they did not have trained faculty for POCUS instruction [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Ultrasound imaging across different organs has variable learning curves, reflecting different rates of skill acquisition. Among these, renal ultrasound has a relatively longer learning curve [<xref ref-type="bibr" rid="ref8">8</xref>], suggesting that it is moderately challenging to students. This underscores the need for additional practice and faculty guidance to support skill development. Different pedagogical approaches in renal and abdominal ultrasound training have demonstrated comparable improvements in learner proficiency [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. 
This suggests that renal ultrasound skill acquisition is adaptable to different pedagogical approaches to support student learning.</p><p>Feedback is important to student learning because it facilitates self-reflection, understanding, and future improvement, particularly in skill acquisition and mastery [<xref ref-type="bibr" rid="ref11">11</xref>]. Effective feedback positions students as active learners in the feedback process, empowering them to understand their performance and develop evaluative judgment to improve learning [<xref ref-type="bibr" rid="ref12">12</xref>]. Feedback is most beneficial when immediate, external feedback on a specific task is provided [<xref ref-type="bibr" rid="ref11">11</xref>]. Students benefit from multiple opportunities to engage with feedback from different sources [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. However, feedback is often neither adequately provided nor delivered effectively [<xref ref-type="bibr" rid="ref15">15</xref>]. This challenge is exacerbated in large-scale higher education [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. A technology-enabled feedback process may streamline practice and address challenges associated with low instructor-to-student ratios and the inability to provide on-demand feedback [<xref ref-type="bibr" rid="ref18">18</xref>]. It can customize students&#x2019; learning by allowing them to determine when and where their learning occurs and to control their pace. It may support both blended and adaptive learning strategies and provide a flexible learning environment not limited to workshops and bedside teaching sessions [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. This approach may also promote self-regulated learning [<xref ref-type="bibr" rid="ref23">23</xref>], an important conceptual framework in education. 
Self-reflection is an invaluable step in preparing students for the next phase of the learning cycle [<xref ref-type="bibr" rid="ref24">24</xref>].</p><p>A deep learning model based on convolutional neural networks (CNNs) is a promising approach for image classification [<xref ref-type="bibr" rid="ref25">25</xref>]. A number of pretrained CNNs with strong general performance, such as ResNet [<xref ref-type="bibr" rid="ref26">26</xref>] and SENet [<xref ref-type="bibr" rid="ref27">27</xref>], have been developed. These pretrained CNNs can be fine-tuned and trained for classification tasks involving medical images. In this study, we hypothesized that a deep learning&#x2013;based feedback model could enhance learner motivation and support self-regulated learning in renal ultrasound acquisition. Accordingly, the aims of this study were to develop a deep learning&#x2013;based feedback model and evaluate its acceptance and impact on renal ultrasound acquisition skills.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Ethical Considerations</title><p>This study was approved by the Institutional Review Board of the University of Hong Kong/Hospital Authority Hong Kong West Cluster (HKU/HA HKW IRB; UW 22&#x2010;797). This study was conducted in accordance with the Declaration of Helsinki and the International Council for Harmonisation Good Clinical Practice guidelines. Students were invited to complete the questionnaire voluntarily and anonymously. All data generated and analyzed in this study were anonymized. The requirement for informed consent was waived by the HKU/HA HKW Institutional Review Board. No compensation was provided.</p></sec><sec id="s2-2"><title>Development of a Deep Learning Feedback Model</title><p>All renal ultrasound images (N=2807) in transverse and longitudinal views were retrospectively retrieved from the local radiology database, anonymized, and used to train the algorithm. 
All renal ultrasound images were classified into three main categories by a board-certified radiologist with more than 15 years of postfellowship experience. Images were classified as optimal when they were free of artifact and demonstrated appropriate brightness and positioning of the kidney (<xref ref-type="fig" rid="figure1">Figure 1A</xref>). Images were classified as suboptimal when they exhibited one or more of the following features: artifact (eg, acoustic shadowing or edge artifact) that obscured visualization of the kidney; incorrect gain (brightness), defined as improper amplification of the ultrasound signal, resulting in either very dark or very bright pixels and a degraded grayscale image; and/or incorrect positioning, in which the image of the kidney was truncated or its contour was unclear (<xref ref-type="fig" rid="figure1">Figure 1B</xref>). Images were classified as incorrect when the image showed an incorrect organ or when no kidney was visualized (<xref ref-type="fig" rid="figure1">Figure 1C</xref>).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Examples of renal ultrasound images for model training: (A) optimal, (B) suboptimal (artifact and incorrect positioning), and (C) incorrect.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e72110_fig01.png"/></fig><p>In the training dataset, 562 images were classified as optimal, 1288 as suboptimal, and 957 images as incorrect. 
Among the 1288 suboptimal images, 200 showed artifact alone, 256 showed incorrect gain alone, 202 showed incorrect position alone, 248 showed artifact and incorrect gain, 106 showed artifact and incorrect positioning, 164 showed incorrect gain and incorrect positioning, and 112 showed all 3 subcategories.</p></sec><sec id="s2-3"><title>Two-Stage Cascaded Network</title><p>All images were preprocessed with pixel resizing (224 &#x00D7; 224 &#x00D7; 1) and <italic>z</italic> score normalization before being used to fine-tune the pretrained CNNs, with an 8:2 split between the training and test sets. The resulting model formed a deep learning&#x2013;based, fully automated renal ultrasound image grading system (<xref ref-type="fig" rid="figure2">Figure 2</xref>). The cascaded classifier network was composed of two stages. In the first stage, a pretrained CNN, EfficientNet-B3 [<xref ref-type="bibr" rid="ref28">28</xref>], was fine-tuned on all 2807 ultrasound images for the classification task, labeled according to the three previously defined categories. In the second stage, images classified as suboptimal were input into another pretrained CNN, ResNet-50 [<xref ref-type="bibr" rid="ref26">26</xref>], to further subclassify them into the following subcategories: artifact, incorrect gain, and/or incorrect positioning.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Cascaded renal ultrasound image grading system with automated grading and feedback for students (demonstration video in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e72110_fig02.png"/></fig><p>Both EfficientNet-B3 and ResNet-50 backbones pretrained on the ImageNet dataset [<xref ref-type="bibr" rid="ref29">29</xref>] were used and fine-tuned on the renal ultrasound dataset via transfer learning. 
Subsequently, the trained model was hosted on a website through Microsoft Azure, the cloud computing platform (<xref ref-type="fig" rid="figure2">Figure 2</xref>).</p></sec><sec id="s2-4"><title>Study Cohort and Interventions</title><p>The deep learning&#x2013;based feedback model was deployed to a cohort of novice year 5 medical students receiving training in ultrasound imaging during the 2023-2024 academic year. The students received a 1-hour didactic lecture and a 3-hour face-to-face ultrasound training session with an experienced ultrasound instructor. Students were given access to individual ultrasound handheld devices for practice during a 6-week surgical rotation. Students were encouraged to practice ultrasound scanning with their peers and to submit renal ultrasound images to the online platform during the 6-week surgical rotation. Images captured during these practice sessions were uploaded to the online platform, where the model immediately analyzed the submitted images and provided instant feedback and grading. For suboptimal or incorrect images, the platform provided comments that were cross-referenced to the current teaching material on the e-learning platform (<xref ref-type="fig" rid="figure2">Figure 2</xref>). Accordingly, students were able to assess image quality and adjust their image scanning technique for subsequent scans. Students had free access to the platform, with no limit on the number of times they could access it or the number of images they could submit, to encourage use during training.</p></sec><sec id="s2-5"><title>Mixed Methods Analysis: Qualitative Analysis</title><p>Qualitative evaluation was conducted through focus group interviews with selected subgroups within the cohort to gain insight into the successes and challenges of implementing a deep learning&#x2013;based feedback model in ultrasound training. Two teaching associates were invited to attend the focus group interviews and took notes. 
These notes were subsequently summarized and circulated among the instructors and teaching associates to ensure crucial points were accurately captured. Thematic analysis was then performed to identify and analyze the pertinent points discussed during the focus group interviews.</p></sec><sec id="s2-6"><title>Mixed Methods Analysis: Quantitative Analysis</title><p>Students were invited to complete an 11-item questionnaire based on a 5-point Likert scale (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). All items were selected from validated instruments, including the Objective Structured Assessment of Ultrasound Skills [<xref ref-type="bibr" rid="ref30">30</xref>], the System Usability Scale [<xref ref-type="bibr" rid="ref31">31</xref>], and the Client Satisfaction Questionnaire-8 [<xref ref-type="bibr" rid="ref32">32</xref>]. The questionnaire aimed to evaluate the model&#x2019;s effectiveness in supporting ultrasound training (items 1&#x2010;3), its usability (items 4 and 5), and students&#x2019; experiences interacting with the model (items 6&#x2010;11). Three experienced medical educators skilled in ultrasound teaching (PYN, MTHC, and EYPL) rated each questionnaire item for relevance to ultrasound learning. All items achieved an Item-Content Validity Index of 1, indicating high relevance to ultrasound education. For reliability testing, 30 medical students were invited to complete the questionnaire twice with a 1-week interval. Cronbach &#x03B1; was 0.965, and the intraclass correlation coefficient was 0.971. Both measures demonstrated high questionnaire reliability.</p></sec><sec id="s2-7"><title>Objective Structured Clinical Examination</title><p>At the end of the surgical rotation, an objective structured clinical examination (OSCE) was conducted, which included a station assessing renal ultrasound acquisition skills. 
Students were evaluated at the standardized OSCE station and instructed to perform POCUS on a healthy volunteer to demonstrate normal renal anatomy. The skills assessed included (1) patient preparation, (2) ultrasound probe selection, (3) probe handling, (4) a systematic approach to examination, and (5) the acquisition of an optimal sonographic image. All students were assessed by a surgical specialist experienced in performing POCUS examinations.</p><p>Mean OSCE scores from the 2 academic years preceding implementation of the deep learning&#x2013;based feedback model (2021&#x2010;2022 and 2022&#x2010;2023) were retrieved and compared with the mean OSCE score of this cohort of students receiving the deep learning&#x2013;based feedback.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>The cohort of year 5 medical students (n=231) receiving ultrasound training was divided into 6 groups and enrolled between October 2023 and September 2024. A total of 786 renal ultrasound images were submitted to the platform for grading after exclusion of duplicate images. The mean number of images contributed per student was 3.40 (SD 3.32). The mean interval between the first submission and the OSCE was 15.6 (SD 6.04) days. Of the 786 images, 269 images were classified as optimal, 349 as suboptimal, and 168 as incorrect.</p></sec><sec id="s3-2"><title>Thematic Analysis in Focus Group Interviews</title><p>Within the cohort, 2 subgroups were invited for focus group interviews (n=71). Through the interviews conducted in this study, highly motivated students who wanted to be more involved in the project were identified, and these students provided valuable suggestions for model refinement and implementation. 
The thematic framework identified three major themes: the positive impact of deep learning&#x2013;based feedback, the challenges of implementation, and suggestions to enhance the feedback model.</p><sec id="s3-2-1"><title>Positive Impact</title><p>Participants generally agreed that the model motivated their learning and helped improve ultrasound skill acquisition. Students were satisfied with the availability of immediate feedback upon submitting images to the platform, which promoted and encouraged self-regulated learning. Representative participant quotations illustrating the positive impact of the model are provided below.</p><disp-quote><p>I can know the quality of my images in a short time.</p><attrib>Participant #16, female</attrib></disp-quote><disp-quote><p>The system instantly confirmed if my images were qualified.</p><attrib>Participant #25, male</attrib></disp-quote><disp-quote><p>Great that it can classify my images immediately.</p><attrib>Participant #42, male</attrib></disp-quote><disp-quote><p>It allows me to take more images during practice and upload them later.</p><attrib>Participant #45, male</attrib></disp-quote></sec><sec id="s3-2-2"><title>Challenges</title><p>Two major challenges were identified: curricular design and hardware provision. Students reported infrequent use of the handheld ultrasound device due to limited tutor guidance at the bedside, which reduced the incentive to practice newly acquired skills. In addition, students found the ultrasound curriculum overwhelming in terms of knowledge load and skills mastery. Usability issues with the handheld devices were also reported. 
Representative participant quotations illustrating these challenges are presented below.</p><disp-quote><p>The handheld ultrasound device is different from the one I learned in the lesson.</p><attrib>Participant #11, female</attrib></disp-quote><disp-quote><p>I cannot set-up the software of the handheld ultrasound device on my smartphone.</p><attrib>Participant #13, male</attrib></disp-quote><disp-quote><p>The battery life of the handheld ultrasound device is not long enough.</p><attrib>Participant #27, male</attrib></disp-quote><disp-quote><p>The handheld device needs to be set-up first and it is a bit complicated.</p><attrib>Participant #52, female</attrib></disp-quote></sec><sec id="s3-2-3"><title>Student Suggestions</title><p>Students suggested that integrating real-time feedback would enhance ease of use and potentially increase learning engagement. Representative participant statements are presented below.</p><disp-quote><p>Real-time feedback would be more convenient.</p><attrib>Participant #5, female</attrib></disp-quote><disp-quote><p>If it provides real-time feedback, then there is no need to take screenshots and upload images.</p><attrib>Participant #27, male</attrib></disp-quote></sec></sec><sec id="s3-3"><title>Questionnaire</title><p>A response rate of 42.4% was achieved, with 98 of 231 students completing the online questionnaire (<xref ref-type="table" rid="table1">Table 1</xref>).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Questionnaire and results (n=98).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Questionnaire item</td><td align="left" valign="bottom" colspan="5">Respondent scores, n (%)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">1</td><td align="left" valign="top">2</td><td align="left" valign="top">3</td><td align="left" valign="top">4</td><td align="left" valign="top">5</td></tr></thead><tbody><tr><td align="left" 
valign="top">Q1. The grading system assists me in familiarizing myself with the handheld device and its function.<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">12 (12)</td><td align="left" valign="top">50 (51)</td><td align="left" valign="top">28 (29)</td><td align="left" valign="top">8 (8)</td></tr><tr><td align="left" valign="top">Q2. The grading system assists me in optimizing image quality.<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">25 (25)</td><td align="left" valign="top">42 (43)</td><td align="left" valign="top">27 (28)</td><td align="left" valign="top">4 (4)</td></tr><tr><td align="left" valign="top">Q3. The grading system assists me in presenting the renal image according to instruction.<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">15 (15)</td><td align="left" valign="top">35 (36)</td><td align="left" valign="top">43 (44)</td><td align="left" valign="top">4 (4)</td></tr><tr><td align="left" valign="top">Q4. I would like to use this system frequently.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">3 (3)</td><td align="left" valign="top">7 (7)</td><td align="left" valign="top">34 (35)</td><td align="left" valign="top">44 (45)</td><td align="left" valign="top">10 (10)</td></tr><tr><td align="left" valign="top">Q5. The system was easy to use.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">2 (2)</td><td align="left" valign="top">21 (21)</td><td align="left" valign="top">53 (54)</td><td align="left" valign="top">21 (21)</td></tr><tr><td align="left" valign="top">Q6. 
The system was consistent with its grading.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">2 (2)</td><td align="left" valign="top">4 (4)</td><td align="left" valign="top">44 (45)</td><td align="left" valign="top">42 (43)</td><td align="left" valign="top">6 (6)</td></tr><tr><td align="left" valign="top">Q7. The system offered useful comments.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">9 (9)</td><td align="left" valign="top">36 (37)</td><td align="left" valign="top">44 (45)</td><td align="left" valign="top">8 (8)</td></tr><tr><td align="left" valign="top">Q8. The system met my needs.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">6 (6)</td><td align="left" valign="top">37 (38)</td><td align="left" valign="top">46 (47)</td><td align="left" valign="top">8 (8)</td></tr><tr><td align="left" valign="top">Q9. The system helped me learn and improve my ultrasound skills.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">6 (6)</td><td align="left" valign="top">26 (27)</td><td align="left" valign="top">56 (57)</td><td align="left" valign="top">9 (9)</td></tr><tr><td align="left" valign="top">Q10. I will recommend this system to other peers.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">5 (5)</td><td align="left" valign="top">32 (33)</td><td align="left" valign="top">51 (52)</td><td align="left" valign="top">9 (9)</td></tr><tr><td align="left" valign="top">Q11. 
I am satisfied with the system.<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">1 (1)</td><td align="left" valign="top">2 (2)</td><td align="left" valign="top">35 (36)</td><td align="left" valign="top">54 (55)</td><td align="left" valign="top">6 (6)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Question 1 response options: 1=unable to operate equipment, 2=limited ability to operate equipment, 3=operates with some experience, 4=confident in operating equipment, 5=familiar with operating equipment.</p></fn><fn id="table1fn2"><p><sup>b</sup>Question 2 response options: 1=unable to optimize, 2=limited ability to optimize, 3=competent but optimization inconsistently done, 4=confident in optimization with minor inconsistencies, 5=consistent optimization.</p></fn><fn id="table1fn3"><p><sup>c</sup>Question 3 response options: 1=unable to achieve, 2=occasionally achieve with difficulty, 3=partially achieve, 4=frequently achieve with some consistency, 5=consistently achieve.</p></fn><fn id="table1fn4"><p><sup>d</sup>Questions 4-11 response options: 1=strongly disagree, 2=disagree, 3=neutral, 4=agree, 5=strongly agree.</p></fn></table-wrap-foot></table-wrap><p>When evaluating the feedback model for assisting ultrasound skill acquisition (questions 1&#x2010;3), 32% (31/98) to 48% (47/98) of the respondents rated the items as 4 or higher, indicating that the model helped build confidence in acquiring new skills, including use of the handheld ultrasound device, image optimization, and image presentation according to instructions. 
A similar proportion of students (35/98, 36% to 50/98, 51%) rated questionnaire items 1 to 3 as 3, suggesting partial improvement in ultrasound acquisition skills through the feedback from the model (<xref ref-type="table" rid="table1">Table 1</xref>).</p><p>Among respondents, at least 55% (54/98) were satisfied with the usability of the model (scores &#x2265;4 for questions 4 and 5) and more than 49% (48/98) had positive experiences interacting with the model on the cloud platform (scores &#x2265;4 for questions 6&#x2010;11). However, 6% (6/98) of students rated the model as inconsistent in its grading (scores 1 or 2 for question 6) and 45% (44/98) were neutral (score 3 for question 6) on this aspect (<xref ref-type="table" rid="table1">Table 1</xref>).</p></sec><sec id="s3-4"><title>OSCE Score Comparison</title><p>The mean OSCE score in the current cohort was 9.73 (SD 0.76) out of 10, reflecting high performance in renal ultrasound acquisition skills. In the preceding 2 academic years, when a deep learning&#x2013;based feedback model was not used, mean OSCE scores were 9.35 (SD 1.03; <italic>P</italic>=.06) out of 10 in the 2021-2022 academic year and 9.45 (SD 0.97; <italic>P</italic>=.15) out of 10 in the 2022-2023 academic year. Although the mean score was higher in the current cohort, the difference was not statistically significant. This finding may reflect the model&#x2019;s role in sustaining students&#x2019; interest in renal ultrasound learning by providing continuous access to the platform and encouraging active engagement during the clinical placement.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study developed and deployed a deep learning&#x2013;based feedback model to assist novice learners in mastering ultrasound acquisition skills. 
Artificial intelligence (AI) has already substantially influenced many aspects of society, including medical education [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. Several POCUS studies have explored the use of AI in educational design. Medical students using AI-based tools demonstrated improved performance in acquiring cardiac views on echocardiography [<xref ref-type="bibr" rid="ref35">35</xref>-<xref ref-type="bibr" rid="ref38">38</xref>]. Artificial intelligence has also enhanced novice performance in measuring left ventricular ejection fraction, achieving diagnostic accuracy comparable to that of cardiologists [<xref ref-type="bibr" rid="ref39">39</xref>]. The diagnostic performance of inexperienced medical residents or fellows in the evaluation of thyroid nodules was improved with an AI-based computer-assisted diagnostic system [<xref ref-type="bibr" rid="ref40">40</xref>]. Integration of AI into 3-dimensional and 4-dimensional ultrasound analysis has enhanced fetal facial profiling, contributing to education in prenatal diagnosis [<xref ref-type="bibr" rid="ref41">41</xref>]. Collectively, these studies highlight the importance of AI in ultrasound education. The findings of this study are consistent with this evidence, suggesting that a deep learning&#x2013;based feedback model can serve as an effective adjunct to ultrasound learning by providing automated feedback that supports students&#x2019; self-regulated learning.</p><p>The positive experiences students reported while interacting with the model were essential in sustaining continuous interest in learning ultrasound skills. According to the 4-phase model of interest development [<xref ref-type="bibr" rid="ref42">42</xref>], learners progress from triggered situational interest to maintained situational interest and eventually to emerging and well-developed individual interest. 
The initial face-to-face ultrasound training may have triggered situational interest, whereas the feedback model may have contributed to maintaining that interest, thereby allowing for individual interest to develop.</p><p>The deep learning&#x2013;generated grading was not intended as a final assessment of learning; rather, students were encouraged to review and discuss ambiguous images with instructors during face-to-face sessions. Feedback model analytics (ie, the number of platform accesses and image submissions) indicated that students actively engaged with the model throughout the surgical rotation. These findings suggest that the feedback model promoted self-regulated learning and allowed students to develop ultrasound skills at their own pace [<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>Based on feedback model analytics, students frequently submitted more than 1 renal ultrasound image to the platform for evaluation, which may indicate that these students perceived the model as helpful in supporting their learning. Overall, sustained engagement with the feedback model suggests that it contributed to fostering novice learners&#x2019; interest and motivation, and supported self-regulation of their ultrasound skill development.</p><p>Information gathered from the focus group interviews prompted further in-depth discussion among the course instructors and tutors to re-examine the current ultrasound curriculum. Options for streamlining the curriculum into more focused areas are being actively explored. The framework of load reduction [<xref ref-type="bibr" rid="ref43">43</xref>] may be applied through instructional strategies such as increasing scaffolding and progressively guiding learners toward independent mastery.</p><p>To promote student engagement, the benefits of adopting the deep learning&#x2013;based feedback model and its role in the broader ultrasound curriculum can be communicated to students. 
Course leaders may also develop reflective tasks or prompts based on principles of self-regulated learning [<xref ref-type="bibr" rid="ref23">23</xref>] to guide students in reflecting on their learning (ie, self-efficacy, self-monitoring, self-evaluation, adaptive changes) [<xref ref-type="bibr" rid="ref44">44</xref>]. These reflections can span both face-to-face ultrasound training and interactions with the deep learning&#x2013;based feedback model during the 6-week surgical rotation. Such integrations may scaffold students&#x2019; reflection on their ultrasound learning, help close the feedback loop, and encourage feedforward learning [<xref ref-type="bibr" rid="ref45">45</xref>]. This refinement to the curriculum structure may enhance self-regulated learning and provide a sustainable, iterative process for students to develop their ultrasound skills through integration of the deep learning&#x2013;based feedback model.</p><p>Furthermore, interview findings indicated a need for enhanced tutor guidance. In addition to scaling-up the recruitment of ultrasound instructors and strengthening local training support, a midrotation tutorial or workshop may provide support to students who may be struggling. This approach may enable them to make more effective use of the feedback model and feel empowered to practice their ultrasound skills more during the rotation. Integrating near-peer feedback during the midrotation tutorial may further support struggling students and help bridge the gaps between intensive face-to-face sessions [<xref ref-type="bibr" rid="ref46">46</xref>].</p><p>As noted from the focus group interviews, full realization of the intervention&#x2019;s benefits would likely require real-time integration of the model into the hardware device. Real-time integration could address the need for immediate guidance while also providing instructional scaffolding. 
However, such development would be more resource intensive, requiring real-time image tracking and continuous feedback from the deep learning&#x2013;based model. With further research, clinical validation, and implementation, this type of AI application could potentially be adopted in POCUS training and possibly incorporated into the OSCE.</p><p>There is also room for further improvement such as incorporating highlighted contours or bounding boxes to delineate the kidney and associated artifacts. This could be achieved by integrating additional deep learning&#x2013;based segmentation or object detection models.</p></sec><sec id="s4-2"><title>Limitations</title><p>Although the performance of the trained cascaded network was satisfactory for the classification task, there remains room for improvement in its accuracy and efficiency. We are currently prospectively collecting ultrasound data submitted by students to enable continuous model training and refinement, in order to enhance the model&#x2019;s accuracy and relevance. Second, the response rate to the questionnaire was low, introducing a risk of sampling bias [<xref ref-type="bibr" rid="ref47">47</xref>]. Despite numerous reminders and encouragement from instructors, students who participated in the questionnaire were likely highly motivated and may have been more proficient in self-regulated learning. Future studies that include a broader range of students would provide a clearer understanding of how the feedback model supports both high- and low-achieving learners in developing ultrasound skills. Third, the lack of a preintervention questionnaire limits the strength of inferences regarding the impact of the deep learning&#x2013;based model on ultrasound learning and precludes detailed assessment of change. 
However, given the low postintervention response rate, adding a preintervention questionnaire might have increased the risk of attrition bias.</p></sec><sec id="s4-3"><title>Conclusion</title><p>A cascaded renal ultrasound image feedback model was successfully developed and deployed, personalizing the learning experience in medical education and providing on-demand feedback. It was well received by students and supported self-regulated learning. The innovation enhanced student engagement and improved ultrasound skill acquisition among novice learners.</p></sec></sec></body><back><ack><p>No generative artificial intelligence was used in the preparation of this manuscript.</p></ack><notes><sec><title>Funding</title><p>This research was funded by the 2023 Teaching Development Grant, University of Hong Kong.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available due to patient and student privacy. Data supporting the study findings can be found in the translated quotations in the Results section.</p></sec></notes><fn-group><fn fn-type="con"><p>Co-first authors: ACNH (equal), RS (equal)</p><p>Co-corresponding authors: EYPL (equal; email: eyplee77@hku.hk), MTHC (equal; email: mcth@hku.hk), GTCW (equal; email: gordon@hku.hk)</p><p>Conceptualization: EYPL</p><p>Data curation: ACNH (equal), RS (equal)</p><p>Formal analysis: ACNH (equal), RS (equal), EYPL (equal)</p><p>Funding acquisition: EYPL (equal), PC (equal), VV (equal), PYN (equal), GTCW (equal), MTHC (equal)</p><p>Investigation: ACNH (equal), RS (equal), EYPL (equal)</p><p>Methodology: ACNH (equal), RS (equal), EYPL (equal), EAB (equal)</p><p>Project administration: ACNH</p><p>Resources: EYPL (lead), ACNH (supporting), RS (supporting)</p><p>Software: RS</p><p>Supervision: EYPL</p><p>Validation: ACNH (equal), RS (equal), EYPL (equal)</p><p>Visualization: ACNH (equal), RS (equal)</p><p>Writing &#x2013; original draft: ACNH</p><p>Writing &#x2013; 
review &#x0026; editing: ACNH (equal), RS (equal), EYPL (equal), PC (supporting), VV (supporting), PYN (supporting), GTCW (supporting), MTHC (supporting), EAB (supporting)</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb3">OSCE</term><def><p>objective structured clinical examination</p></def></def-item><def-item><term id="abb4">POCUS</term><def><p>point-of-care ultrasound</p></def></def-item><def-item><term id="abb5">UME</term><def><p>undergraduate medical education</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smallwood</surname><given-names>N</given-names> </name><name name-style="western"><surname>Dachsel</surname><given-names>M</given-names> </name></person-group><article-title>Point-of-care ultrasound (POCUS): unnecessary gadgetry or evidence-based medicine?</article-title><source>Clin Med (Northfield)</source><year>2018</year><month>06</month><volume>18</volume><issue>3</issue><fpage>219</fpage><lpage>224</lpage><pub-id pub-id-type="doi">10.7861/clinmedicine.18-3-219</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Archer</surname><given-names>J</given-names> </name><name name-style="western"><surname>Beck</surname><given-names>S</given-names> </name></person-group><article-title>Accuracy and clinical use of biliary point-of-care ultrasound: a retrospective cohort study</article-title><source>Emerg Med 
Australas</source><year>2023</year><month>04</month><volume>35</volume><issue>2</issue><fpage>218</fpage><lpage>224</lpage><pub-id pub-id-type="doi">10.1111/1742-6723.14099</pub-id><pub-id pub-id-type="medline">36196041</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yoshida</surname><given-names>T</given-names> </name><name name-style="western"><surname>Yoshida</surname><given-names>T</given-names> </name><name name-style="western"><surname>Noma</surname><given-names>H</given-names> </name><name name-style="western"><surname>Nomura</surname><given-names>T</given-names> </name><name name-style="western"><surname>Suzuki</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mihara</surname><given-names>T</given-names> </name></person-group><article-title>Diagnostic accuracy of point-of-care ultrasound for shock: a systematic review and meta-analysis</article-title><source>Crit Care</source><year>2023</year><month>05</month><day>25</day><volume>27</volume><issue>1</issue><fpage>200</fpage><pub-id pub-id-type="doi">10.1186/s13054-023-04495-6</pub-id><pub-id pub-id-type="medline">37231510</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nicholas</surname><given-names>E</given-names> </name><name name-style="western"><surname>Ly</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Prince</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Klawitter</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Gaskin</surname><given-names>K</given-names> </name><name name-style="western"><surname>Prince</surname><given-names>LA</given-names> </name></person-group><article-title>The current status of ultrasound education 
in United States medical schools</article-title><source>J Ultrasound Med</source><year>2021</year><month>11</month><volume>40</volume><issue>11</issue><fpage>2459</fpage><lpage>2465</lpage><pub-id pub-id-type="doi">10.1002/jum.15633</pub-id><pub-id pub-id-type="medline">33448471</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wong</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Hai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>KYE</given-names> </name><etal/></person-group><article-title>Point-of-care ultrasound augments physical examination learning by undergraduate medical students</article-title><source>Postgrad Med J</source><year>2021</year><month>01</month><day>1</day><volume>97</volume><issue>1143</issue><fpage>10</fpage><lpage>15</lpage><pub-id pub-id-type="doi">10.1136/postgradmedj-2020-137773</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cheung</surname><given-names>ACK</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>PY</given-names> </name><name name-style="western"><surname>Lam</surname><given-names>RPK</given-names> </name><name name-style="western"><surname>Wong</surname><given-names>GTC</given-names> </name></person-group><article-title>Cross-specialty point-of-care ultrasound education in The University of Hong Kong</article-title><source>Hong Kong Med J</source><year>2024</year><month>06</month><volume>30</volume><issue>3</issue><fpage>255</fpage><pub-id pub-id-type="doi">10.12809/hkmj2411666</pub-id><pub-id pub-id-type="medline">38831748</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Russell</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Zakeri</surname><given-names>B</given-names> </name><name name-style="western"><surname>Herbert</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ferre</surname><given-names>RM</given-names> </name><name name-style="western"><surname>Leiser</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wallach</surname><given-names>PM</given-names> </name></person-group><article-title>The state of point-of-care ultrasound training in undergraduate medical education: findings from a national survey</article-title><source>Acad Med</source><year>2022</year><month>05</month><day>1</day><volume>97</volume><issue>5</issue><fpage>723</fpage><lpage>727</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000004512</pub-id><pub-id pub-id-type="medline">34789665</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Breunig</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hanson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Huckabee</surname><given-names>M</given-names> </name></person-group><article-title>Learning curves for point-of-care ultrasound image acquisition for novice learners in a longitudinal curriculum</article-title><source>Ultrasound J</source><year>2023</year><month>07</month><day>5</day><volume>15</volume><issue>1</issue><fpage>31</fpage><pub-id pub-id-type="doi">10.1186/s13089-023-00329-2</pub-id><pub-id pub-id-type="medline">37402989</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moga</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Dancu</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Cotrau</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Learning curves in abdominal ultrasound in medical students</article-title><source>Med Ultrason</source><year>2024</year><month>03</month><day>27</day><volume>26</volume><issue>1</issue><fpage>21</fpage><lpage>25</lpage><pub-id pub-id-type="doi">10.11152/mu-4235</pub-id><pub-id pub-id-type="medline">38150697</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alerhand</surname><given-names>S</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ostrovsky</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Integrating basic and clinical sciences using point-of-care renal ultrasound for preclerkship education</article-title><source>MedEdPORTAL</source><year>2020</year><month>12</month><day>9</day><volume>16</volume><fpage>11037</fpage><pub-id pub-id-type="doi">10.15766/mep_2374-8265.11037</pub-id><pub-id pub-id-type="medline">33324747</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Burgess</surname><given-names>A</given-names> </name><name name-style="western"><surname>van Diggele</surname><given-names>C</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mellis</surname><given-names>C</given-names> </name></person-group><article-title>Feedback in the clinical setting</article-title><source>BMC Med Educ</source><year>2020</year><month>12</month><day>3</day><volume>20</volume><issue>Suppl 
2</issue><fpage>460</fpage><pub-id pub-id-type="doi">10.1186/s12909-020-02280-5</pub-id><pub-id pub-id-type="medline">33272265</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Carless</surname><given-names>D</given-names> </name></person-group><source>Excellence in University Assessment: Learning from Award-Winning Practice</source><year>2015</year><publisher-name>Routledge</publisher-name><pub-id pub-id-type="other">9781315740621</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ajjawi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Boud</surname><given-names>D</given-names> </name><name name-style="western"><surname>Dawson</surname><given-names>P</given-names> </name><name name-style="western"><surname>Panadero</surname><given-names>E</given-names> </name></person-group><article-title>Developing evaluative judgement: enabling students to make decisions about the quality of work</article-title><source>High Educ</source><year>2018</year><month>09</month><volume>76</volume><issue>3</issue><fpage>467</fpage><lpage>481</lpage><pub-id pub-id-type="doi">10.1007/s10734-017-0220-3</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bienstock</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Katz</surname><given-names>NT</given-names> </name><name name-style="western"><surname>Cox</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>To the point: medical education reviews--providing feedback</article-title><source>Am J Obstet 
Gynecol</source><year>2007</year><month>06</month><volume>196</volume><issue>6</issue><fpage>508</fpage><lpage>513</lpage><pub-id pub-id-type="doi">10.1016/j.ajog.2006.08.021</pub-id><pub-id pub-id-type="medline">17547874</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reddy</surname><given-names>ST</given-names> </name><name name-style="western"><surname>Zegarek</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Fromme</surname><given-names>HB</given-names> </name><name name-style="western"><surname>Ryan</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Schumann</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Harris</surname><given-names>IB</given-names> </name></person-group><article-title>Barriers and facilitators to effective feedback: a qualitative analysis of data from multispecialty resident focus groups</article-title><source>J Grad Med Educ</source><year>2015</year><month>06</month><volume>7</volume><issue>2</issue><fpage>214</fpage><lpage>219</lpage><pub-id pub-id-type="doi">10.4300/JGME-D-14-00461.1</pub-id><pub-id pub-id-type="medline">26221437</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Henderson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Phillips</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ryan</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Conditions that enable effective feedback</article-title><source>High Educ Res Dev</source><year>2019</year><month>11</month><day>10</day><volume>38</volume><issue>7</issue><fpage>1401</fpage><lpage>1416</lpage><pub-id 
pub-id-type="doi">10.1080/07294360.2019.1657807</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Henderson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ryan</surname><given-names>T</given-names> </name><name name-style="western"><surname>Phillips</surname><given-names>M</given-names> </name></person-group><article-title>The challenges of feedback in higher education</article-title><source>Assess Eval High Educ</source><year>2019</year><month>11</month><day>17</day><volume>44</volume><issue>8</issue><fpage>1237</fpage><lpage>1252</lpage><pub-id pub-id-type="doi">10.1080/02602938.2019.1599815</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fuller</surname><given-names>R</given-names> </name><name name-style="western"><surname>Goddard</surname><given-names>VCT</given-names> </name><name name-style="western"><surname>Nadarajah</surname><given-names>VD</given-names> </name><etal/></person-group><article-title>Technology enhanced assessment: Ottawa consensus statement and recommendations</article-title><source>Med Teach</source><year>2022</year><month>08</month><volume>44</volume><issue>8</issue><fpage>836</fpage><lpage>850</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2022.2083489</pub-id><pub-id pub-id-type="medline">35771684</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cutrer</surname><given-names>WB</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>B</given-names> </name><name name-style="western"><surname>Pusic</surname><given-names>MV</given-names> </name><etal/></person-group><article-title>Fostering the 
development of master adaptive learners: a conceptual model to guide skill acquisition in medical education</article-title><source>Acad Med</source><year>2017</year><volume>92</volume><issue>1</issue><fpage>70</fpage><lpage>75</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000001323</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pickering</surname><given-names>JD</given-names> </name></person-group><article-title>Anatomy drawing screencasts: enabling flexible learning for medical students</article-title><source>Anat Sci Educ</source><year>2015</year><volume>8</volume><issue>3</issue><fpage>249</fpage><lpage>257</lpage><pub-id pub-id-type="doi">10.1002/ase.1480</pub-id><pub-id pub-id-type="medline">25091417</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fitzgerald</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Scott</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Ryan</surname><given-names>MS</given-names> </name></person-group><article-title>Blended and e-learning in pediatric education: harnessing lessons learned from the COVID-19 pandemic</article-title><source>Eur J Pediatr</source><year>2022</year><month>02</month><volume>181</volume><issue>2</issue><fpage>447</fpage><lpage>452</lpage><pub-id pub-id-type="doi">10.1007/s00431-021-04149-1</pub-id><pub-id pub-id-type="medline">34322730</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Challis</surname><given-names>M</given-names> </name></person-group><article-title>AMEE medical education guide no. 
19: personal learning plans</article-title><source>Med Teach</source><year>2000</year><month>01</month><volume>22</volume><issue>3</issue><fpage>225</fpage><lpage>236</lpage><pub-id pub-id-type="doi">10.1080/01421590050006160</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zimmerman</surname><given-names>BJ</given-names> </name></person-group><article-title>Becoming a self-regulated learner: an overview</article-title><source>Theory Pract</source><year>2002</year><month>05</month><volume>41</volume><issue>2</issue><fpage>64</fpage><lpage>70</lpage><pub-id pub-id-type="doi">10.1207/s15430421tip4102_2</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Panadero</surname><given-names>E</given-names> </name></person-group><article-title>A review of self-regulated learning: six models and four directions for research</article-title><source>Front Psychol</source><year>2017</year><volume>8</volume><issue>422</issue><fpage>422</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2017.00422</pub-id><pub-id pub-id-type="medline">28503157</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Krizhevsky</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sutskever</surname><given-names>I</given-names> </name><name name-style="western"><surname>Hinton</surname><given-names>GE</given-names> </name></person-group><article-title>ImageNet classification with deep convolutional neural networks</article-title><source>Commun ACM</source><year>2017</year><month>05</month><day>24</day><volume>60</volume><issue>6</issue><fpage>84</fpage><lpage>90</lpage><pub-id 
pub-id-type="doi">10.1145/3065386</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>K</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>J</given-names> </name></person-group><article-title>Deep residual learning for image recognition</article-title><conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 27-30, 2016</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>L</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>G</given-names> </name></person-group><article-title>Squeeze-and-excitation networks</article-title><conf-name>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name><conf-date>Jun 18-23, 2018</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2018.00745</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Le</surname><given-names>QV</given-names> </name></person-group><article-title>EfficientNet: rethinking model scaling for convolutional neural networks</article-title><source>Proc Machine Learning 
Res</source><year>2019</year><access-date>2026-03-05</access-date><volume>97</volume><fpage>6105</fpage><lpage>6114</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.mlr.press/v97/tan19a.html?ref=ji">https://proceedings.mlr.press/v97/tan19a.html?ref=ji</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Deng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Dong</surname><given-names>W</given-names> </name><name name-style="western"><surname>Socher</surname><given-names>R</given-names> </name><name name-style="western"><surname>Li</surname><given-names>LJ</given-names> </name></person-group><article-title>ImageNet: a large-scale hierarchical image database</article-title><conf-name>2009 IEEE Conference on Computer Vision and Pattern Recognition</conf-name><conf-date>Jun 20-25, 2009</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2009.5206848</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tolsgaard</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Todsen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Sorensen</surname><given-names>JL</given-names> </name><etal/></person-group><article-title>International multispecialty consensus on how to evaluate ultrasound competence: a Delphi consensus survey</article-title><source>PLoS One</source><year>2013</year><volume>8</volume><issue>2</issue><fpage>e57687</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0057687</pub-id><pub-id pub-id-type="medline">23469051</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name 
name-style="western"><surname>Brooke</surname><given-names>J</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Jordan</surname><given-names>PW</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>B</given-names> </name><name name-style="western"><surname>McClelland</surname><given-names>IL</given-names> </name><name name-style="western"><surname>Weerdmeester</surname><given-names>B</given-names> </name></person-group><article-title>SUS &#x2013; a quick and dirty usability scale</article-title><source>Usability Evaluation in Industry</source><year>1996</year><edition>1</edition><fpage>189</fpage><lpage>194</lpage><pub-id pub-id-type="other">978-0748404605</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Larsen</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Attkisson</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Hargreaves</surname><given-names>WA</given-names> </name><name name-style="western"><surname>Nguyen</surname><given-names>TD</given-names> </name></person-group><article-title>Assessment of client/patient satisfaction: development of a general scale</article-title><source>Eval Program Plann</source><year>1979</year><volume>2</volume><issue>3</issue><fpage>197</fpage><lpage>207</lpage><pub-id pub-id-type="doi">10.1016/0149-7189(79)90094-6</pub-id><pub-id pub-id-type="medline">10245370</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tolsgaard</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Pusic</surname><given-names>MV</given-names> </name><name 
name-style="western"><surname>Sebok-Syer</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>The fundamentals of artificial intelligence in medical education research: AMEE Guide No. 156</article-title><source>Med Teach</source><year>2023</year><month>06</month><volume>45</volume><issue>6</issue><fpage>565</fpage><lpage>573</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2023.2180340</pub-id><pub-id pub-id-type="medline">36862064</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gordon</surname><given-names>M</given-names> </name><name name-style="western"><surname>Daniel</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ajiboye</surname><given-names>A</given-names> </name><etal/></person-group><article-title>A scoping review of artificial intelligence in medical education: BEME guide no. 84</article-title><source>Med Teach</source><year>2024</year><month>04</month><volume>46</volume><issue>4</issue><fpage>446</fpage><lpage>470</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2024.2314198</pub-id><pub-id pub-id-type="medline">38423127</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gohar</surname><given-names>E</given-names> </name><name name-style="western"><surname>Herling</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mazuz</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Artificial intelligence (AI) versus POCUS expert: a validation study of three automatic AI-based, real-time, hemodynamic echocardiographic assessment tools</article-title><source>J Clin Med</source><year>2023</year><month>02</month><day>8</day><volume>12</volume><issue>4</issue><fpage>1352</fpage><pub-id 
pub-id-type="doi">10.3390/jcm12041352</pub-id><pub-id pub-id-type="medline">36835888</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aronovitz</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hazan</surname><given-names>I</given-names> </name><name name-style="western"><surname>Jedwab</surname><given-names>R</given-names> </name><etal/></person-group><article-title>The effect of real-time EF automatic tool on cardiac ultrasound performance among medical students</article-title><source>PLoS One</source><year>2024</year><volume>19</volume><issue>3</issue><fpage>e0299461</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0299461</pub-id><pub-id pub-id-type="medline">38547257</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baum</surname><given-names>E</given-names> </name><name name-style="western"><surname>Tandel</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Acquisition of cardiac point-of-care ultrasound images with deep learning: a randomized trial for educational outcomes with novices</article-title><source>Chest Pulmonary</source><year>2023</year><month>09</month><day>23</day><volume>1</volume><issue>3</issue><fpage>100023</fpage><pub-id pub-id-type="doi">10.1016/j.chpulm.2023.100023</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Soliman-Aboumarie</surname><given-names>H</given-names> </name><name name-style="western"><surname>Geers</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Lowcock</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Artificial intelligence-assisted focused cardiac ultrasound training: a survey among undergraduate medical students</article-title><source>Ultrasound</source><year>2025</year><month>05</month><volume>33</volume><issue>2</issue><fpage>123</fpage><lpage>128</lpage><pub-id pub-id-type="doi">10.1177/1742271X241287923</pub-id><pub-id pub-id-type="medline">39555149</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dadon</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Orlev</surname><given-names>A</given-names> </name><name name-style="western"><surname>Butnaru</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Empowering medical students: harnessing artificial intelligence for precision point-of-care echocardiography assessment of left ventricular ejection fraction</article-title><source>Int J Clin Pract</source><year>2023</year><fpage>5225872</fpage><pub-id pub-id-type="doi">10.1155/2023/5225872</pub-id><pub-id pub-id-type="medline">38078051</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>SE</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Jung</surname><given-names>HK</given-names> </name><etal/></person-group><article-title>Improving the diagnostic performance of inexperienced readers for thyroid nodules through digital self-learning and artificial intelligence assistance</article-title><source>Front Endocrinol (Lausanne)</source><year>2024</year><volume>15</volume><fpage>1372397</fpage><pub-id 
pub-id-type="doi">10.3389/fendo.2024.1372397</pub-id><pub-id pub-id-type="medline">39015174</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bachnas</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Andonotopo</surname><given-names>W</given-names> </name><name name-style="western"><surname>Dewantiningrum</surname><given-names>J</given-names> </name><name name-style="western"><surname>Adi Pramono</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Stanojevic</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kurjak</surname><given-names>A</given-names> </name></person-group><article-title>The utilization of artificial intelligence in enhancing 3D/4D ultrasound analysis of fetal facial profiles</article-title><source>J Perinat Med</source><year>2024</year><month>11</month><day>26</day><volume>52</volume><issue>9</issue><fpage>899</fpage><lpage>913</lpage><pub-id pub-id-type="doi">10.1515/jpm-2024-0347</pub-id><pub-id pub-id-type="medline">39383043</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hidi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Renninger</surname><given-names>KA</given-names> </name></person-group><article-title>The four-phase model of interest development</article-title><source>Educ Psychol</source><year>2006</year><month>06</month><volume>41</volume><issue>2</issue><fpage>111</fpage><lpage>127</lpage><pub-id pub-id-type="doi">10.1207/s15326985ep4102_4</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Martin</surname><given-names>AJ</given-names> 
</name><name name-style="western"><surname>Evans</surname><given-names>P</given-names> </name></person-group><article-title>Load reduction instruction: exploring a framework that assesses explicit instruction through to independent learning</article-title><source>Teach Teach Educ</source><year>2018</year><month>07</month><volume>73</volume><fpage>203</fpage><lpage>214</lpage><pub-id pub-id-type="doi">10.1016/j.tate.2018.03.018</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Leggett</surname><given-names>H</given-names> </name><name name-style="western"><surname>Sandars</surname><given-names>J</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>T</given-names> </name></person-group><article-title>Twelve tips on how to provide self-regulated learning (SRL) enhanced feedback on clinical performance</article-title><source>Med Teach</source><year>2019</year><month>02</month><volume>41</volume><issue>2</issue><fpage>147</fpage><lpage>151</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2017.1407868</pub-id><pub-id pub-id-type="medline">29228830</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fuentes-Cimma</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sluijsmans</surname><given-names>D</given-names> </name><name name-style="western"><surname>Riquelme</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Designing feedback processes in the workplace-based learning of undergraduate health professions education: a scoping review</article-title><source>BMC Med Educ</source><year>2024</year><month>04</month><day>23</day><volume>24</volume><issue>1</issue><fpage>440</fpage><pub-id 
pub-id-type="doi">10.1186/s12909-024-05439-6</pub-id><pub-id pub-id-type="medline">38654360</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sader</surname><given-names>J</given-names> </name><name name-style="western"><surname>Cerutti</surname><given-names>B</given-names> </name><name name-style="western"><surname>Meynard</surname><given-names>L</given-names> </name><etal/></person-group><article-title>The pedagogical value of near-peer feedback in online OSCEs</article-title><source>BMC Med Educ</source><year>2022</year><month>07</month><day>25</day><volume>22</volume><issue>1</issue><fpage>572</fpage><pub-id pub-id-type="doi">10.1186/s12909-022-03629-8</pub-id><pub-id pub-id-type="medline">35879752</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Spooner</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pawlikowska</surname><given-names>T</given-names> </name></person-group><article-title>Feedback literacy as a model to explore how learners respond to feedback</article-title><source>Br J Hosp Med</source><year>2023</year><month>07</month><day>2</day><volume>84</volume><issue>7</issue><fpage>1</fpage><lpage>9</lpage><pub-id pub-id-type="doi">10.12968/hmed.2022.0446</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Demonstration video of the cascaded renal ultrasound image grading system.</p><media xlink:href="mededu_v12i1e72110_app1.mp4" xlink:title="MP4 File, 22456 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Sample questionnaire.</p><media xlink:href="mededu_v12i1e72110_app2.docx" xlink:title="DOCX File, 19 
KB"/></supplementary-material></app-group></back></article>