<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e84091</article-id><article-id pub-id-type="doi">10.2196/84091</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>An AI-Driven Virtual Patient Platform (CBT Trainer) for Training Cognitive Behavioral Therapy Practitioners Against Competencies: Mixed Methods Pilot Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Zhang</surname><given-names>Tianyu Terry</given-names></name><degrees>MSc, MPS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Saunders</surname><given-names>Rob</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Pilling</surname><given-names>Stephen</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>O'Driscoll</surname><given-names>Ciar&#x00E1;n</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Research Department of Clinical, Educational &#x0026; Health Psychology, Centre for Outcomes Research and Effectiveness (CORE), University College London</institution><addr-line>1-19 Torrington Place</addr-line><addr-line>London</addr-line><country>United Kingdom</country></aff><aff id="aff2"><institution>Camden and Islington NHS Foundation Trust</institution><addr-line>London</addr-line><country>United Kingdom</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Tsuei</surname><given-names>Sian</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Churchard</surname><given-names>Alasdair</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Presley</surname><given-names>Vickie L</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Tianyu Terry Zhang, MSc, MPS, Research Department of Clinical, Educational &#x0026; Health Psychology, Centre for Outcomes Research and Effectiveness (CORE), University College London, 1-19 Torrington Place, London, WC1E 7HB, United Kingdom, 44 07398556250; <email>tianyu.zhang.20@ucl.ac.uk</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>6</day><month>3</month><year>2026</year></pub-date><volume>12</volume><elocation-id>e84091</elocation-id><history><date date-type="received"><day>22</day><month>09</month><year>2025</year></date><date date-type="accepted"><day>30</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Tianyu Terry Zhang, Rob Saunders, Stephen Pilling, Ciar&#x00E1;n O'Driscoll. 
Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 6.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2026/1/e84091"/><abstract><sec><title>Background</title><p>Cognitive behavioral therapy (CBT) training faces significant challenges, including supervised practice with diverse cases, inconsistent feedback, resource-intensive supervision, and difficulties standardizing competence assessment.</p></sec><sec><title>Objective</title><p>This study evaluated the acceptability and feasibility of CBT Trainer (TTZ), the first virtual patient platform to provide real-time feedback aligned with established competence frameworks. 
The mobile app trains psychological practitioners using standardized artificial intelligence patient interactions and the evaluation of therapist responses against competence frameworks to enable structured skill development in a controlled, repeatable environment that complements traditional training methods.</p></sec><sec sec-type="methods"><title>Methods</title><p>This mixed methods pilot study used a 2-stage approach. Stage 1 involved usability testing with 4 participants. Stage 2 included 59 participants from psychological practitioner training programs (a Low Intensity CBT Interventions Program and a Doctorate in Clinical Psychology) who engaged with the CBT Trainer voluntarily for over 1 month. Measures of impact included the System Usability Scale (SUS), platform naturalistic engagement, poststudy questionnaire on perceived competency development, comparative evaluation against traditional role-play, and qualitative feedback.</p></sec><sec sec-type="results"><title>Results</title><p>Participants engaged voluntarily with the platform for an average of 95.24 (SD 134.58; median 45.34, IQR 11.57&#x2013;105.15) minutes of active role-play. Platform usability was rated as excellent (mean SUS 82.20, SD 12.93). Self-reported competence improved most in assessment skills (96.7%) and information gathering (66.7%). When compared to traditional peer role-play exercises, participants rated CBT Trainer moderately favorably (mean 5.90/10, SD 1.94). Qualitative feedback highlighted strengths in competency-aligned feedback, on-demand accessibility, and a psychologically safe practice space.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This pilot study provides evidence that an artificial intelligence&#x2013;based patient simulation shows promise as a supplementary training tool for psychological therapists who use CBT in their practice, particularly regarding accessibility and immediate feedback. 
Future research should use randomized controlled designs with objective competence assessments.</p></sec><sec><title>Trial Registration</title><p>OSF Registries 10.17605/OSF.IO/MSKB7; https://osf.io/mskb7</p></sec></abstract><kwd-group><kwd>cognitive behavioral therapy</kwd><kwd>artificial intelligence</kwd><kwd>virtual patients</kwd><kwd>competence assessment</kwd><kwd>competency-based education</kwd><kwd>clinical training</kwd><kwd>psychological therapy training</kwd><kwd>simulation-based learning</kwd><kwd>mobile app</kwd><kwd>digital health</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Overview</title><p>The training of mental health professionals represents a critical challenge in addressing the global mental health crisis. Within the National Health Service (NHS), there have been initiatives aimed at improving the quality of care and mental health outcomes by improving access to evidence-based psychological therapies [<xref ref-type="bibr" rid="ref1">1</xref>]. This requires developing a skilled workforce. In addition, as demand for psychological services continues to rise, there is increasing pressure on training programs to develop competent practitioners efficiently and effectively. Technological innovations, particularly machine learning and natural language processing, offer promising new avenues for enhancing mental health training through virtual patient simulations with competency-based feedback, potentially addressing long-standing challenges in traditional approaches.</p></sec><sec id="s1-2"><title>Training Competent Therapists: The Role of Competence Frameworks</title><p>Competency-based education has emerged as a dominant paradigm in mental health care training, providing structured frameworks essential for skill acquisition, assessment, and professional development [<xref ref-type="bibr" rid="ref2">2</xref>]. 
Across therapeutic modalities, the University College London (UCL) Center for Outcomes Research and Effectiveness (CORE) competence frameworks provide a comprehensive hierarchical model of competencies required for effective practice, distinguishing between generic therapeutic competencies, basic therapy-specific technique (eg, CBT [cognitive behavioral therapy]) competencies, and meta-competencies [<xref ref-type="bibr" rid="ref3">3</xref>]. These frameworks are operationalized through validated competency scales, which translate professional standards into measurable assessment tools. For example, the Cognitive Therapy Scale-Revised (CTS-R) [<xref ref-type="bibr" rid="ref4">4</xref>], the revised form of Cognitive Therapy Rating Scale (CTRS) [<xref ref-type="bibr" rid="ref5">5</xref>], is one of the main treatment competence measurement tools used for CBT therapist accreditation internationally, comprising 12 distinct competency domains, including agenda setting, feedback, collaboration, pacing, and interpersonal effectiveness [<xref ref-type="bibr" rid="ref6">6</xref>]. Competence frameworks and scales like these serve multiple functions, such as providing structured developmental pathways [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>], facilitating formative assessment [<xref ref-type="bibr" rid="ref9">9</xref>], supporting summative evaluation [<xref ref-type="bibr" rid="ref10">10</xref>], and promoting training consistency [<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>Nevertheless, implementing competency-based training and evaluation faces considerable practical and methodological challenges. 
First, the reliability of competence assessment remains inconsistent; although tools such as the CTS-R can achieve high interrater reliability under controlled conditions, such as when groups of raters work together, independent ratings often show poor-to-moderate agreement [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Second, resource limitations create substantial barriers to effective competence development. This includes demands on clinical supervisors, limited access for trainees to diverse clinical presentations, reliance on time-demanding assessment methods, and resource constraints affecting feedback quality [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Third, ethical tensions arise as trainees must develop competence &#x201C;on the job,&#x201D; with a need to ensure patient safety and adhere to professional codes of conduct [<xref ref-type="bibr" rid="ref15">15</xref>]. While this is managed through informed consent, supervision, adherence to professional codes of conduct, and a constant focus on the trainee&#x2019;s competence and ethical decision-making [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>], it requires a high level of supervisory support, which can be resource-intensive for training programs managing increasing cohort sizes. 
The expansion of the psychological professions workforce places additional demands on supervision infrastructure, potentially affecting the quality and availability of supervisory support [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref14">14</xref>].</p></sec><sec id="s1-3"><title>Traditional Role-Play Methods in Mental Health Training</title><p>Role-play exercises are a cornerstone of competency-based training, offering structured opportunities to practice and receive feedback on specific therapeutic skills [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. While role-plays provide valuable opportunities for skill development, they face substantial limitations in terms of ecological validity, scalability, and consistency [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Within psychological practitioner training, standardized role-plays using trained actors offer structured learning, though validity concerns persist [<xref ref-type="bibr" rid="ref10">10</xref>]. Student-to-student role-plays face practical challenges such as inconsistencies in role-play quality due to student anxiety when role-playing with peers, authenticity limitations as students may deliberately facilitate their peers&#x2019; success rather than presenting realistic clinical challenges, ethical concerns regarding self-disclosure, and logistical difficulties across large student cohorts [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>]. 
Research has identified 4 authenticity barriers, namely, students&#x2019; insufficient knowledge of psychiatric symptoms, reluctance to portray distressing emotions, artificial dynamics from preexisting relationships, and inadequate environmental settings [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Interestingly, studies show higher-performing students often experience greater anxiety during role-plays than lower-performing peers&#x2014;a &#x201C;competence-anxiety paradox&#x201D; linked to skilled trainees&#x2019; greater awareness of therapeutic complexity [<xref ref-type="bibr" rid="ref18">18</xref>]. Teacher demonstration models, where instructors model therapeutic techniques with volunteer &#x201C;clients,&#x201D; provide excellent teaching opportunities but limited hands-on practice for students [<xref ref-type="bibr" rid="ref18">18</xref>]. These limitations contribute to the &#x201C;theory-practice gap&#x201D; where classroom knowledge fails to translate effectively to clinical application [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref25">25</xref>].</p></sec><sec id="s1-4"><title>Technological Innovations in Mental Health Training</title><p>Traditional role-play limitations have driven advancements in simulation technology for clinical skills training. Innovations included decision tree&#x2013;based interactive videos that improved the standardization of training experiences and assessment procedures [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. This approach has reduced psychological strain on actors playing patient roles, as they no longer needed to maintain complex character portrayals for extended periods and improved scalability by eliminating the need for extensive simulant training that required 10 hours for students or 3&#x2010;4 hours for clinicians [<xref ref-type="bibr" rid="ref27">27</xref>]. 
While research shows skills can improve through these methods, there is variability in the degree to which students find interactive client systems engaging and useful [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. Machine learning and language models have advanced patient simulations through contextual understanding and human-like text generation, which can increase realism comparable to human patients [<xref ref-type="bibr" rid="ref29">29</xref>], across diverse scenarios, though responses sometimes appeared overly idealized [<xref ref-type="bibr" rid="ref30">30</xref>]. While these approaches facilitate practice, they do not provide competency-based feedback to further skill development, which is essential for meeting professional training standards and ensuring therapists develop the specific competencies required for NHS service delivery.</p></sec><sec id="s1-5"><title>Rationale for This Study</title><p>Key gaps remain in the literature on virtual patient simulations for CBT training. First, to our knowledge, although existing tools have achieved realistic patient interactions [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>], none have systematically translated established competence frameworks, such as the CTS-R or UCL competence frameworks, into real-time feedback during simulated practice. This limits their alignment with professional training standards and NHS service requirements. While artificial intelligence (AI) systems have been developed to assess CBT competence from recorded therapy sessions [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>], the integration of such frameworks into simulation-based training represents a critical unmet need. 
Given that competence frameworks underpin evidence-based CBT training and professional accreditation [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>], bridging this gap is essential for advancing scalable, standards-aligned training tools.</p><p>Approaches to operationalize competence development through frameworks and scales need to build on learning models such as experiential learning [<xref ref-type="bibr" rid="ref33">33</xref>], deliberate practice methodologies [<xref ref-type="bibr" rid="ref34">34</xref>], and technology-enhanced training [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Therefore, an AI-driven simulation platform that incorporates competency-based feedback mechanisms offers a promising solution to address the identified limitations in traditional training approaches while maintaining alignment with established professional frameworks and educational principles. Such a platform fosters experiential learning by enabling safe, active experimentation alongside structured reflection, while also supporting deliberate practice through systematic and repeatable skill development, representing a novel approach that addresses longstanding limitations in CBT training technology.</p><p>A second significant gap pertains to voluntary engagement with training technologies in psychological practitioner education. Existing studies on simulation-based learning have largely examined mandatory curricular components or controlled experimental settings, focusing primarily on perceived fidelity or effectiveness [<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>]. Understanding how trainees voluntarily engage with supplementary training technologies provides critical evidence for implementation feasibility and ecological validity. 
Unlike previous research, this study investigates naturalistic engagement patterns within real-world training programs, integration with existing curricula, and specific mechanisms through which such simulations might enhance learning. By combining quantitative engagement metrics with qualitative feedback, this research can provide preliminary evidence regarding the practical utility of AI simulations in psychological therapist training.</p></sec><sec id="s1-6"><title>Study Aims</title><p>This pilot study aimed to evaluate the educational potential of the first virtual patient platform providing competence framework&#x2013;aligned feedback in psychological therapist training. Specifically, the study aimed:</p><list list-type="order"><list-item><p>to assess the acceptability of simulation-based learning integrated with competency-based feedback as a supplemental tool within established training programs for psychological therapists who use CBT in their practice.</p></list-item><list-item><p>to examine patterns of voluntary engagement with simulation-based practice among trainees from different professional training backgrounds.</p></list-item><list-item><p>to gather preliminary evidence regarding self-reported competence development following simulation-based practice experiences.</p></list-item><list-item><p>to explore trainees&#x2019; perceptions of the educational value of framework-aligned feedback during simulated therapeutic interactions compared to traditional training methods.</p></list-item></list></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>The CBT Trainer Platform</title><p>The CBT Trainer platform (TTZ) [<xref ref-type="bibr" rid="ref36">36</xref>] is an AI-based virtual patient platform designed for mental health training. 
The app presented participants with interactive scenarios replicating therapeutic sessions for common mental health conditions, including depression, generalized anxiety disorder, social anxiety disorder, posttraumatic stress disorder, and agoraphobia. The platform primarily enabled voice- and text-based interaction with virtual patients exhibiting realistic clinical presentations. Users began by selecting from 6 virtual patients representing diverse demographics and clinical presentations. During voice- or text-based interactions, the system continuously evaluated responses against competence frameworks, providing real-time feedback visible in the interface.</p><p>CBT Trainer represents the first virtual patient simulation to operationalize established competence frameworks into real-time automated feedback. The platform was designed to support the practice of both assessment and intervention skills. Users could select which type of session to practice (assessment or intervention) and choose the corresponding competence framework for feedback. The CBT Trainer assessed trainees against a selected competency scale&#x2014;either the Cognitive Therapy Scale-Revised (CTS-R) [<xref ref-type="bibr" rid="ref4">4</xref>] or the UCL Psychological Wellbeing Practitioner (PWP) Assessment Competence Scale [<xref ref-type="bibr" rid="ref37">37</xref>]. CBT Trainer indicates whether specific competencies were demonstrated or required further development. After practice sessions, participants received structured feedback, highlighting competencies met and those not met, with specific guidance to support their development of key therapeutic competencies. The platform was designed to complement, not replace, traditional training methods by providing additional opportunities for independent skill practice. The CBT Trainer app&#x2019;s content and features were kept unchanged throughout the data collection period to ensure consistency across all participant interactions. 
<xref ref-type="fig" rid="figure1">Figures 1</xref> and <xref ref-type="fig" rid="figure2">2</xref> illustrate the app&#x2019;s user interface diagrams, including the patient selection screen displaying diverse clinical presentations, the interactive role-play screen, and the competency feedback screens.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>CBT Trainer patient selection (left) and role-play interfaces (right).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig01.png"/></fig><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Competence assessment and progress tracking interfaces. (Left) University College London (UCL) Psychological Wellbeing Practitioner (PWP) Assessment Competence Scale interface shows real-time competence evaluation across core domains. (Right) Cognitive Therapy Scale-Revised (CTS-R) progress tracking screen.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig02.png"/></fig></sec><sec id="s2-2"><title>Platform Development and Validation</title><p>The CBT Trainer platform was developed in collaboration with UCL&#x2019;s CORE, the research group that created the UCL competence frameworks upon which the platform is built. The development team comprised a clinical psychologist who designed patient profiles and competency-based feedback mechanisms, and a machine learning specialist who built the competence assessment algorithms.</p><p>Six diverse virtual patient profiles were created representing common mental health presentations, including depression, generalized anxiety disorder, social anxiety disorder, posttraumatic stress disorder, agoraphobia with panic, and obsessive-compulsive disorder (<xref ref-type="table" rid="table1">Table 1</xref>). 
Each profile was developed by a clinical psychologist to reflect realistic clinical presentations.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Virtual patient profiles in CBT<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> Trainer.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Name</td><td align="left" valign="bottom">Age (years)</td><td align="left" valign="bottom">Background</td><td align="left" valign="bottom">Primary presentation</td><td align="left" valign="bottom">PHQ-9<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="bottom">GAD-7<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td></tr></thead><tbody><tr><td align="left" valign="top">Henry</td><td align="char" char="." valign="top">35</td><td align="left" valign="top">British Chinese, Liverpool</td><td align="left" valign="top">Depression with risk</td><td align="char" char="." valign="top">21</td><td align="char" char="." valign="top">18</td></tr><tr><td align="left" valign="top">Jayden</td><td align="char" char="." valign="top">20</td><td align="left" valign="top">Afro-Caribbean, London</td><td align="left" valign="top">Social anxiety</td><td align="char" char="." valign="top">12</td><td align="char" char="." valign="top">16</td></tr><tr><td align="left" valign="top">Penelope</td><td align="char" char="." valign="top">46</td><td align="left" valign="top">British, dyslexia</td><td align="left" valign="top">Generalized anxiety</td><td align="char" char="." valign="top">15</td><td align="char" char="." valign="top">19</td></tr><tr><td align="left" valign="top">Siobhan</td><td align="char" char="." valign="top">55</td><td align="left" valign="top">Irish</td><td align="left" valign="top">Agoraphobia with panic</td><td align="char" char="." valign="top">15</td><td align="char" char="." 
valign="top">19</td></tr><tr><td align="left" valign="top">Yasir</td><td align="char" char="." valign="top">56</td><td align="left" valign="top">Middle Eastern (Bidoon), asylum seeker</td><td align="left" valign="top">PTSD<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="char" char="." valign="top">19</td><td align="char" char="." valign="top">18</td></tr><tr><td align="left" valign="top">Sara</td><td align="char" char="." valign="top">31</td><td align="left" valign="top">Indian Hindu, London</td><td align="left" valign="top">OCD<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="char" char="." valign="top">17</td><td align="char" char="." valign="top">19</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>CBT: cognitive behavioral therapy.</p></fn><fn id="table1fn2"><p><sup>b</sup>PHQ-9: Patient Health Questionnaire-9.</p></fn><fn id="table1fn3"><p><sup>c</sup>GAD-7: Generalized Anxiety Disorder-7.</p></fn><fn id="table1fn4"><p><sup>d</sup>PTSD: posttraumatic stress disorder.</p></fn><fn id="table1fn5"><p><sup>e</sup>OCD: obsessive compulsive disorder.</p></fn></table-wrap-foot></table-wrap><p>Prior to the pilot study, the platform underwent iterative testing and refinement. Patient interactions were tested by course tutors from the Doctorate in Clinical Psychology and PWP training programs at UCL to assess clinical authenticity and educational appropriateness. Early testing revealed that initial response patterns provided too much information too quickly compared to authentic patient presentations (where patients disclose more over time as the therapeutic relationship evolves). Based on this feedback, we adjusted response length and detail level to match typical patient communication patterns.</p><p>Throughout development and deployment, several measures ensured appropriate platform responses. 
The system development includes alignment efforts focused on maintaining task adherence to therapeutic contexts. All feedback was explicitly grounded in established competence frameworks (CTS-R and UCL PWP Assessment Competence Scale), limiting assessment to observable, framework-specified competencies.</p></sec><sec id="s2-3"><title>Study Design</title><p>This pilot study used a 2-stage mixed methods approach to evaluate an AI-based virtual patient platform for training cognitive behavioral therapists. This included usability testing, digital engagement data, and questionnaires to assess acceptability and educational impact.</p></sec><sec id="s2-4"><title>Participants</title><p>Participants were recruited from psychological practitioner training programs at UCL during the 2024&#x2010;2025 academic year, targeting 2 specific programs, namely, the Doctorate in Clinical Psychology and the Low-intensity PWP training. Eligible participants were required to be enrolled in one of these programs and have access to an iOS device with internet connectivity capable of running the CBT Trainer app.</p></sec><sec id="s2-5"><title>Procedure</title><p>The study was conducted in 2 stages. Stage 1 involved 4 participants in 1-hour in-person usability testing sessions, during which they engaged with the platform under the researcher&#x2019;s observation. This allowed us to examine CBT Trainer functionality and intuitiveness based on user actions and responses. Stage 1 findings informed Stage 2 by identifying needed platform refinements (eg, user experience improvements and bug fixes).</p><p>Stage 2 expanded the study to include 101 participants who completed an initial online screening. After completing online screening, informed consent, and demographic questionnaires, eligible participants downloaded the app to their personal device. 
Participants were encouraged to use the platform for at least 60 minutes during the study period to gain meaningful experience, though they were explicitly informed that there was no mandatory usage requirement and they could engage with the app as much or as little as they preferred. Two reminder emails were sent to all participants during weeks 1 and 3 of the 4-week study period, providing tips on integrating the platform with their training program activities. The stage concluded with a comprehensive online survey.</p><p>All 101 participants completing the screening survey were entered into a draw for 1 of 3 &#x00A3;45 (approximately US $59) vouchers, but no incentives were provided for app download or engagement to preserve the ecological validity of usage patterns.</p><p>The research version of the CBT Trainer app was distributed through Apple&#x2019;s TestFlight beta testing platform to maintain separation between research data and any future public release, which required participants to complete multiple steps, including receiving and accepting email invitations, downloading the TestFlight app, and then installing CBT Trainer.</p></sec><sec id="s2-6"><title>Measures</title><sec id="s2-6-1"><title>Survey Instruments</title><sec id="s2-6-1-1"><title>System Usability Scale</title><p>A standardized 10-item questionnaire measuring platform usability, with scores ranging from 0 to 100 [<xref ref-type="bibr" rid="ref38">38</xref>]. Scores above 68 are considered above average, and scores above 80 are considered excellent [<xref ref-type="bibr" rid="ref39">39</xref>].</p></sec><sec id="s2-6-1-2"><title>PostStudy Questionnaire</title><p>A custom questionnaire designed to capture participants&#x2019; experiences with the CBT Trainer platform. 
The questionnaire comprised four main sections: (1) 3 subjective learning outcome measures assessing helpfulness, skill improvement, and perceived realism using 0&#x2010;100 rating scales; (2) a checklist assessment of specific competence development across therapeutic domains; (3) comparative evaluation questions rating CBT Trainer against traditional role-play methods; and (4) open-ended qualitative questions exploring learning impact and user experience. The complete poststudy questionnaire is available in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec></sec></sec><sec id="s2-7"><title>Engagement Metrics</title><p>Platform usage was tracked through several dimensions, including role-play time (time spent in active interaction with simulated patients), session quantity (total number of role-play sessions created per user), and interaction depth (average exchanges between trainees and virtual patients per session, where each exchange comprised 1 trainee input paired with 1 AI patient response).</p></sec><sec id="s2-8"><title>Data Analysis</title><p>Descriptive statistics were computed for all continuous variables (eg, System Usability Scale [SUS] scores, engagement metrics, and self-rated learning outcomes) and are presented as means, SDs, medians, and ranges. Categorical data (eg, demographic variables, competence development checklist items, and comparative advantages) are presented as frequencies (n) and percentages (%). 
Missing data were addressed by using complete case analysis, resulting in varying sample sizes across different analyses, which are explicitly reported alongside each result (eg, n=33 for SUS scores and n=31 for competence ratings).</p><p>Qualitative responses from an open-ended survey question, &#x201C;Please provide one or two specific examples of how CBT Trainer impacted your clinical skills or knowledge,&#x201D; were analyzed using qualitative content analysis [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Qualitative content analysis was chosen as it enables describing and quantifying qualitative data while preserving participants&#x2019; expressed experiences&#x2014;appropriate for this acceptability study where we sought to identify the frequency and range of participant perspectives on platform utility [<xref ref-type="bibr" rid="ref42">42</xref>]. Analysis focused on identifying manifest (ie, surface level) meanings in the data following an inductive approach [<xref ref-type="bibr" rid="ref43">43</xref>]. Analysis was conducted by TTZ, a clinical psychology doctoral research student and one of the platform developers, who maintained reflexive awareness of potential bias throughout the analytic process. The questionnaire responses were read several times by TTZ. All descriptions of specific ways the CBT Trainer impacted participants&#x2019; clinical skills and training experiences were considered meaning units. If an impact was described multiple times within a single participant&#x2019;s response, these descriptions were conjoined into a single meaning unit. 
Codes (ie, specific impacts on clinical skills and knowledge) were then grouped into categories and subcategories on the basis of similarities and differences in their educational and learning functions.</p></sec><sec id="s2-9"><title>Outcomes</title><p>The research team established prespecified outcomes prior to study commencement, following implementation of study guidelines [<xref ref-type="bibr" rid="ref44">44</xref>]:</p><list list-type="bullet"><list-item><p>Primary outcomes</p></list-item></list><list list-type="bullet"><list-item><p>Platform engagement: target of &#x003E;50% of participants using the app for &#x2265;10 minutes, or &#x003E;25% using it for &#x2265;30 minutes (Aim 2)</p></list-item><list-item><p>Perceived educational value: target of &#x003E;70% of participants reporting positive impact on training (Aim 4)</p></list-item></list><list list-type="bullet"><list-item><p>Secondary outcomes</p></list-item></list><list list-type="bullet"><list-item><p>Recruitment and retention: target of &#x2265;22 participants across both study phases (Aim 1)</p></list-item><list-item><p>System usability: target SUS score &#x003E;68 (Aim 1)</p></list-item></list><list list-type="bullet"><list-item><p>Exploratory</p></list-item></list><list list-type="bullet"><list-item><p>Self-reported improvements across competency domains (Aim 3)</p></list-item></list></sec><sec id="s2-10"><title>Ethical Considerations</title><p>The study received approval from the UCL Research Ethics Committee (21883/006). The study protocol was preregistered with the Open Science Framework [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>Several safeguards were implemented to address ethical concerns. Course directors from participating training programs were aware of the study. Meetings with program staff occurred at study commencement and debriefing. 
A human-in-the-loop design was implemented with weekly research team meetings reviewing engagement patterns to identify any concerning patterns. Specifically, the team monitored for (1) misuse of the platform for noneducational purposes through sampled transcript review, (2) inappropriate content or off-task interactions, (3) technical issues (error reports and crashes) that could impede learning, and (4) instances where AI feedback appeared inappropriate or misaligned with competence frameworks. No significant concerning engagement patterns requiring intervention were identified during the study period. Reminder emails sent at weeks 1 and 3 provided opportunities to report concerns. The platform included a direct link to the study information sheet. Participants were informed that the platform was experimental and supplementary to the traditional training, with in-app reminders to bring concerns to their supervisors and reminders of the limitations of AI-generated competency feedback. Survey questions provided opportunities for participants to report any concerns or difficulties. Support was available from researchers for technical issues or concerns about platform feedback. Participants who ceased engagement were contacted during survey collection to check whether issues had arisen. Coercion was mitigated through voluntary self-selection recruitment via email advertisement, no incentive payments for engagement, explicit consent procedures reminding that no minimum usage is required, and reminding the right to withdraw. Data privacy and security were prioritized, with personal identifiers kept separate from research data. 
Multiple contact pathways were provided within the app, including principal investigator contact, ethics committee contact, and UCL Data Protection Office contact.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Phase 1: Initial Usability Testing</title><p>Phase 1 involved 4 doctoral clinical psychology students (all women, aged 25&#x2010;34 years). PWP trainees were not included in Phase 1 usability testing due to their later program start date. Participants engaged with the platform under the researcher&#x2019;s observation. Researchers monitored interactions for technical issues and clinical appropriateness. Several bugs and user experience navigation issues were revealed and fixed before Phase 2 implementation.</p></sec><sec id="s3-2"><title>Phase 2: Extended Implementation</title><sec id="s3-2-1"><title>Participant Characteristics</title><p><xref ref-type="fig" rid="figure3">Figure 3</xref> illustrates participant flow and attrition throughout the study phases. Of 101 invited, 92 (91.1%) were eligible based on iOS device compatibility. Attrition occurred primarily at app download, where 25 eligible participants (27.2%) did not proceed likely due to TestFlight app download complexity and technical barriers, and at survey completion, where 28 active users (47.5%) did not complete follow-up assessments, likely mistaking the end-of-study survey as optional educational monitoring rather than mandatory research data collection. Technical difficulties with older iOS devices prevented 8 participants (8.7% of the eligible) from engaging despite successful installation. 
Overall, 59 participants (64.1% of the eligible) successfully engaged with the platform, with 31&#x2010;33 completing comprehensive survey assessments for secondary analyses.</p><p>Of the 59 engaging participants, 84.7% (n=50) are from the Low Intensity Cognitive Behavioral Interventions program and 15.3% (n=9) are from the Doctorate in Clinical Psychology program. Participants were predominantly women (49/59, 83.1%) with a mean age of 28.37 (SD=7.25; range 22&#x2010;62) years. The ethnic distribution included White (34/59, 57.6%), Black or African or Caribbean or Black British (9/59, 15.3%), Asian or Asian British (7/59, 11.9%), Other ethnic groups (5/59, 8.5%), and Mixed or Multiple ethnic groups (4/59, 6.8%). The majority of participants were at the early stages of their training, with a mean enrollment time of 1.61 months in their respective programs (SD 2.65; median 1.0, IQR 0&#x2010;18 months). Regarding familiarity with competency assessment frameworks, 39% (n=23) reported being somewhat familiar, 25.4% (n=15) were neutral, 16.9% (n=10) were somewhat unfamiliar, 16.9% (n=10) were very unfamiliar or had never heard of them, and 1.7% (n=1) were very familiar. Exploratory analyses revealed no substantial differences in engagement patterns, demographics, or outcomes between the 2 training programs, supporting combined analysis of the sample.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Participant flow diagram; CBT: cognitive behavioral therapy.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig03.png"/></fig></sec><sec id="s3-2-2"><title>Platform Engagement</title><p>Of the 59 engaging participants, they spent an average of 95.24 (SD 134.58; median 45.34, IQR 11.57-105.15) minutes engaging in role-plays. 
Users created an average of 4.24 role-play sessions each (SD 3.72; range 1&#x2010;14), with an average session duration of 21.48 (SD 23.24; median 14.21, IQR 5.93-32.82) minutes. Each session contained an average of 48.58 (SD 57.11, median 21, IQR 4&#x2013;82) interactions. The data were right-skewed (most values clustered at the low end with a long tail of high values). While most participants engaged for shorter periods, a smaller number engaged for very long periods (several hours).</p></sec></sec><sec id="s3-3"><title>System Usability</title><p>The CBT Trainer platform achieved high usability ratings with a mean SUS score of 82.2 (SD 12.93; median 82.5, IQR 72.5-92.5, n=33), placing it in the &#x201C;excellent&#x201D; category according to established benchmarks [<xref ref-type="bibr" rid="ref39">39</xref>].</p></sec><sec id="s3-4"><title>Self-Reported Competence Development</title><p>Participants reported high agreement that &#x201C;The simulated patient interactions were helpful for my learning&#x201D; (mean 79.35, SD 17.49; median 80.0, IQR 69.5-95.5, n=31) and &#x201C;My clinical skills has improved after using CBT Trainer&#x201D; (mean 73.67, SD 23.03; median 81.0, IQR 60.0-90.0 ,n=30), as illustrated in <xref ref-type="fig" rid="figure4">Figure 4</xref>. The statement &#x201C;The simulated patients were realistic to real patients&#x201D; received moderately strong agreement (mean 68.45, SD 20.45; median=70.0, IQR 49.5-87.5, n=31).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Educational impact ratings reported by CBT (cognitive behavioral therapy) Trainer users (n=31).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig04.png"/></fig><p>The distribution of self-reported competence development varied considerably across the assessed domains (<xref ref-type="fig" rid="figure5">Figure 5</xref>). 
Some competency areas showed limited improvement, including ethical decision making (0/31, 0%) and cultural competence (1/31, 3.2%). The most frequently reported competence improvements (<xref ref-type="fig" rid="figure5">Figure 5</xref>) were in assessment skills (30/31, 96.8%), followed by information gathering on cognitions, behaviors, autonomy, and emotions (21/31, 67.7%), and information giving and shared decision-making (17/31, 54.8%).</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Competence development areas reported by CBT (cognitive behavioral therapy) Trainer users.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig05.png"/></fig></sec><sec id="s3-5"><title>Qualitative Feedback on Learning Impact</title><p>Content analysis of qualitative feedback gathered through the open-ended question &#x201C;Please provide one or two specific examples of how CBT Trainer impacted your clinical skills or knowledge&#x201D; (n=21 responses) in the poststudy questionnaire revealed 38 distinct codes describing specific impacts on clinical skills and knowledge. These codes were clustered into 8 main categories, with a full description of each code provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p><sec id="s3-5-1"><title>Category 1: Skill Development</title><p>Participants reported improvements in specific clinical techniques, communication skills, assessment abilities, and session management. This was the most frequently cited category (16/21, 76.2%). Reported improvements in specific clinical techniques include risk assessment (4/21, 19%), funneling (3/21, 14.3%), and structured information gathering using frameworks like 4Ws and ABCs (2/21, 9.5%). 
As one participant described:</p><disp-quote><p>The function of being able to analyse your conversation with the patient and compare against the specific competencies was incredibly helpful... This really helped me to improve my information gathering skills using the 4Ws and ABCs.&#x201D;</p><attrib>P19</attrib></disp-quote><p>Communication skills improvements included developing more flexible and adaptive responses (2/21, 9.5%) and refining questioning styles (2/21, 9.5%). One participant noted:</p><disp-quote><p>Normally I stutter when talking to patients and this helped me to think before I speak</p><attrib>P12</attrib></disp-quote><p>Many participants reported enhanced assessment skills, both generally (4/21, 19%) and specifically in understanding competency-based assessment (4/21, 19%). Improvements in session management were frequently described, including better pacing (4/21, 19%), time management (2/21, 9.5%), and session structure (2/21, 9.5%). Participant explained:</p><disp-quote><p>It also really helped me to improve my pacing of the assessment, as I was able to look at my timings afterwards to work out how much time I spent in each section.</p><attrib>P19</attrib></disp-quote></sec><sec id="s3-5-2"><title>Category 2: Practice Accessibility and Autonomy</title><p>Nearly half of the participants (9/21, 42.9%) valued the accessibility and autonomy the platform provided. The most commonly cited benefit was independent practice (7/21, 33.3%), which reduced dependence on peers or family members. As one participant described:</p><disp-quote><p>I found it difficult to continuously practise with my friends and family, and it was a bit unfair, as it would take about 45mins of their free time... 
Being able to use CBT Trainer meant that I could practise an entire assessment repeatedly.</p><attrib>P3</attrib></disp-quote><p>Flexible scheduling (4/21, 19%) and the ability to engage in repeated practice (3/21, 14.3%) were also valued features.</p></sec><sec id="s3-5-3"><title>Category 3: Feedback and Competence Assessment</title><p>Nearly two-thirds of participants (13/21, 61.9%) referenced the value of structured feedback aligned with competence frameworks. Competence gap identification was the most frequently mentioned benefit (8/21, 38.1%), followed by competence framework alignment (6/21, 28.6%). A participant explained:</p><disp-quote><p>It gave me an understanding of where I was and wasn&#x2019;t hitting the mark scheme. For example, where there wasn&#x2019;t enough evidence of covering the confidentiality criteria.</p><attrib>P10</attrib></disp-quote><p>Constructive, personalized feedback was valued by several participants (5/21, 23.8%), with Participant 19 noting the platform &#x201C;gave personalized tips for each competence.&#x201D;</p></sec><sec id="s3-5-4"><title>Category 4: Confidence, Preparedness, and Anxiety Reduction</title><p>More than half of participants (11/21, 52.4%) reported that the CBT Trainer enhanced their confidence and preparedness while reducing anxiety. Exam and OSCE preparation (5/21, 23.8%) and confidence building (5/21, 23.8%) were equally prominent. 
A participant described:</p><disp-quote><p>Knowing that I could practice anytime of the day made me feel so much more calm about my exam as I didn&#x2019;t have to rely on other people to practice with.</p><attrib>P15</attrib></disp-quote><p>Anxiety reduction was specifically mentioned by 3 participants (14.3%), while preparation for clinical placements was noted by 2 (9.5%).</p></sec><sec id="s3-5-5"><title>Category 5: Psychological Safety and Judgment-Free Learning</title><p>A small number of participants (2/21, 9.5%) explicitly referenced the psychological safety the platform provided. Participant 1 valued being able to &#x201C;practice and experiment with new techniques and skills during training without fear of judgment.&#x201D;</p></sec><sec id="s3-5-6"><title>Category 6: Diversity and Presentation-Specific Learning</title><p>Approximately one-third of participants (6/21, 28.6%) valued exposure to diverse patient presentations (4/21, 19%) and different presenting problems (3/21, 14.3%). Participant 15 noted: &#x201C;The diversity of the patients was also helpful to prepare for real life.&#x201D; Some participants specifically valued practice with challenging scenarios (3/21, 14.3%) and opportunities for culturally sensitive practice (1/21, 4.8%), such as working with patients from different religious backgrounds.</p></sec><sec id="s3-5-7"><title>Category 7: Reflection and Self-Awareness</title><p>One-third of participants (7/21, 33.3%) reported enhanced reflection and self-awareness. Self-awareness of performance was the most common impact (5/21, 23.8%), followed by the ability to engage in conversation analysis (3/21, 14.3%). 
One participant described:</p><disp-quote><p>The competence section was extremely helpful as it helped me to recognize areas I missed or did not go over during my roleplay.</p><attrib>P20</attrib></disp-quote></sec><sec id="s3-5-8"><title>Category 8: No Perceived Impact</title><p>One participant (4.8%) reported no perceived impact on clinical skills or knowledge from using CBT Trainer. Participants also identified limitations of the platform. The most frequently cited limitation was the absence of nonverbal communication cues (7/21, 33%), with participants noting they &#x201C;can&#x2019;t read body language&#x201D; (P15) and missing &#x201C;micro expressions&#x201D; that inform real clinical work. Several participants found patient responses occasionally unrealistic (5/21, 24%), describing virtual patients as &#x201C;too willing to engage compared to real clients&#x201D; (P9) or &#x201C;overly positive about interventions&#x201D; (P1). Technical issues with voice recognition were mentioned by 14% (n=3), and some desired greater diversity in clinical presentations (2/21, 10%).</p></sec></sec><sec id="s3-6"><title>Comparison to Traditional Role-Play</title><p>Participants were asked to evaluate &#x201C;How does CBT Trainer compare to traditional role-play exercises with peers? (0=Traditional methods much better, 10=CBT Trainer much better)&#x201D; and rated CBT Trainer against traditional classroom role-play with a mean score of 5.90 (SD 1.94; median 6.0, IQR 5.0-6.0, n=31) on a 10-point scale favoring CBT Trainer, indicating moderate comparability. 
<xref ref-type="fig" rid="figure6">Figure 6</xref> shows participants&#x2019; perceptions of the comparative advantages and disadvantages of CBT Trainer relative to traditional role-play methods.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Perceived advantages and disadvantages of CBT Trainer compared to traditional training methods by CBT (cognitive behavioral therapy) Trainer users (n=31).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e84091_fig06.png"/></fig></sec><sec id="s3-7"><title>Primary and Secondary Outcomes</title><p>All prespecified implementation targets were achieved. For platform engagement, 79.66% (n=47) of 59 active users engaged with the platform for &#x2265;10 minutes, and 59.32% (n=35) used it for &#x2265;30 minutes, both exceeding target thresholds. Perceived educational value similarly exceeded the target of &#x003E;70% reporting positive impact (rating &#x003E;50/100) across all 3 educational value statements, with the following proportions of participants rating each statement above 50/100: 90.3% (28/31) for simulated patient interactions being helpful for learning, 71.0% (22/31) for simulated patients being realistic to real patients, and 83.3% (25/30) for clinical skills having improved after using CBT Trainer. Recruitment and retention substantially exceeded targets with 101 initial participants and 59 active users. The mean SUS score of 82.2 (SD 12.93) was well above the target threshold of &#x003E;68.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This pilot study evaluated CBT Trainer, the first AI-driven virtual patient platform to provide real-time feedback aligned with established competence frameworks (CTS-R and UCL PWP Assessment Scale) in psychological practitioner training. 
It is also the first study to examine voluntary engagement patterns with such simulations within real-world training programs. The platform demonstrated strong feasibility and acceptability, with most participants voluntarily engaging in extended practice sessions&#x2014;indicating meaningful opportunities for therapeutic skill development. Excellent usability ratings were achieved, and participants reported substantial educational value, particularly in foundational clinical skills. Self-reported improvements were most notable in assessment competencies, where nearly all participants reported enhanced assessment skills and most noted gains in information gathering. Qualitative analysis revealed that participants valued the platform&#x2019;s immediate feedback, convenience, and judgment-free practice environment as key advantages over traditional peer role-plays, while acknowledging limitations such as the absence of nonverbal cues. We discuss these results in relation to existing literature, consider their implications for psychological training, and discuss limitations and future research directions.</p></sec><sec id="s4-2"><title>Feasibility and Acceptability (Aim 1)</title><p>The study provides evidence supporting the acceptability and feasibility of implementing AI-based virtual patient simulations with competency-aligned feedback in psychological therapist training programs that use CBT. Participant engagement exceeded prespecified targets, with substantial uptake and active involvement in role-play interactions. The platform achieved excellent usability ratings, further supporting its feasibility for integration into clinical training environments [<xref ref-type="bibr" rid="ref39">39</xref>]. 
These findings align with prior research on AI-based simulated patients in health care education [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref29">29</xref>] and extend them to psychological therapist trainees who use CBT specifically, demonstrating that this population similarly values interactive simulations as learning tools.</p></sec><sec id="s4-3"><title>Patterns of Voluntary Engagement (Aim 2)</title><p>During the voluntary 1-month access period, engagement patterns were diverse, reflecting individualized approaches to simulation-based learning. While most participants opted for shorter, focused practice sessions, a smaller subset engaged in extended periods of use, spanning several hours. This variability suggests that trainees may adopt distinct engagement strategies&#x2014;whether for brief skill rehearsal or more immersive, in-depth practice. The duration and interaction density of role-play sessions were substantial enough to facilitate meaningful skill development across multiple competency domains. No substantial differences were observed in engagement patterns, demographics, or outcomes between the 2 training programs. This overall engagement profile indicates that trainees perceived the platform as a valuable supplement to traditional training and that the simulations provided a sufficiently authentic approximation of therapeutic dialogue in both depth and communicative substance.</p></sec><sec id="s4-4"><title>Self-Reported Competence Development (Aim 3)</title><p>Participants reported substantial improvement in assessment skills and information gathering, indicating that the platform effectively supports the development of fundamental therapeutic competencies. 
These improvements in core clinical skills, supported by strong ratings for educational value and perceived clinical skill improvement, mirror a similar study that observed significant gains in reflection skills among users of their patient-like conversational agent [<xref ref-type="bibr" rid="ref28">28</xref>]. The reported benefits extended beyond assessment to include domains such as risk assessment, therapeutic pacing, and interpersonal effectiveness, suggesting that virtual patient interactions can facilitate development across multiple competence areas, in line with evidence that structured AI simulations can enhance targeted clinical skills [<xref ref-type="bibr" rid="ref46">46</xref>]. However, the variability in perceived improvement across competency domains implies that certain therapeutic skills may be more amenable to technological simulation than others, pointing to both the potential and the boundaries of AI-driven training tools.</p></sec><sec id="s4-5"><title>Educational Impact and Perceived Value (Aim 4)</title><sec id="s4-5-1"><title>Self-Reported Educational Impact</title><p>CBT Trainer&#x2019;s capacity to facilitate immediate reflection on therapeutic interactions aligns with the principles of experiential learning [<xref ref-type="bibr" rid="ref33">33</xref>], particularly enhancing the critical reflective observation and abstract conceptualization phases that bridge concrete experience and active experimentation. This immediacy addresses a fundamental requirement of deliberate practice [<xref ref-type="bibr" rid="ref34">34</xref>], where skill acquisition depends on timely opportunities for reflection and refinement following performance attempts.</p><p>Participants consistently highlighted the benefit of practicing in a judgment-free environment. 
This aspect addresses several persistent challenges in traditional training, including performance anxiety during role-plays [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref22">22</xref>], evaluation apprehension inhibiting reflection [<xref ref-type="bibr" rid="ref47">47</xref>] and delayed feedback cycles that hamper systematic improvement of therapeutic microskills [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. Participants&#x2019; emphasis on the judgment-free practice environment suggests that the platform can mitigate these barriers concurrently, thereby fostering the deliberate, reflective skill development that is often constrained in conventional training formats.</p></sec><sec id="s4-5-2"><title>Comparative Advantages and Challenges</title><p>The balanced perspective offered by participants when comparing AI-based practice to traditional role-play methods highlights the complementary nature of these approaches. When asked about the platform&#x2019;s strengths, participants most frequently identified immediate feedback and convenience as significant educational benefits. Many users also valued the reduced performance anxiety that virtual practice offered compared to peer-based role-plays. Consistent with CBT principles of graded exposure, the platform may serve as an initial step that builds foundational skills before trainees progress to potentially more anxiety-provoking live role-play practice. The pattern of responses regarding comparative advantages aligns with the &#x201C;pedagogical affordances&#x201D; framework [<xref ref-type="bibr" rid="ref49">49</xref>], which suggests that different educational technologies offer unique benefits for specific learning objectives. 
The participants&#x2019; nuanced assessment of when and how the platform added value to their training reflects a sophisticated understanding of how technological tools fit within their broader educational experience.</p><p>Participants also recognized certain limitations of the technology, particularly regarding interpersonal elements. The recognized lack of nonverbal cues and potential for unrealistic responses echo concerns raised regarding the authenticity of AI-based clinical simulations [<xref ref-type="bibr" rid="ref30">30</xref>]. These limitations reinforce that current AI technology serves as a complement to, rather than a replacement for, conventional training models incorporating interpersonal engagement and supervision.</p></sec></sec><sec id="s4-6"><title>Limitations and Future Directions</title><p>Our findings should be interpreted with several limitations in mind. Technical limitations restricting participation to iOS users may have introduced selection bias. A substantial proportion of eligible participants who initially expressed interest did not progress to downloading or using the app. This pattern of attrition suggests the importance of understanding barriers to technology adoption in clinical training contexts, whether technical, motivational, or practical in nature. Future research should systematically investigate factors influencing technology adoption among clinical trainees, including surveys of nonusers to identify specific barriers and preferences.</p><p>Participants were predominantly early-stage trainees who had not yet received formal training in intervention techniques. This may explain why self-reported improvements were concentrated in assessment and information gathering skills rather than intervention delivery competencies. In addition, participants were trainee PWPs and clinical psychologists using CBT, not trainees from CBT-specific programs. 
Future research should evaluate the platform&#x2019;s utility in specialized CBT training contexts.</p><p>Methodologically, our mixed methods approach provided rich experiential data, but the absence of a control group prevents causal inferences about the platform&#x2019;s impact on skill development. The study relied on self-reported skill improvement rather than objective competence assessment, weakening conclusions about educational efficacy. The 1-month evaluation period offers limited insight into long-term engagement patterns. While exploratory analyses revealed no substantial differences in engagement between the 2 training programs, we did not systematically examine how prior CBT experience might influence engagement patterns. It is plausible that trainees with more CBT experience might engage differently than those new to CBT, either showing reduced engagement due to perceived redundancy or increased engagement due to greater appreciation of competence frameworks. Future evaluations should incorporate standardized competence evaluations, longitudinal designs across training stages, control of prior experience, and randomized controlled trials.</p><p>While the platform operationalized general CBT competencies through established frameworks such as the CTS-R and UCL PWP Assessment Scale, it did not explicitly integrate disorder-specific evidence-based models into its feedback mechanisms. For example, in interactions with the virtual patient presenting with generalized anxiety disorder, the platform assessed competencies such as application of change methods (CTS-R item 11) but did not evaluate whether the trainee&#x2019;s intervention strategies aligned with a specific evidence-based model for generalized anxiety disorder (eg, the Dugas intolerance of uncertainty model). 
Future studies could incorporate disorder-specific competence criteria and enable feedback on the appropriateness and fidelity of evidence-based intervention models, thereby enhancing their alignment with specialized CBT training requirements.</p><p>While we implemented several safeguards, the current model still places some responsibility on trainees to identify inappropriate feedback. This represents a limitation, as novice trainees may lack the expertise to critically evaluate AI-generated feedback. Future implementations should consider automated feedback auditing systems, routine expert review of AI-generated feedback, and structured guidance for trainees on how to critically engage with AI feedback within supervision.</p><p>While AI-driven simulations offer significant benefits for CBT training, their deployment must address known risks, including algorithmic bias, privacy concerns, and overreliance on technology that could erode professional judgment [<xref ref-type="bibr" rid="ref50">50</xref>]. Comprehensive evaluation of AI training tools requires continued attention to fairness metrics, cultural appropriateness across diverse populations, and environmental sustainability&#x2014;areas warranting further research as the field evolves.</p><p>The core methodology&#x2014;AI simulation with competence framework&#x2013;aligned feedback&#x2014;may extend beyond CBT to other health care disciplines with established competence frameworks, including medicine, nursing, social work, and allied health professions. Adaptation would require profession-specific scenarios, alignment with relevant competence standards, and validation that simulation skills transfer to clinical practice. 
Disciplines with well-defined assessment criteria would be particularly suited to this approach, and empirical evaluation across training contexts is needed.</p></sec><sec id="s4-7"><title>Conclusions</title><p>The pilot study demonstrates that CBT Trainer&#x2014;an AI-driven virtual patient platform delivering real-time competency-aligned feedback&#x2014;is both acceptable and feasible within psychological training programs that use CBT. Participants engaged voluntarily and extensively with the platform, reporting high usability and significant educational value, particularly in core assessment and information gathering skills. Qualitative analysis revealed that participants highly valued the competence framework&#x2013;aligned feedback, a judgment-free practice environment that reduced anxiety, and the flexibility for independent, repeated skill rehearsal. These features were seen as key advantages over traditional peer-based role-plays. The findings indicate that AI-based virtual patient simulations with competence framework&#x2013;aligned feedback can effectively complement traditional training methods, particularly for developing foundational clinical skills in a flexible, accessible format. Supplementary tools like this can be valuable in contexts facing supervision shortages or growing demand for mental health services. We recommend phased adoption with appropriate oversight mechanisms while simultaneously conducting the rigorous trials necessary to build the evidence base. Additionally, we emphasize the importance of involving course leaders, clinical supervisors, competency scale and framework experts, and policymakers in the ongoing refinement and validation. 
Future research should use randomized controlled designs with objective competence assessments and evaluate the platform&#x2019;s utility in specialized mental health training programs with more advanced trainees.</p></sec></sec></body><back><ack><p>We thank Dr Rachel Newman and Dr Joshua Buckman for their support during participant recruitment, and thank all participants for their participation.</p></ack><notes><sec><title>Funding</title><p>No external financial support or grants were received from any public, commercial, or not-for-profit entities for the research, authorship, or publication of this article.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="conflict"><p>TTZ is the founder and director of Soy Mental Health Educational Technologies. TTZ is a PhD student working under the supervision of the other authors who maintain independent scrutiny of the study. 
The other authors do not have a conflict of interest.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CBT</term><def><p>cognitive behavioral therapy</p></def></def-item><def-item><term id="abb3">CORE</term><def><p>Center for Outcomes Research and Effectiveness</p></def></def-item><def-item><term id="abb4">CTRS</term><def><p>Cognitive Therapy Rating Scale</p></def></def-item><def-item><term id="abb5">CTS-R</term><def><p>Cognitive Therapy Scale-Revised</p></def></def-item><def-item><term id="abb6">NHS</term><def><p>National Health Service</p></def></def-item><def-item><term id="abb7">PWP</term><def><p>Psychological Wellbeing Practitioner</p></def></def-item><def-item><term id="abb8">SUS</term><def><p>System Usability Scale</p></def></def-item><def-item><term id="abb9">UCL</term><def><p>University College London</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>NHS long term workforce plan</article-title><source>NHS England</source><year>2023</year><access-date>2025-05-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.england.nhs.uk/long-read/nhs-long-term-workforce-plan-2/">https://www.england.nhs.uk/long-read/nhs-long-term-workforce-plan-2/</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roth</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Pilling</surname><given-names>S</given-names> </name></person-group><article-title>Using an evidence-based methodology to identify the competences required to deliver effective cognitive and behavioural therapy for depression and anxiety disorders</article-title><source>Behav Cogn 
Psychother</source><year>2008</year><month>03</month><volume>36</volume><issue>2</issue><fpage>129</fpage><lpage>147</lpage><pub-id pub-id-type="doi">10.1017/S1352465808004141</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roth</surname><given-names>AD</given-names> </name></person-group><article-title>A new scale for the assessment of competences in cognitive and behavioural therapy</article-title><source>Behav Cogn Psychother</source><year>2016</year><month>09</month><volume>44</volume><issue>5</issue><fpage>620</fpage><lpage>624</lpage><pub-id pub-id-type="doi">10.1017/S1352465816000011</pub-id><pub-id pub-id-type="medline">26898543</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Blackburn</surname><given-names>IM</given-names> </name><name name-style="western"><surname>James</surname><given-names>IA</given-names> </name><name name-style="western"><surname>Milne</surname><given-names>DL</given-names> </name><etal/></person-group><article-title>Revised cognitive therapy scale (CTS-r)</article-title><source>APA PsycTests</source><year>2001</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1037/t65807-000">https://doi.org/10.1037/t65807-000</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Young</surname><given-names>J</given-names> </name><name name-style="western"><surname>Beck</surname><given-names>A</given-names> </name></person-group><article-title>Cognitive Therapy Scale (CTS)</article-title><source>APA PsycTests</source><year>1980</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://doi.org/10.1037/t00834-000">https://doi.org/10.1037/t00834-000</ext-link></comment></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reichelt</surname><given-names>FK</given-names> </name><name name-style="western"><surname>James</surname><given-names>IA</given-names> </name><name name-style="western"><surname>Blackburn</surname><given-names>IM</given-names> </name></person-group><article-title>Impact of training on rating competence in cognitive therapy</article-title><source>J Behav Ther Exp Psychiatry</source><year>2003</year><month>06</month><volume>34</volume><issue>2</issue><fpage>87</fpage><lpage>99</lpage><pub-id pub-id-type="doi">10.1016/s0005-7916(03)00022-3</pub-id><pub-id pub-id-type="medline">12899893</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bennett-Levy</surname><given-names>J</given-names> </name></person-group><article-title>Therapist skills: a cognitive model of their acquisition and refinement</article-title><source>Behav Cogn Psychother</source><year>2006</year><month>01</month><volume>34</volume><issue>1</issue><fpage>57</fpage><lpage>78</lpage><pub-id pub-id-type="doi">10.1017/S1352465805002420</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Muse</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kennerley</surname><given-names>H</given-names> </name><name name-style="western"><surname>McManus</surname><given-names>F</given-names> </name></person-group><article-title>The why, what, when, who and how of assessing CBT competence to support lifelong 
learning</article-title><source>tCBT</source><year>2022</year><volume>15</volume><fpage>e57</fpage><pub-id pub-id-type="doi">10.1017/S1754470X22000502</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Muse</surname><given-names>K</given-names> </name><name name-style="western"><surname>McManus</surname><given-names>F</given-names> </name></person-group><article-title>A systematic review of methods for assessing competence in cognitive-behavioural therapy</article-title><source>Clin Psychol Rev</source><year>2013</year><month>04</month><volume>33</volume><issue>3</issue><fpage>484</fpage><lpage>499</lpage><pub-id pub-id-type="doi">10.1016/j.cpr.2013.01.010</pub-id><pub-id pub-id-type="medline">23454222</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liness</surname><given-names>S</given-names> </name><name name-style="western"><surname>Beale</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lea</surname><given-names>S</given-names> </name><name name-style="western"><surname>Byrne</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hirsch</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Clark</surname><given-names>DM</given-names> </name></person-group><article-title>Evaluating CBT clinical competence with standardised role plays and patient therapy sessions</article-title><source>Cogn Ther Res</source><year>2019</year><month>12</month><volume>43</volume><issue>6</issue><fpage>959</fpage><lpage>970</lpage><pub-id pub-id-type="doi">10.1007/s10608-019-10024-z</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Sharpless</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Barber</surname><given-names>JP</given-names> </name></person-group><article-title>The Examination for Professional Practice in Psychology (EPPP) in the era of evidence-based practice</article-title><source>Prof Psychol Res Pr</source><year>2009</year><volume>40</volume><issue>4</issue><fpage>333</fpage><lpage>340</lpage><pub-id pub-id-type="doi">10.1037/a0013983</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Marriott</surname><given-names>BR</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>E</given-names> </name><name name-style="western"><surname>Tugendrajch</surname><given-names>SK</given-names> </name><etal/></person-group><article-title>Role-play assessment of therapist adherence and skill in implementation of trauma-focused cognitive-behavioral therapy</article-title><source>Adm Policy Ment Health</source><year>2022</year><month>05</month><volume>49</volume><issue>3</issue><fpage>374</fpage><lpage>384</lpage><pub-id pub-id-type="doi">10.1007/s10488-021-01169-9</pub-id><pub-id pub-id-type="medline">34546482</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loades</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Armstrong</surname><given-names>P</given-names> </name></person-group><article-title>The challenge of training supervisors to use direct assessments of clinical competence in CBT consistently: a systematic review and exploratory training study</article-title><source>Cogn Behav Ther</source><year>2016</year><volume>9</volume><fpage>e27</fpage><pub-id 
pub-id-type="doi">10.1017/S1754470X15000288</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Johnson</surname><given-names>J</given-names> </name><name name-style="western"><surname>Corker</surname><given-names>C</given-names> </name><name name-style="western"><surname>O&#x2019;connor</surname><given-names>DB</given-names> </name></person-group><article-title>Burnout in psychological therapists: a cross&#x2010;sectional study investigating the role of supervisory relationship quality</article-title><source>Clin Psychol (Aust Psychol Soc)</source><year>2020</year><month>11</month><day>1</day><volume>24</volume><issue>3</issue><fpage>223</fpage><lpage>235</lpage><pub-id pub-id-type="doi">10.1111/cp.12206</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>The British Psychological Society</collab></person-group><source>Code of Ethics and Conduct</source><year>2021</year><publisher-name>The British Psychological Society</publisher-name><pub-id pub-id-type="doi">10.53841/bpsrep.2021.inf94</pub-id><pub-id pub-id-type="other">978-1-85433-804-4</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>The British Psychological Society</collab></person-group><source>Supervision Guidance for Psychologists</source><year>2024</year><publisher-name>The British Psychological Society</publisher-name><pub-id pub-id-type="doi">10.53841/bpsrep.2024.rep178</pub-id><pub-id pub-id-type="other">978-1-85433-904-1</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Caltabiano</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Errington</surname><given-names>E</given-names> </name><name name-style="western"><surname>Sorin</surname><given-names>R</given-names> </name><name name-style="western"><surname>Nickson</surname><given-names>A</given-names> </name><etal/></person-group><article-title>The potential of role-play in undergraduate psychology training / marie caltabiano</article-title><source>Asian J Univ Educ</source><access-date>2026-02-21</access-date><comment>Preprint posted online on  Jun 30, 2018</comment><comment><ext-link ext-link-type="uri" xlink:href="https://files.eric.ed.gov/fulltext/EJ1207768.pdf">https://files.eric.ed.gov/fulltext/EJ1207768.pdf</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Melluish</surname><given-names>S</given-names> </name><name name-style="western"><surname>Crossley</surname><given-names>J</given-names> </name><name name-style="western"><surname>Tweed</surname><given-names>A</given-names> </name></person-group><article-title>An evaluation of the use of simulated patient role-plays in the teaching and assessment of clinical consultation skills in clinical psychologists&#x2019; training</article-title><source>Psychol Learn Teach</source><year>2007</year><month>09</month><volume>6</volume><issue>2</issue><fpage>104</fpage><lpage>113</lpage><pub-id pub-id-type="doi">10.2304/plat.2007.6.2.104</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doolen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Giddings</surname><given-names>M</given-names> </name><name name-style="western"><surname>Johnson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Guizado de Nathan</surname><given-names>G</given-names> 
</name><name name-style="western"><surname>O Badia</surname><given-names>L</given-names> </name></person-group><article-title>An evaluation of mental health simulation with standardized patients</article-title><source>Int J Nurs Educ Scholarsh</source><year>2014</year><month>03</month><day>12</day><volume>11</volume><issue>1</issue><fpage>55</fpage><lpage>62</lpage><pub-id pub-id-type="doi">10.1515/ijnes-2013-0075</pub-id><pub-id pub-id-type="medline">24620017</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Himmelbauer</surname><given-names>M</given-names> </name><name name-style="western"><surname>Seitz</surname><given-names>T</given-names> </name><name name-style="western"><surname>Seidman</surname><given-names>C</given-names> </name><name name-style="western"><surname>L&#x00F6;ffler-Stastka</surname><given-names>H</given-names> </name></person-group><article-title>Standardized patients in psychiatry - the best way to learn clinical skills?</article-title><source>BMC Med Educ</source><year>2018</year><month>04</month><day>6</day><volume>18</volume><issue>1</issue><fpage>72</fpage><pub-id pub-id-type="doi">10.1186/s12909-018-1184-4</pub-id><pub-id pub-id-type="medline">29625572</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Badger</surname><given-names>LW</given-names> </name><name name-style="western"><surname>Macneil</surname><given-names>G</given-names> </name></person-group><article-title>Rationale for utilizing standardized clients in the training and evaluation of social work students</article-title><source>J Teach Soc Work</source><year>1998</year><month>01</month><day>26</day><volume>16</volume><issue>1-2</issue><fpage>203</fpage><lpage>218</lpage><pub-id 
pub-id-type="doi">10.1300/J067v16n01_13</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fall</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Levitov</surname><given-names>JE</given-names> </name></person-group><article-title>Using actors in experiential group counseling leadership training</article-title><source>J Spec Group Work</source><year>2002</year><month>06</month><volume>27</volume><issue>2</issue><fpage>122</fpage><lpage>135</lpage><pub-id pub-id-type="doi">10.1177/0193392202027002002</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pilnick</surname><given-names>A</given-names> </name><name name-style="western"><surname>Trusson</surname><given-names>D</given-names> </name><name name-style="western"><surname>Beeke</surname><given-names>S</given-names> </name><name name-style="western"><surname>O&#x2019;Brien</surname><given-names>R</given-names> </name><name name-style="western"><surname>Goldberg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Harwood</surname><given-names>RH</given-names> </name></person-group><article-title>Using conversation analysis to inform role play and simulated interaction in communications skills training for healthcare professionals: identifying avenues for further development through a scoping review</article-title><source>BMC Med Educ</source><year>2018</year><month>11</month><day>19</day><volume>18</volume><issue>1</issue><fpage>267</fpage><pub-id pub-id-type="doi">10.1186/s12909-018-1381-1</pub-id><pub-id pub-id-type="medline">30453956</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Ruiz Rodr&#x00ED;guez</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bados L&#x00F3;pez</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fust&#x00E9; Escolano</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Peer counselling versus role-playing: two training methods of therapeutic skills in clinical psychology</article-title><source>Psicothema</source><year>2018</year><month>02</month><volume>30</volume><issue>1</issue><fpage>21</fpage><lpage>26</lpage><pub-id pub-id-type="doi">10.7334/psicothema2016.286</pub-id><pub-id pub-id-type="medline">29363466</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Englar</surname><given-names>RE</given-names> </name></person-group><article-title>Using a standardized client encounter to practice death notification after the unexpected death of a feline patient following routine ovariohysterectomy</article-title><source>J Vet Med Educ</source><year>2019</year><volume>46</volume><issue>4</issue><fpage>489</fpage><lpage>505</lpage><pub-id pub-id-type="doi">10.3138/jvme.0817-111r1</pub-id><pub-id pub-id-type="medline">30806560</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Prescott</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ogilvie</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hanley</surname><given-names>T</given-names> </name></person-group><article-title>Student therapists&#x2019; experiences of learning using a machine client: a proof&#x2010;of&#x2010;concept exploration of an emotionally responsive interactive client (ERIC)</article-title><source>Couns and Psychother 
Res</source><year>2024</year><month>06</month><volume>24</volume><issue>2</issue><fpage>524</fpage><lpage>531</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/toc/17461405/24/2">https://onlinelibrary.wiley.com/toc/17461405/24/2</ext-link></comment><pub-id pub-id-type="doi">10.1002/capr.12685</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zalewski</surname><given-names>B</given-names> </name><name name-style="western"><surname>Guziak</surname><given-names>M</given-names> </name><name name-style="western"><surname>Walkiewicz</surname><given-names>M</given-names> </name></person-group><article-title>Developing simulated and virtual patients in psychological assessment - method, insights and recommendations</article-title><source>Perspect Med Educ</source><year>2023</year><volume>12</volume><issue>1</issue><fpage>455</fpage><lpage>461</lpage><pub-id pub-id-type="doi">10.5334/pme.493</pub-id><pub-id pub-id-type="medline">37901884</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tanana</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Soma</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Srikumar</surname><given-names>V</given-names> </name><name name-style="western"><surname>Atkins</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Imel</surname><given-names>ZE</given-names> </name></person-group><article-title>Development and evaluation of ClientBot: patient-like conversational agent to train basic counseling skills</article-title><source>J Med Internet Res</source><year>2019</year><month>07</month><day>15</day><volume>21</volume><issue>7</issue><fpage>e12529</fpage><pub-id 
pub-id-type="doi">10.2196/12529</pub-id><pub-id pub-id-type="medline">31309929</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>De Mattei</surname><given-names>L</given-names> </name><name name-style="western"><surname>Morato</surname><given-names>MQ</given-names> </name><name name-style="western"><surname>Sidhu</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Are Artificial Intelligence Virtual Simulated Patients (AI-VSP) a valid teaching modality for health professional students?</article-title><source>Clin Simul Nurs</source><year>2024</year><month>07</month><volume>92</volume><fpage>101536</fpage><pub-id pub-id-type="doi">10.1016/j.ecns.2024.101536</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Maurya</surname><given-names>RK</given-names> </name></person-group><article-title>A qualitative content analysis of chatgpt&#x2019;s client simulation role play for practicing counseling skills</article-title><source>PsyArXiv</source><comment>Preprint posted online on  Jun, 2023</comment><pub-id pub-id-type="doi">10.31234/osf.io/vwuxh</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Creed</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Salama</surname><given-names>L</given-names> </name><name name-style="western"><surname>Slevin</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Enhancing the quality of cognitive behavioral therapy in community mental health through artificial intelligence generated fidelity feedback (Project AFFECT): a study protocol</article-title><source>BMC Health Serv 
Res</source><year>2022</year><month>09</month><day>20</day><volume>22</volume><issue>1</issue><fpage>1177</fpage><pub-id pub-id-type="doi">10.1186/s12913-022-08519-9</pub-id><pub-id pub-id-type="medline">36127689</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Flemotomos</surname><given-names>N</given-names> </name><name name-style="western"><surname>Martinez</surname><given-names>VR</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Creed</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Atkins</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Narayanan</surname><given-names>S</given-names> </name></person-group><article-title>Automated quality assessment of cognitive behavioral therapy sessions through highly contextualized language representations</article-title><source>PLoS One</source><year>2021</year><volume>16</volume><issue>10</issue><fpage>e0258639</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0258639</pub-id><pub-id pub-id-type="medline">34679105</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Kolb</surname><given-names>D</given-names> </name></person-group><source>Experiential Learning: Experience As The Source Of Learning And Development</source><year>1984</year><publisher-name>Prentice Hall</publisher-name><pub-id pub-id-type="other">978-0-13-295261-3</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ericsson</surname><given-names>KA</given-names> </name><name 
name-style="western"><surname>Krampe</surname><given-names>RT</given-names> </name><name name-style="western"><surname>Tesch-R&#x00F6;mer</surname><given-names>C</given-names> </name></person-group><article-title>The role of deliberate practice in the acquisition of expert performance</article-title><source>Psychol Rev</source><year>1993</year><volume>100</volume><issue>3</issue><fpage>363</fpage><lpage>406</lpage><pub-id pub-id-type="doi">10.1037/0033-295X.100.3.363</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rakovshik</surname><given-names>SG</given-names> </name><name name-style="western"><surname>McManus</surname><given-names>F</given-names> </name></person-group><article-title>Establishing evidence-based training in cognitive behavioral therapy: a review of current empirical findings and theoretical guidance</article-title><source>Clin Psychol Rev</source><year>2010</year><month>07</month><volume>30</volume><issue>5</issue><fpage>496</fpage><lpage>516</lpage><pub-id pub-id-type="doi">10.1016/j.cpr.2010.03.004</pub-id><pub-id pub-id-type="medline">20488599</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><article-title>AI-powered psychotherapy training | virtual patient roleplay simulation</article-title><source>CBT Trainer</source><year>2025</year><access-date>2026-02-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.soymh.com">https://www.soymh.com</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="web"><article-title>National Curriculum for Psychological Wellbeing Practitioner (PWP) programmes</article-title><source>NHS England</source><year>2022</year><access-date>2026-02-21</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.hee.nhs.uk/sites/default/files/2026-01/PWP%20Curriculum%204th%20Edition%202022%20%28updated%20May%202025%29%20v4.4.docx">https://www.hee.nhs.uk/sites/default/files/2026-01/PWP%20Curriculum%204th%20Edition%202022%20%28updated%20May%202025%29%20v4.4.docx</ext-link></comment></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Brooke</surname><given-names>J</given-names> </name></person-group><article-title>SUS- a quick and dirty usability scale</article-title><source>Usability Evaluation in Industry</source><year>1996</year><publisher-name>Taylor &#x0026; Francis</publisher-name><fpage>189</fpage><lpage>194</lpage></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bangor</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kortum</surname><given-names>PT</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>JT</given-names> </name></person-group><article-title>An empirical evaluation of the System Usability Scale</article-title><source>Int J Hum Comput Int</source><year>2008</year><month>07</month><day>29</day><volume>24</volume><issue>6</issue><fpage>574</fpage><lpage>594</lpage><pub-id pub-id-type="doi">10.1080/10447310802205776</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hsieh</surname><given-names>HF</given-names> </name><name name-style="western"><surname>Shannon</surname><given-names>SE</given-names> </name></person-group><article-title>Three approaches to qualitative content analysis</article-title><source>Qual Health 
Res</source><year>2005</year><month>11</month><volume>15</volume><issue>9</issue><fpage>1277</fpage><lpage>1288</lpage><pub-id pub-id-type="doi">10.1177/1049732305276687</pub-id><pub-id pub-id-type="medline">16204405</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Graneheim</surname><given-names>UH</given-names> </name><name name-style="western"><surname>Lundman</surname><given-names>B</given-names> </name></person-group><article-title>Qualitative content analysis in nursing research: concepts, procedures and measures to achieve trustworthiness</article-title><source>Nurse Educ Today</source><year>2004</year><month>02</month><volume>24</volume><issue>2</issue><fpage>105</fpage><lpage>112</lpage><pub-id pub-id-type="doi">10.1016/j.nedt.2003.10.001</pub-id><pub-id pub-id-type="medline">14769454</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Krippendorff</surname><given-names>K</given-names> </name></person-group><source>Content Analysis: An Introduction to Its Methodology</source><year>2019</year><publisher-name>SAGE Publications, Inc</publisher-name><pub-id pub-id-type="doi">10.4135/9781071878781</pub-id><pub-id pub-id-type="other">978-1-0718-7878-1</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elo</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kyng&#x00E4;s</surname><given-names>H</given-names> </name></person-group><article-title>The qualitative content analysis process</article-title><source>J Adv Nurs</source><year>2008</year><month>04</month><volume>62</volume><issue>1</issue><fpage>107</fpage><lpage>115</lpage><pub-id 
<article-title>AI role-plays for improving trainee competence in CBT: a feasibility and assessment study</article-title>
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rapisarda</surname><given-names>C</given-names> </name><name name-style="western"><surname>Jencius</surname><given-names>M</given-names> </name><name name-style="western"><surname>McGlothlin</surname><given-names>J</given-names> </name></person-group><article-title>Master&#x2019;s students&#x2019; experiences in a multicultural counseling role-play</article-title><source>Int J Adv Counselling</source><year>2011</year><month>12</month><volume>33</volume><issue>4</issue><fpage>361</fpage><lpage>375</lpage><pub-id pub-id-type="doi">10.1007/s10447-011-9139-z</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chow</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>SD</given-names> </name><name name-style="western"><surname>Seidel</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Kane</surname><given-names>RT</given-names> </name><name name-style="western"><surname>Thornton</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Andrews</surname><given-names>WP</given-names> </name></person-group><article-title>The role of deliberate practice in the development of highly effective psychotherapists</article-title><source>Psychotherapy (Chic)</source><year>2015</year><month>09</month><volume>52</volume><issue>3</issue><fpage>337</fpage><lpage>345</lpage><pub-id pub-id-type="doi">10.1037/pst0000015</pub-id><pub-id pub-id-type="medline">26301425</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Laurillard</surname><given-names>D</given-names> </name></person-group><source>Rethinking University 
<publisher-loc>London</publisher-loc><publisher-name>Routledge</publisher-name>