<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e85243</article-id><article-id pub-id-type="doi">10.2196/85243</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Health Professional Students&#x2019; Use of Generative Artificial Intelligence During Clinical Placements: Cross-Sectional Online Survey Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Kotzki</surname><given-names>Sylvain</given-names></name><degrees>PharmD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Massonnet Turner</surname><given-names>Calvin</given-names></name><degrees>MEng</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gauthier</surname><given-names>Kim</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Minoves</surname><given-names>M&#x00E9;lanie</given-names></name><degrees>PharmD, PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Vuillerme</surname><given-names>Nicolas</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff5">5</xref></contrib></contrib-group><aff id="aff1"><institution>LIG SANGRIA, Grenoble INP, CNRS, Univ. Grenoble Alpes</institution><addr-line>Bureau 19, Centre de Recherche en Sant&#x00E9; Int&#x00E9;gr&#x00E9;e (CReSI)</addr-line><addr-line>Grenoble</addr-line><country>France</country></aff><aff id="aff2"><institution>Faculty of Medicine, Univ. Grenoble Alpes</institution><addr-line>Grenoble</addr-line><country>France</country></aff><aff id="aff3"><institution>HP2, Inserm, Univ. Grenoble Alpes</institution><addr-line>Grenoble</addr-line><country>France</country></aff><aff id="aff4"><institution>Department of Pharmacy, CHU Grenoble Alpes</institution><addr-line>Grenoble</addr-line><country>France</country></aff><aff id="aff5"><institution>Institut Universitaire de France</institution><addr-line>Paris</addr-line><country>France</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Kanzow</surname><given-names>Philipp</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Su</surname><given-names>Chen-Yang</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Fukuzawa</surname><given-names>Fumitoshi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Sylvain Kotzki, PharmD, PhD, LIG SANGRIA, Grenoble INP, CNRS, Univ. 
Grenoble Alpes, Bureau 19, Centre de Recherche en Sant&#x00E9; Int&#x00E9;gr&#x00E9;e (CReSI), Grenoble, 38000, France, 33 476637119; <email>sylvain.kotzki@univ-grenoble-alpes.fr</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>27</day><month>4</month><year>2026</year></pub-date><volume>12</volume><elocation-id>e85243</elocation-id><history><date date-type="received"><day>03</day><month>10</month><year>2025</year></date><date date-type="rev-recd"><day>15</day><month>02</month><year>2026</year></date><date date-type="accepted"><day>15</day><month>02</month><year>2026</year></date></history><copyright-statement>&#x00A9; Sylvain Kotzki, Calvin Massonnet Turner, Kim Gauthier, M&#x00E9;lanie Minoves, Nicolas Vuillerme. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 27.4.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2026/1/e85243"/><abstract><sec><title>Background</title><p>Generative artificial intelligence (GenAI) is rapidly expanding in higher education and clinical practice. However, its use during clinical placements, where cognitive demands and responsibility for patient care increase, remains insufficiently documented.</p></sec><sec><title>Objective</title><p>This study aimed to characterize self-reported GenAI use during clinical placements, perceived benefits and risks, and related training and governance needs.</p></sec><sec sec-type="methods"><title>Methods</title><p>We conducted a cross-sectional online survey at a French university (July 17 to September 30, 2025). Eligible participants were students in medicine, pharmacy, nursing, midwifery, or physiotherapy who were currently in, or had completed within the past 18 months, a clinical placement. A 61-item questionnaire (comprising closed- and open-ended items) assessed GenAI use, task patterns, perceived benefits or risks, and training or governance needs. A composite index classified self-perceived GenAI maturity as minimal, limited, moderate, or high. Group comparisons used &#x03C7;<sup>2</sup> tests; maturity gradients used trend tests.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 388 students responded (n=308, 79.4% women), mainly nursing students (n=217, 55.9%). Overall, 204 (52.6%) students reported using GenAI during clinical placements. Use differed across disciplines (<italic>&#x03C7;</italic><sup>2</sup><sub>4</sub>=10.71; <italic>P</italic>=.03), with lower uptake in midwifery (6/23, 26%; odds ratio 0.30, 95% CI 0.11&#x2010;0.77). 
Adoption increased markedly with self-perceived maturity (minimal: 2/22, 9% vs high: 22/29, 76%; trend <italic>P</italic>&#x003C;.001). Among the 204 users, the most commonly reported uses were information retrieval (n=159, 77.9%), bibliographic search (n=152, 74.5%), and translation or rephrasing (n=145, 71.1%); patient-facing activities were less frequently reported (eg, patient-document drafting or communication preparation: n=78, 38.2%). Although most users reported never entering direct patient identifiers, 48 (23.5%) reported at least 1 disclosure of patient-identifying information, and 96 (47.1%) reported processing real medical content perceived as anonymized. The most endorsed perceived benefits among the 388 students were documentation support (n=315, 81.2%) and improved access to information (n=266, 68.5%). The most endorsed risks were dependency (n=353, 90.9%), skill erosion (n=329, 84.8%), and confidentiality breaches (n=339, 87.4%). Training needs were highest for ethics or regulatory training (294/378, 77.7%) and a best-practice clinical guide (292/373, 78.3%).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>GenAI is already used by a substantial proportion of French students in health professions during clinical placements, predominantly for information and documentation support rather than patient-facing activities. Self-perceived readiness is strongly associated with adoption. 
Reported disclosures and concurrent concerns about dependency, skill erosion, and confidentiality support the need for structured curricula and clear governance frameworks to enable responsible, patient-centered integration of GenAI into clinical education.</p></sec></abstract><kwd-group><kwd>generative artificial intelligence</kwd><kwd>AI use</kwd><kwd>artificial intelligence</kwd><kwd>health professional students</kwd><kwd>clinical placements</kwd><kwd>medical education</kwd><kwd>ethics</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Generative artificial intelligence (GenAI) has entered the global health landscape at an unprecedented pace. The widely cited example of ChatGPT (GPT 3.5), which in 2023 achieved passing scores on all 3 components of the United States Medical Licensing Examination, illustrates the capacity of large language models to engage with complex medical knowledge. Concurrently, adoption among health professional students has expanded rapidly. In the United States, just over half of medical students (52%) reported using ChatGPT for medical school-related tasks, based on a survey of 415 students, with about 1 in 6 doing so on a weekly basis [<xref ref-type="bibr" rid="ref1">1</xref>]. In Israel, 86% of undergraduate students from medicine, nursing, and allied health professions reported being familiar with ChatGPT [<xref ref-type="bibr" rid="ref2">2</xref>]. Across Europe, a recent survey of 487 medical students across Germany, Austria, and Switzerland revealed that 38.8% of them reported prior experience with artificial intelligence (AI) chatbots such as ChatGPT [<xref ref-type="bibr" rid="ref3">3</xref>]. In nursing, an umbrella review emphasized both opportunities and substantial pedagogical, ethical, and organizational challenges in integrating AI into education and practice [<xref ref-type="bibr" rid="ref4">4</xref>]. 
Consistently, a qualitative study among undergraduate nursing students found that while GenAI was valued as a supportive learning aid, students also expressed concerns over dependency, weakened critical thinking, and threats to academic integrity [<xref ref-type="bibr" rid="ref5">5</xref>]. In Italy, nearly all physiotherapy students (&#x2248;95.3%) were aware of AI chatbots, though more than half reported never using them in academic settings [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>From an educational perspective, 3 primary applications of GenAI have been identified: the generation and reformulation of academic content, instant adaptive tutoring, and participation in simulation or problem-based learning scenarios [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. A recent systematic review further synthesized evidence across health professional education, confirming the growing influence of GenAI on student learning, particularly in acquisition, inquiry, practice, and production [<xref ref-type="bibr" rid="ref10">10</xref>]. Importantly, this growing body of evidence extends to experimental research, with a randomized controlled trial in Norway reporting a modest improvement in knowledge test performance when pharmacy students had access to ChatGPT, although the effect did not reach statistical significance [<xref ref-type="bibr" rid="ref11">11</xref>]. However, this rapid adoption is unfolding amid a marked institutional vacuum: more than 60% of surveyed pharmacy students reported no formal curricular exposure to GenAI [<xref ref-type="bibr" rid="ref12">12</xref>], while in 2024, an analysis of 116 US research universities found that less than half provided classroom guidance and many still lacked formal institutional policies [<xref ref-type="bibr" rid="ref13">13</xref>]. 
As a result, health professional education is evolving in a state of continuous adaptation, where the promising capabilities of GenAI coexist with substantial risks, including facilitated plagiarism, overreliance, diminished critical thinking, weakened communication, and threats to the development of clinical reasoning and problem-solving skills [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>Beyond classroom-based learning, the training of health professionals also depends heavily on clinical practice, as their training does not take place solely in lecture halls but also through clinical placements. These clinical placements, while essential to professional development, place students in a dual role: learning in real-world conditions while gradually assuming responsibility for patient care. Such placements are frequently marked by heightened stress, significant emotional demands, feelings of inadequacy [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>], and in some cases, early signs of burnout [<xref ref-type="bibr" rid="ref17">17</xref>]. Meanwhile, in clinical practice, GenAI is increasingly being applied across health care settings such as hospitals, clinics, private practices, and pharmacies. Recent reports describe its use to accelerate documentation, simplify radiology reports, draft consultation letters, and even support aspects of diagnostic reasoning [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. An integrative review in nursing similarly highlighted applications of AI across education, clinical practice, workload management, and professional perceptions, pointing to opportunities but also to enduring structural and ethical challenges [<xref ref-type="bibr" rid="ref20">20</xref>]. 
In this environment, health professional students in clinical placements are likely to turn to GenAI to ease their daily workload, such as checking information, translating a consent form, or rephrasing a discharge summary. While such assistance can provide immediate support when human feedback is unavailable, it also raises concerns about overreliance, the weakening of critical learning processes, and the potential for errors.</p><p>A recent survey found that 1 in 5 UK general practitioners reported using GenAI, most commonly to generate documentation after patient encounters. Clinicians acknowledged its potential to reduce administrative burden but also raised concerns regarding patient safety, data privacy, and the risk of errors [<xref ref-type="bibr" rid="ref21">21</xref>]. Other authors caution, however, that adoption may remain limited or prove transient in the absence of robust clinical validation [<xref ref-type="bibr" rid="ref22">22</xref>]. Health professional students in clinical placements stand at the intersection of 3 evolving contexts: higher education, where GenAI is rapidly becoming mainstream; clinical environments, where health care professionals are beginning to adopt it; and a learning experience often marked by uncertainty, stress, and substantial cognitive demands. Yet, to the best of our knowledge, no survey has systematically examined how students use GenAI during clinical placements, what motivates their engagement, and the perceived benefits, risks, and governance needs associated with this pivotal stage of professional development.</p><p>To address this gap, we conducted an anonymous survey among 388 health professional students from multiple health care programs at the Universit&#x00E9; Grenoble Alpes (UGA, France), all of whom had recently completed or were currently engaged in clinical placements. The aim of this study was to characterize health professional students&#x2019; self-reported use of generative AI in clinical situations. 
Specifically, we sought to (1) estimate adoption prevalence overall and by discipline, (2) describe the tasks, tools, and frequency of use, including self-reported handling of patient-related information, and (3) assess perceived benefits, risks, and training and governance needs to inform structured educational and institutional responses.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>We conducted a web-based descriptive cross-sectional study in the form of an online survey targeting health professional students at UGA (France). The aim was to document actual use, perceived benefits and risks, as well as training needs related to GenAI in the context of clinical placements. Data collection took place between July 17 and September 30, 2025. This survey was carried out as part of a national initiative, funded by the French National Research Agency, to support the development and dissemination of digital health and AI in health education in French university health programs (France 2030 program, ANR n&#x00B0;23-CMAS-0035).</p></sec><sec id="s2-2"><title>Ethical Considerations</title><p>This study has received ethical approval from the &#x201C;Ethics Committee for the Integrity and Ethics of Research in Health Professions Education&#x201D; of the &#x201C;<italic>Soci&#x00E9;t&#x00E9; Internationale Francophone d&#x2019;Education M&#x00E9;dicale</italic>&#x201D; (Notice n&#x00B0;1905&#x2010;2025, issued July 17, 2025). Entry into the survey was conditional upon acceptance of an electronic informed consent form, accompanied by an information sheet detailing the study objectives, data confidentiality, voluntary nature of participation, right to withdraw, and potential risks. The survey was fully anonymous, and no incentives were offered for participation. 
Participation was voluntary, and informed consent was obtained electronically from all participants.</p></sec><sec id="s2-3"><title>Recruitment Procedure</title><p>Following approval by the deans of the Faculties of Medicine and Pharmacy at UGA, an email invitation via the official academic mailing list was distributed to potential participants enrolled in medical or allied health training programs. Overall, approximately 4553 eligible students (n=2196 in medicine, n=283 in pharmacy, n=1904 in nursing, n=120 in midwifery, and n=50 in physiotherapy) were invited (see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> for details). In addition, a flyer was posted on student information boards at the entrance of lecture halls and classrooms throughout the recruitment period, ensuring a reinforced exposure to the survey. A total of 388 students completed the survey, corresponding to an overall response rate of 8.5%, ranging from 3.1% in medicine to 26% in physiotherapy. Interested students accessed the survey via a LimeSurvey link.</p></sec><sec id="s2-4"><title>Clinical Placement Context</title><p>In the French health professions education system, clinical placements are mandatory components of curricula. Rotations typically last 2 to 10 weeks. Students progressively assume responsibilities according to their year of study, under the supervision of licensed health professionals from the relevant discipline, with day-to-day oversight and validation of key care and documentation tasks. Early-year students mainly observe and perform basic tasks, while senior students (in the final years of training) take a more active role in discipline-specific activities, including patient care tasks, documentation, information-seeking, care coordination, and patient communication or education.</p></sec><sec id="s2-5"><title>Data Collection</title><p>We collected data through an anonymous online survey administered with LimeSurvey. 
Students interested in the study were invited to access the anonymous link, which contained 61 items: 18 single-choice questions, 38 single-choice questions on a 4-point Likert scale (1=strongly disagree to 4=strongly agree), 1 multiple-choice question, and 4 open-ended questions. The full survey questionnaire is available in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><p>The items were developed based on 4 published studies: Blease et al [<xref ref-type="bibr" rid="ref21">21</xref>] on GenAI in primary care, Lobet et al [<xref ref-type="bibr" rid="ref23">23</xref>] on the spontaneous use of ChatGPT by university students, Karaca et al [<xref ref-type="bibr" rid="ref24">24</xref>] on the Medical Artificial Intelligence Readiness Scale for Medical Students, and Boillat et al [<xref ref-type="bibr" rid="ref25">25</xref>] on physician and student preparedness for AI. Items were then adapted by consensus to the clinical context.</p><p>We organized the instrument into 4 domains:</p><list list-type="bullet"><list-item><p>Sociodemographic characteristics</p></list-item><list-item><p>Declared uses of GenAI in clinical practice</p></list-item><list-item><p>Perceptions and attitudes in clinical contexts</p></list-item><list-item><p>Knowledge and training related to GenAI</p></list-item></list><p>We conducted this web-based survey in accordance with the CHERRIES (Checklist for Reporting Results of Internet E-Surveys) checklist (<xref ref-type="supplementary-material" rid="app2">Checklist 1</xref>) [<xref ref-type="bibr" rid="ref26">26</xref>]. The questionnaire was hosted on LimeSurvey and included a single skip logic based on declared GenAI use versus nonuse; all other sections were shown to all participants. A total of 38 items were mandatory; missing responses were otherwise allowed, and respondents could review or change answers before submission. 
Only fully submitted questionnaires were analyzed, and no technical measures (cookies, IP checks, or registration) were used to prevent multiple entries.</p><p>Content validity was assessed through expert review by 2 clinicians and health-professions education specialists (SK and MM), who evaluated item relevance, clarity, and coverage of the 4 intended domains. Minor wording changes were made to improve clarity and contextual alignment with clinical placements (4 items required rewording for better understanding; no items were removed or added). A pilot test with 10 students (n=4 students in medicine, n=3 students in pharmacy, and n=3 students in nursing) confirmed intelligibility. Feedback from experts and students was incorporated to produce the definitive version. The average completion time was 15 minutes.</p></sec><sec id="s2-6"><title>GenAI Maturity Index</title><p>To capture students&#x2019; self-perceived maturity toward GenAI, considered as their perceived readiness to understand, evaluate, and appropriately use GenAI in educational and professional contexts, we constructed a composite GenAI maturity index from 9 Likert-type items [<xref ref-type="bibr" rid="ref24">24</xref>]. This approach follows previous work on AI readiness and familiarity in health professions, where multiple items are aggregated into a single readiness or familiarity score [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. 
One item assessed general digital comfort (&#x201C;I feel comfortable with digital technologies in general&#x201D;), and 8 items targeted GenAI-specific knowledge and self-efficacy (basic GenAI concepts, distinction between GenAI and other AI, ability to judge the reliability of GenAI outputs, awareness of appropriate use contexts and current limitations, and perceived ability to use GenAI in one&#x2019;s education and future practice; full wording in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p><p>All 9 items used a 4-point agreement scale (1=strongly disagree, 4=strongly agree). To respect the ordinal nature of the 4-point Likert responses, we computed, for each respondent, the median of the 9 item scores and rounded it down to the nearest integer. This yielded a 4-level ordinal GenAI maturity score: 1 (minimal), 2 (limited), 3 (moderate), and 4 (high maturity). We required at least 5 nonmissing responses out of 9 (&#x2265;50%) to compute the index, consistent with commonly used &#x201C;half-rule&#x201D; approaches for scoring multi-item scales in the presence of item-level missingness [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>In this sample, the 9 items showed good internal consistency (Cronbach <italic>&#x03B1;</italic>=0.869). Internal consistency was similar when excluding the general digital-comfort item (<italic>&#x03B1;</italic>=0.870 for items 2&#x2010;9), suggesting that digital comfort functions as a foundational component of the same underlying construct. 
Inspired by previous AI-readiness instruments that combine several facets into a single readiness score [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>], we therefore use this composite GenAI maturity index pragmatically as an ordinal stratification of self-perceived GenAI maturity.</p></sec><sec id="s2-7"><title>Qualitative Analysis of Open-Ended Responses</title><p>Open-ended responses were analyzed using a qualitative descriptive approach with descriptive content analysis. Responses were segmented into 213 meaning units (eg, items separated by semicolons or distinct ideas) and analyzed by question (reasons for nonuse, additional tasks, suggested uses, suggested risks, and training or support needs). A hybrid coding strategy was used, combining a first deductive framework aligned with these domains and inductive identification of recurrent subthemes. Two researchers (SK and MM) independently coded all segments using the agreed codebook. Initial agreement was 83.1% (177/213); disagreements (36/213, 16.9%) were resolved through discussion until consensus, and the final coding was used for thematic reporting and choice of illustrative quotations. Quotations were anonymized and translated from French to English. Qualitative findings are reported as recurring themes with illustrative excerpts to complement the quantitative results rather than as a stand-alone qualitative study.</p></sec><sec id="s2-8"><title>Data Processing and Statistical Analyses</title><p>Data were exported to Excel and analyzed in Python (version 3.11) using pandas (2.2.2), NumPy (1.26.4), SciPy (1.13.1), statsmodels (0.14.2), and matplotlib (3.9.2). Quantitative variables were described using means or medians with IQRs, while qualitative variables were presented as counts and percentages.</p><p>We performed overall group comparisons using Pearson chi-square test to assess the heterogeneity of distributions across academic disciplines and maturity levels. 
When expected cell counts were small, Fisher exact test was applied. For pairwise comparisons between a given discipline and all others, odds ratios with 95% CIs were calculated.</p><p>To assess gradients across maturity levels, 4-point Likert responses were dichotomized into 2 categories (disagree=1&#x2010;2; agree=3&#x2010;4) to facilitate interpretation and to enable proportion-based trend testing across ordered groups [<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>]. Linear trends were then assessed using the Cochran-Armitage trend test. When multiple simultaneous comparisons were conducted, the family-wise error rate was controlled using the Holm method. Statistical significance was set at <italic>P</italic> less than .05. We acknowledge that dichotomizing ordinal responses entails information loss compared with ordinal models [<xref ref-type="bibr" rid="ref28">28</xref>].</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Respondent Demographics and Academic Profile</title><p>The survey included 388 health professional students (<xref ref-type="table" rid="table1">Table 1</xref>), predominantly women (n=308, 79.4%), with most respondents aged 20&#x2010;24 years (n=198, 51.0%). The sample was largely composed of nursing students (n=217, 55.9%), with additional representation from medicine, pharmacy, midwifery, and physiotherapy programs (<xref ref-type="table" rid="table1">Table 1</xref>). Years of study covered the full curriculum, with a higher proportion of early-year students (y 1&#x2010;2) and smaller proportions in later years (<xref ref-type="table" rid="table1">Table 1</xref>).</p><p>Among the 388 respondents, 380 (97.9%) provided sufficient data to compute the GenAI maturity index. 
Most students reported limited to moderate maturity (levels 2&#x2010;3: 329/380, 86.6%), while relatively few reported minimal or high maturity (levels 1 and 4: 51/380, 13.4%; <xref ref-type="table" rid="table1">Table 1</xref>). Item-level details are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Descriptive table of responder characteristics (n=388).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristics</td><td align="left" valign="bottom">Participants, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Gender</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Man</td><td align="left" valign="top">76 (19.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Woman</td><td align="left" valign="top">308 (79.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Prefer not to answer</td><td align="left" valign="top">4 (1.0)</td></tr><tr><td align="left" valign="top" colspan="2">Age (y)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;20</td><td align="left" valign="top">55 (14.2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>20&#x2010;24</td><td align="left" valign="top">198 (51.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>25&#x2010;29</td><td align="left" valign="top">55 (14.2)</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>30&#x2010;34</td><td align="left" valign="top">22 (5.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;34</td><td align="left" valign="top">58 (15.0)</td></tr><tr><td align="left" valign="top" colspan="2">Disciplines</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medicine</td><td align="left" valign="top">69 (17.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Midwifery</td><td align="left" valign="top">23 (5.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nursing</td><td align="left" valign="top">217 (55.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Pharmacy</td><td align="left" valign="top">66 (17.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physiotherapy</td><td align="left" valign="top">13 (3.4)</td></tr><tr><td align="left" valign="top" colspan="2">Year of study</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>First year</td><td align="left" valign="top">82 (21.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Second year</td><td align="left" valign="top">97 (25.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Third year</td><td align="left" valign="top">77 (19.9)</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Fourth year</td><td align="left" valign="top">29 (7.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Fifth year</td><td align="left" valign="top">52 (13.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Sixth year</td><td align="left" valign="top">10 (2.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Beyond the sixth year</td><td align="left" valign="top">41 (10.6)</td></tr><tr><td align="left" valign="top" colspan="2">Self-reported GenAI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> maturity</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Minimal (1)</td><td align="left" valign="top">22 (5.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Limited (2)</td><td align="left" valign="top">130 (33.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Moderate (3)</td><td align="left" valign="top">199 (51.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High (4)</td><td align="left" valign="top">29 (7.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>NA<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">8 (2.1)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>GenAI: generative artificial intelligence.</p></fn><fn id="table1fn2"><p><sup>b</sup>NA: not 
applicable.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Self-Reported Uses</title><sec id="s3-2-1"><title>GenAI Use in Clinical Placement</title><p>Overall, 204 of 388 (52.6%) students reported using GenAI during clinical placements (<xref ref-type="table" rid="table2">Table 2</xref>). Adoption differed across disciplines (&#x03C7;<sup>2</sup><sub>4</sub>=10.71; <italic>P</italic>=.03), primarily reflecting lower uptake among midwifery students (odds ratio 0.30, 95% CI 0.11&#x2010;0.77; Holm-adjusted <italic>P</italic>=.049). Pharmacy students showed the highest unadjusted proportion of use, but this difference was not statistically significant after correction (<xref ref-type="table" rid="table2">Table 2</xref>). GenAI use increased markedly with higher self-perceived GenAI maturity, from 2 of 22 (9%) in the minimal group to 22 of 29 (76%) in the high-maturity group (trend test <italic>z</italic> score=6.01, <italic>P</italic>&#x003C;.001; <xref ref-type="table" rid="table3">Table 3</xref>).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Prevalence of GenAI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> use in clinical placements by discipline.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top"/><td align="left" valign="top">Yes, n (%)</td><td align="left" valign="top">No, n (%)</td><td align="left" valign="top">OR<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> (95% CI) vs rest</td><td align="left" valign="top"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="5">GenAI use in clinical placements</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Overall</td><td align="left" valign="top">204 (52.6)</td><td align="left" valign="top">184 (47.4)</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" 
rid="table2fn3">c</xref></sup></td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top" colspan="5">GenAI use by discipline</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medicine</td><td align="left" valign="top">33 (47.8)</td><td align="left" valign="top">36 (52.2)</td><td align="left" valign="top">0.79 (0.47-1.34)</td><td align="left" valign="top">.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Midwifery</td><td align="left" valign="top">6 (26.1)</td><td align="left" valign="top">17 (73.9)</td><td align="left" valign="top">0.30 (0.11-0.77)</td><td align="left" valign="top">.049<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nursing</td><td align="left" valign="top">117 (53.9)</td><td align="left" valign="top">100 (46.1)</td><td align="left" valign="top">1.13 (0.76-1.69)</td><td align="left" valign="top">.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Pharmacy</td><td align="left" valign="top">42 (63.6)</td><td align="left" valign="top">24 (36.4)</td><td align="left" valign="top">1.73 (1.00-2.99)</td><td align="left" valign="top">.23</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physiotherapy</td><td align="left" valign="top">6 (46.1)</td><td align="left" valign="top">7 (53.9)</td><td align="left" valign="top">0.77 (0.25-2.32)</td><td align="left" valign="top">.78</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>GenAI: generative artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>OR: odds ratio.</p></fn><fn 
id="table2fn3"><p><sup>c</sup>Not applicable.</p></fn><fn id="table2fn4"><p><sup>d</sup>Statistically significant after Holm adjustment.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Prevalence of GenAI<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> use in clinical placements by self-perceived GenAI maturity.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">GenAI use by self-perceived GenAI maturity</td><td align="left" valign="top">Yes, n (%)</td><td align="left" valign="top">No, n (%)</td><td align="left" valign="top"><italic>&#x03C7;</italic><sup>2</sup> Pearson (<italic>df</italic>; <italic>P</italic> value)</td><td align="left" valign="top">Cochran-Armitage trend: <italic>z</italic> score (<italic>P</italic> value)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Maturity score</td><td align="left" valign="top">37.25 (3; &#x003C;.001)</td><td align="left" valign="top">6.01 (&#x003C;.001)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Minimal (1)</td><td align="left" valign="top">2 (9.1)</td><td align="left" valign="top">20 (90.9)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Limited (2)</td><td align="left" valign="top">52 (40.0)</td><td align="left" valign="top">78 (60.0)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Moderate (3)</td><td align="left" valign="top">122 (61.3)</td><td align="left" valign="top">77 (38.7)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High (4)</td><td align="left" valign="top">22 (75.9)</td><td align="left" valign="top">7 (24.1)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>GenAI: generative artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2-2"><title>Reasons for Not Adopting GenAI During Clinical Placements</title><p>Among nonusers (184/388), the most often reported reasons were not perceiving a need (59/184, 32.1%), not knowing best practices for use (38/184, 20.7%), and lack of interest (34/184, 18.5%; <xref ref-type="table" rid="table4">Table 4</xref>). Fewer respondents reported not knowing how to use the tools (18/184, 9.8%) or not being aware of them (10/184, 5.4%); &#x201C;Other&#x201D; was selected by 25/184 (13.6%).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Reasons for not adopting generative artificial intelligence during clinical placements among nonuser respondents (n=184).</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Reason for nonuse</td><td align="left" valign="bottom">Overall,<break/>n (%)</td><td align="left" valign="bottom">Nursing,<break/>n (%)</td><td align="left" valign="bottom">Medicine,<break/>n (%)</td><td align="left" valign="bottom">Pharmacy,<break/>n (%)</td><td align="left" valign="bottom">Midwifery,<break/>n (%)</td><td align="left" valign="bottom">Physio,<break/>n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">It does not interest me</td><td align="left" valign="top">34 (18.5)</td><td align="left" valign="top">13 (13)</td><td align="left" valign="top">10 (27.8)</td><td align="left" valign="top">3 (12.5)</td><td align="left" valign="top">5 (29.4)</td><td align="left" valign="top">3 (42.9)</td></tr><tr><td align="left" valign="top">I do not need it</td><td 
align="left" valign="top">59 (32.1)</td><td align="left" valign="top">32 (32)</td><td align="left" valign="top">10 (27.8)</td><td align="left" valign="top">9 (37.5)</td><td align="left" valign="top">6 (35.3)</td><td align="left" valign="top">2 (28.6)</td></tr><tr><td align="left" valign="top">I do not know how to use it</td><td align="left" valign="top">18 (9.8)</td><td align="left" valign="top">12 (12)</td><td align="left" valign="top">4 (11.1)</td><td align="left" valign="top">2 (8.3)</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top">I don&#x2019;t know the best practices for using it</td><td align="left" valign="top">38 (20.7)</td><td align="left" valign="top">25 (25)</td><td align="left" valign="top">7 (19.4)</td><td align="left" valign="top">2 (8.3)</td><td align="left" valign="top">3 (17.6)</td><td align="left" valign="top">1 (14.3)</td></tr><tr><td align="left" valign="top">I was not aware of it</td><td align="left" valign="top">10 (5.4)</td><td align="left" valign="top">6 (6)</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">3 (12.5)</td><td align="left" valign="top">1 (5.9)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top">Other</td><td align="left" valign="top">25 (13.6)</td><td align="left" valign="top">12 (12)</td><td align="left" valign="top">5 (13.9)</td><td align="left" valign="top">5 (20.8)</td><td align="left" valign="top">2 (11.8)</td><td align="left" valign="top">1 (14.3)</td></tr></tbody></table></table-wrap><p>Reasons varied across disciplines, but subgroup sizes were small for some programs. 
Overall, responses suggested two main profiles: (1) attitudinal barriers (eg, no perceived need or lack of interest) and (2) guidance-related barriers (eg, lack of best practices or technical know-how), with their relative weight differing by discipline (<xref ref-type="table" rid="table4">Table 4</xref>).</p><p>Open-ended responses under &#x201C;Other&#x201D; (29 meaning units) mostly referred to perceived unreliability and risk of errors; environmental impact or broader socio-technical critique; and a preference for official sources or local protocols over GenAI. The full thematic breakdown with illustrative quotations is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s3-2-3"><title>Self-Reported Use of GenAI During Clinical Placements</title><p>Among GenAI users (204/388), reported use was primarily oriented toward information-related and documentation tasks (<xref ref-type="table" rid="table5">Table 5</xref>). The most common uses included information extraction (159/204, 77.9%) and bibliographic search (152/204, 74.5%), while patient-facing activities were less frequently reported (eg, drafting patient documents or preparing patient communication: 78/204, 38.2% each; <xref ref-type="table" rid="table5">Table 5</xref>). 
This pattern was mirrored in usage frequency: weekly-or-more use was more common for information-related tasks (eg, information extraction: 53/204, 26.0%) than for patient-facing documentation or communication (13/204, 6.4% and 15/204, 7.4%; <xref ref-type="table" rid="table5">Table 5</xref>).</p><p>Free-text comments on additional tasks (106 meaning units) most often described learning support or pedagogical explanations and writing or text revision; the full thematic breakdown with illustrative quotations is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Frequency of generative artificial intelligence usage by task (n=204).</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Item</td><td align="left" valign="bottom">Ever used, n (%)</td><td align="left" valign="bottom">At least once a week, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Content translation</td><td align="left" valign="top">145 (71.1)</td><td align="left" valign="top">19 (9.3)</td></tr><tr><td align="left" valign="top">Report or article writing</td><td align="left" valign="top">141 (69.1)</td><td align="left" valign="top">27 (13.2)</td></tr><tr><td align="left" valign="top">Info extraction</td><td align="left" valign="top">159 (77.9)</td><td align="left" valign="top">53 (26)</td></tr><tr><td align="left" valign="top">Bibliographic search</td><td align="left" valign="top">152 (74.5)</td><td align="left" valign="top">47 (23)</td></tr><tr><td align="left" valign="top">Clinical simulation</td><td align="left" valign="top">112 (54.9)</td><td align="left" valign="top">31 (15.2)</td></tr><tr><td align="left" valign="top">Drafting patient documents</td><td align="left" valign="top">78 (38.2)</td><td align="left" valign="top">13 (6.4)</td></tr><tr><td align="left" valign="top">Preparing patient communication</td><td 
align="left" valign="top">78 (38.2)</td><td align="left" valign="top">15 (7.4)</td></tr></tbody></table></table-wrap></sec><sec id="s3-2-4"><title>Disclosure of Sensitive Information to GenAI</title><p>Among GenAI users, most reported that entering direct patient identifiers had &#x201C;never happened&#x201D; (151/204, 74.0%; <xref ref-type="table" rid="table6">Table 6</xref>). However, 48 of 204 (23.5%) students reported at least 1 instance (once, a few times, often, or unintentionally), 5 (2.5%) were unsure, and only 3 (1.5%) reported that this occurs &#x201C;often.&#x201D;</p><p>Self-disclosure was more common. While 109 of 204 (53.4%) students reported &#x201C;never&#x201D; entering their own personal identifiers, 87 (42.6%) reported at least 1 instance, 8 (3.9%) were unsure, and 18 (8.8%) reported that this occurs &#x201C;often&#x201D; (<xref ref-type="table" rid="table6">Table 6</xref>). The broadest category of disclosures involved real medical content: 100 (49%) reported &#x201C;never,&#x201D; whereas 96 (47.1%) reported processing real medical data (even if perceived as anonymized), including 20 (9.8%) reporting &#x201C;often&#x201D; (<xref ref-type="table" rid="table6">Table 6</xref>).</p><table-wrap id="t6" position="float"><label>Table 6.</label><caption><p>Self-reported disclosure of sensitive information when using generative artificial intelligence (n=204).</p></caption><table id="table6" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Type of data disclosed</td><td align="left" valign="bottom">Never,<break/>n (%)</td><td align="left" valign="bottom">Once,<break/>n (%)</td><td align="left" valign="bottom">A few times,<break/>n (%)</td><td align="left" valign="bottom">Often,<break/>n (%)</td><td align="left" valign="bottom">Maybe<sup><xref ref-type="table-fn" rid="table6fn1">a</xref></sup>, n (%)</td><td align="left" valign="bottom">Do not know,<break/>n (%)</td></tr></thead><tbody><tr><td align="left" 
valign="top">Patient-identifying data (<italic>name, ID, address</italic>)</td><td align="left" valign="top">151 (74)</td><td align="left" valign="top">12 (5.9)</td><td align="left" valign="top">22 (10.8)</td><td align="left" valign="top">3 (1.5)</td><td align="left" valign="top">11 (5.4)</td><td align="left" valign="top">5 (2.5)</td></tr><tr><td align="left" valign="top">Student&#x2019;s own personal data (<italic>name, identifiers</italic>)</td><td align="left" valign="top">109 (53.4)</td><td align="left" valign="top">18 (8.8)</td><td align="left" valign="top">39 (19.1)</td><td align="left" valign="top">18 (8.8)</td><td align="left" valign="top">12 (5.9)</td><td align="left" valign="top">8 (3.9)</td></tr><tr><td align="left" valign="top">Real medical data (<italic>anonymized or not</italic>)</td><td align="left" valign="top">100 (49.0)</td><td align="left" valign="top">17 (8.3)</td><td align="left" valign="top">56 (27.5)</td><td align="left" valign="top">20 (9.8)</td><td align="left" valign="top">3 (1.5)</td><td align="left" valign="top">8 (3.9)</td></tr></tbody></table><table-wrap-foot><fn id="table6fn1"><p><sup>a</sup>It may have happened unintentionally (eg, by copying and pasting).</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s3-3"><title>Perceived Benefits and Risks</title><sec id="s3-3-1"><title>Perceived Benefits of GenAI Use in Clinical Practice</title><p>Students rated seven potential benefits of GenAI in clinical practice on a 4-point Likert scale (n=388; <xref ref-type="fig" rid="figure1">Figure 1</xref>). Overall, perceived benefits were highest for facilitating the drafting of clinical documents (315/388, 81.2% agree or strongly agree) and improving information accessibility (266/388, 68.5%). Intermediate endorsement was observed for supporting personalized care plan creation, increasing care efficiency, and improving diagnostic accuracy (<xref ref-type="fig" rid="figure1">Figure 1</xref>). 
In contrast, views were more skeptical regarding patient-information collection and prognostic accuracy, for which most respondents disagreed (212/388, 54.6% and 234/388, 60.3%, respectively). Medians indicated overall agreement for 5 of 7 potential benefits, whereas patient-information collection and prognostic accuracy had median disagreement. Full response distributions, item-level medians, and IQRs are reported in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><p>Free-text proposals about potential clinical uses were segmented into 22 coded meaning units and were consistent with the quantitative patterns. The most frequent themes concerned reflective support and systematic checklists, and administrative or documentation streamlining. The full thematic breakdown, including less frequent themes and illustrative quotations, is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Perceived benefits of generative artificial intelligence for clinical practice during placements (n=388).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e85243_fig01.png"/></fig></sec><sec id="s3-3-2"><title>Perceived Risks of GenAI Use in Clinical Practice</title><p>Students rated seven potential risks of GenAI use in clinical practice on a 4-point Likert scale (n=388; <xref ref-type="fig" rid="figure2">Figure 2</xref>). Three concerns were strongly endorsed: creating dependency on GenAI (353/388, 90.9% agree or strongly agree), long-term loss of clinical skills (329/388, 84.8%), and breaching medical confidentiality (339/388, 87.4%). A more moderate majority agreed that GenAI could challenge professional competencies (228/388, 58.8%; <xref ref-type="fig" rid="figure2">Figure 2</xref>). 
In contrast, most respondents disagreed that GenAI would replace health care staff or specialists, increase training time for caregivers, or exacerbate inequalities in access to care (all &#x003E;60% disagree or strongly disagree; <xref ref-type="fig" rid="figure2">Figure 2</xref>). Medians showed agreement for 4 of 7 risks, while replacement of staff, increased training time, and inequalities had median disagreement. Full response distributions, item-level medians, and IQRs are reported in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><p>Open-ended risk comments were segmented into 32 coded meaning units and were consistent with the hierarchy seen in the Likert ratings. The most frequent themes concerned risks of errors and potential patient-safety consequences, dependence or reduced reflective capacity, and relational impacts (eg, depersonalization or reduced quality of the clinician-patient relationship). The full thematic breakdown, including less frequent themes and illustrative quotations, is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Perceived risks of clinical use of generative artificial intelligence during placements (n=388). AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e85243_fig02.png"/></fig></sec></sec><sec id="s3-4"><title>Training and Support Needs for GenAI Use</title><p>Across 7 items assessing training and support needs, the median response was &#x201C;Agree&#x201D; for every statement (<xref ref-type="fig" rid="figure3">Figure 3</xref>). The strongest endorsement concerned a best-practice clinical guide (292/373, 78.3% agree or strongly agree) and ethics or regulatory awareness training (294/378, 77.7%). 
Substantial levels of agreement were also observed for profession-specific coaching and training in human-AI collaboration (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Overall disagreement remained limited (strongly disagree &#x2264;11% across items). Full response distributions, item-level medians, and IQRs are reported in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><p>Free-text comments on additional needs (24 meaning units) provided further context. A large share reported no additional needs or interest (8/24, 33%). Among expressed needs, the most frequent theme concerned practical training and ongoing support to integrate GenAI appropriately into clinical learning and practice (6/24, 25%). The full thematic breakdown, including less frequent themes and illustrative quotations, is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Training and support needs for generative artificial intelligence (n=388). AI: artificial intelligence; MOOC: massive open online course.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e85243_fig03.png"/></fig></sec><sec id="s3-5"><title>Governance and Organizational Readiness</title><p>Seven items assessed students&#x2019; perceptions of organizational readiness and governance for GenAI use during clinical placements (<xref ref-type="fig" rid="figure4">Figure 4</xref>). Overall, respondents perceived limited institutional communication and safeguards: only a small minority recalled supervisors providing information on legal responsibilities or local conditions of use (<xref ref-type="fig" rid="figure4">Figure 4</xref>). Governance was also perceived as insufficient, with 36 of 359 (10%) agreeing that GenAI use is sufficiently regulated. 
Operational readiness received the poorest ratings, as only 18 of 372 (4.8%) agreed that staff are trained for clinical use and 56 of 343 (16.3%) agreed that tool biases are accounted for. In contrast, transparency toward patients appeared as the single area of strong consensus: 248 of 331 (74.9%) agreed that patients should be informed when AI is used in their care (<xref ref-type="fig" rid="figure4">Figure 4</xref>). Full response distributions, item-level medians, and IQRs are reported in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Governance and organizational readiness (n=388). AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v12i1e85243_fig04.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>In this cross-sectional survey of 388 French students in health professions, 52.6% (204/388) reported using GenAI during clinical placements. Adoption varied across disciplines, with lower uptake in midwifery, and increased markedly with higher self-perceived GenAI maturity. Reported uses primarily involved information retrieval and documentation- or writing-related tasks, while patient-facing activities were less frequently reported.</p><p>Notably, close to a quarter of GenAI users reported at least 1 instance of entering patient-identifying information, and close to half reported processing real medical content perceived as anonymized. Students perceived substantial benefits for documentation support but also expressed prominent concerns about dependency, erosion of clinical skills, and breaches of confidentiality, alongside strong demand for ethics or regulation training and best-practice guidance. 
Overall, these findings show that GenAI is already embedded in students&#x2019; placement practices, but with uneven adoption, limited institutional framing, and persistent confidentiality risks.</p></sec><sec id="s4-2"><title>Learning to Think With GenAI During Clinical Placements</title><p>In this survey, students appeared to mobilize GenAI during placements primarily as a cognitive and organizational aid (eg, information retrieval and documentation support) rather than as a tool embedded in direct patient-facing work. This &#x201C;low-stakes <italic>versus</italic> clinically situated&#x201D; pattern plausibly reflects implicit boundaries around what feels safe, acceptable, and professionally legitimate to delegate to a third-party model in real clinical environments and aligns with students&#x2019; expressed need for clear best-practice guidance.</p><p>Perceived readiness offers an additional lens to interpret why some students integrate GenAI into placement work while others do not. Our maturity index should be understood as a pragmatic stratification of how prepared students feel to evaluate and use GenAI critically in clinical learning contexts, rather than as an objective measure of competence. The strong adoption gradient across readiness levels is consistent with AI-readiness frameworks in health professions education [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Emerging evidence also suggests that structured GenAI-supported simulations can improve targeted competencies in medical students [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. 
Educationally, this matters because uneven readiness may translate into uneven exposure to GenAI practices during placements, potentially reinforcing a hidden curriculum in which norms of safe and legitimate use are acquired informally and shape professional identity formation at a formative stage [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>].</p><p>This raises a broader tension between efficiency and reflection in workplace learning. While GenAI may support information management and autonomous learning [<xref ref-type="bibr" rid="ref35">35</xref>], medical documentation and reasoning processes are also critical spaces for cultivating clinical rigor, accountability, and reflective judgment. Without explicit pedagogical framing, early delegation of reasoning-related tasks may narrow opportunities for reflexivity and increase vulnerability to automation bias and overreliance. These mechanisms were previously described in clinical decision support contexts, where they can compromise patient safety [<xref ref-type="bibr" rid="ref36">36</xref>]. Conversely, when embedded in structured educational approaches (eg, simulation with feedback and validated rubrics), GenAI can be leveraged to enrich reflection and support deliberate practice, as illustrated by initiatives such as MedSimAI [<xref ref-type="bibr" rid="ref32">32</xref>]. Collectively, these findings support the need to move from informal, individual experimentation toward supervised and pedagogically grounded uses of GenAI in clinical learning environments [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>].</p></sec><sec id="s4-3"><title>Interpreting Nonuse, Perceived Benefits, and Perceived Risks</title><p>Students&#x2019; reasons for not using GenAI during placements suggested that nonadoption often reflected deliberate reservations rather than simple unfamiliarity. 
Reported concerns included limited perceived clinical relevance, fear of diminished reflexivity, confidentiality risks, and broader socio-technical critiques, such as environmental impact. This aligns with Tortella et al [<xref ref-type="bibr" rid="ref6">6</xref>], who observed that reluctance among physiotherapy students was largely driven by concerns about inaccuracy and error, and perceived limits for complex tasks, rather than a lack of awareness. From a teaching perspective, such reservations should be treated as a pedagogical resource: they create opportunities to make expectations explicit and to cultivate critical judgment, irrespective of whether students ultimately adopt GenAI tools.</p><p>Perceived benefits were large but not uniform. Students strongly endorsed GenAI for documentation relief and improved access to information, positioning it primarily as a cognitive and organizational facilitator. In contrast, endorsement was more cautious for higher-stakes functions such as diagnostic support, personalized planning, and efficiency gains, reflecting conditional trust rather than full reliance. This pattern is consistent with Janumpally et al [<xref ref-type="bibr" rid="ref37">37</xref>], who emphasized that GenAI may support learning and selected reasoning tasks but remains constrained by output reliability and cannot substitute for human judgment, particularly in domains directly implicating professional responsibility, such as patient data collection and prognostication.</p><p>Perceived risks were dominated by concerns about dependency, long-term erosion of clinical skills, and confidentiality breaches, alongside worries about bias, uncertain legal accountability, and a more subtle displacement of professional responsibility and human interaction. 
These concerns resonate with the broader literature on automation bias and overreliance in clinical decision support, including discussions of &#x201C;assistive AI&#x201D; as the lowest level of autonomy that may still erode vigilance if not framed and supervised [<xref ref-type="bibr" rid="ref36">36</xref>]. Importantly, students did not primarily express fears of being replaced. Rather, they emphasized risks of weakened judgment, diluted accountability, and reduced quality of the care relationship. These concerns suggest that GenAI integration in workplace learning should be structured, reflective, and explicitly governed rather than left to informal experimentation [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>].</p></sec><sec id="s4-4"><title>Confidentiality and Data Governance</title><p>From a patient-safety and governance perspective, the disclosure of sensitive information calls for specific attention. In our sample, 23.5% (48/204) reported at least 1 instance of entering patient-identifying information, and 47.1% (96/204) reported processing real medical content perceived as anonymized. Even when students perceive data as anonymized, free-text prompts may contain contextual details that increase reidentification risk, and the use of nonapproved third-party tools may involve data processing outside institutional control (eg, storage and transfer conditions) [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. In European contexts, this directly intersects with General Data Protection Regulation obligations and institutional requirements for handling health data [<xref ref-type="bibr" rid="ref40">40</xref>].</p><p>These findings imply a need for explicit supervision and coordinated governance during placements. 
Placement supervisors, faculty, and host clinical sites (eg, hospital departments and private practices) should clearly define acceptable versus unacceptable prompting, provide concrete deidentification rules and examples, and ensure access to approved, compliant tools (or clear prohibitions where such tools are not available). Because clinical training occurs across multiple host sites, governance should be coordinated between the academic institution and placement providers so that expectations and safeguards are consistent across settings. At the institutional level, local charters and &#x201C;allowed tool&#x201D; policies aligned with data-protection governance should accompany training, so that confidentiality norms are not left to informal experimentation. In France, this approach is consistent with recent institutional guidance: the French National Academy of Medicine discusses ethical issues and recommendations for the use of GenAI in health [<xref ref-type="bibr" rid="ref41">41</xref>], and the French National Authority for Health has published a practical guide to support appropriate and informed use of GenAI by health professionals [<xref ref-type="bibr" rid="ref42">42</xref>].</p></sec><sec id="s4-5"><title>From Technical Literacy to Patient-Centered GenAI Education</title><p>Students&#x2019; expectations for training extend beyond technical mastery toward the full triad of knowledge, skills, and professional attitudes: knowledge of ethical and regulatory frameworks; practical competencies developed through workshops and profession-specific coaching; and professional dispositions cultivated through mentorship, reflexivity, and attention to sustainability. This aligns with calls to design health AI curricula that move past technical literacy to incorporate critical thinking, ethics, and patient-centered judgment. 
Komasawa and Yokohira [<xref ref-type="bibr" rid="ref43">43</xref>] argue that the rise of GenAI requires rethinking health professionalism by combining technological fluency with enduring humanistic values such as empathy, integrity, and accountability.</p></sec><sec id="s4-6"><title>A Pragmatic Pathway From Informal Adoption to Responsible Integration</title><p>Our findings point to an institutional readiness gap during placements: limited shared benchmarks, uneven supervisory guidance, and insufficient attention to bias, regulation, and data governance. They also support a stepwise pathway to move from informal, student-driven use toward safe and educationally meaningful integration across disciplines. First, a transversal common core should address ethics, regulation, professional standards, and model limitations or uncertainty, alongside concrete rules for health data handling in GenAI contexts (what must never be entered, how to deidentify, and which tools are approved). Second, competency development requires practice beyond theory: simulation- or case-based activities with GenAI, explicit reflection prompts, and structured feedback can help students learn when GenAI supports reasoning versus when it undermines it (eg, automation bias and overreliance) and can contribute to professional identity formation. Third, training for both academic staff and host-site supervisors is essential so that placement supervisors can set shared expectations, mentor reflective use, and intervene when unsafe practices occur. Fourth, institutional governance should provide clear local policies (eg, charters, approved tool lists, patient transparency where relevant, and escalation procedures for data incidents), ensuring alignment between pedagogy and professional standards. 
This enabling framework does not mandate adoption; it operationalizes conditions under which GenAI use remains compatible with reflexivity, professional accountability, and patient-centered care, and can help transform spontaneous use into integrated, supervised competencies in clinical learning settings, as illustrated by structured simulation initiatives such as MedSimAI [<xref ref-type="bibr" rid="ref32">32</xref>].</p></sec><sec id="s4-7"><title>Limitations</title><p>This study has several limitations. First, it was conducted within a single French university (UGA), which limits international generalizability. The survey was embedded in a national initiative funded by the French National Research Agency to support the rollout of digital health and AI in health education across French university training programs. While this may strengthen the transferability of educational implications, it does not eliminate the single-site constraint. Notably, the main trends observed here are consistent with the international survey by Busch et al [<xref ref-type="bibr" rid="ref44">44</xref>] (4596 students from 192 faculties across 48 countries), which similarly reported widespread student adoption of generative tools alongside a global deficit in formal AI education. Second, GenAI use and data-disclosure behaviors were measured through self-report and may be influenced by recall and social desirability biases. Although responses were collected anonymously and without incentives, self-reporting remains susceptible to these biases. Nevertheless, documenting students&#x2019; lived practices and perceptions was a core aim, particularly in contexts where formal pedagogical frameworks are still emerging; as such, perceptions may be viewed as outcomes in their own right, consistent with Busch et al [<xref ref-type="bibr" rid="ref44">44</xref>]. 
Third, we did not collect data from clinical supervisors or educational leaders and therefore cannot characterize institutional policies or supervisory practices with certainty. This likely reflects a transitional situation in which both learners and supervisors are adapting to rapidly evolving GenAI tools and local guidance, as suggested by recent work by Blanco et al [<xref ref-type="bibr" rid="ref45">45</xref>] and McCoy et al [<xref ref-type="bibr" rid="ref46">46</xref>]. Fourth, the cross-sectional design provides a snapshot at a specific time point; given the rapid evolution of GenAI tools and institutional policies, the stability of these findings over time cannot be assumed. Fifth, interprofessional heterogeneity is an additional limitation: some health professions were underrepresented, limiting robust profession-specific comparisons. This reflects our primary aim of mapping placement-related practices rather than comparing disciplines; nonetheless, profession-specific differences in AI literacy and attitudes have been reported elsewhere and warrant dedicated analyses [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. Finally, our GenAI maturity score should be considered an exploratory composite index of self-perceived readiness. Although internal consistency was good (Cronbach <italic>&#x03B1;</italic>=0.869), we did not undertake a full psychometric validation; further work should confirm and refine this construct.</p></sec><sec id="s4-8"><title>Perspectives</title><p>Future research should move beyond single institutions to include multicenter and international comparisons, enabling the identification of contextual variations and common challenges. Longitudinal designs will also be needed to capture how perceptions, competencies, and professional identity evolve as GenAI becomes more embedded in health professions education. 
Finally, triangulating student perspectives with those of supervisors, educators, and institutional leaders will be crucial to developing evidence-based strategies that responsibly integrate GenAI into curricula while respecting profession-specific needs.</p></sec><sec id="s4-9"><title>Conclusion</title><p>In conclusion, GenAI is already being used by French students in health professions during clinical placements, primarily as a cognitive and organizational aid. At the same time, students report concerns about dependence, skill erosion, and confidentiality, and express a strong demand for structured guidance. These findings support the need for explicit, ethically grounded, and supervised frameworks that align curricula, clinical supervision, and host-site governance so that GenAI use fosters reflexivity, autonomy, and professional accountability rather than unsupervised experimentation. More broadly, this echoes developments already observed across higher education and health care governance, including emerging institutional policies [<xref ref-type="bibr" rid="ref13">13</xref>], the timely need for clear ethical and regulatory frameworks [<xref ref-type="bibr" rid="ref49">49</xref>], and methodological approaches for developing evidence-based interdisciplinary guidelines [<xref ref-type="bibr" rid="ref50">50</xref>].</p></sec></sec></body><back><ack><p>The authors gratefully acknowledge the support of the deans of the faculties of medicine and pharmacy at Universit&#x00E9; Grenoble Alpes, whose approval enabled the dissemination of the survey invitation to students. Their assistance in facilitating access to medical and allied health students was essential to the successful recruitment process. During the preparation of this manuscript, the authors used ChatGPT (GPT-5) for code optimization in data analysis and for language editing. 
The authors reviewed and edited all outputs and take full responsibility for the content, data interpretations, and conclusions of this publication.</p></ack><notes><sec><title>Funding</title><p>The study was supported by the French National Research Agency under the France 2030 program (23-CMAS-0035) and the Multidisciplinary Institute in Artificial Intelligence Cluster (23-IACL-0006). This work also forms part of a broader translational and interdisciplinary Sant&#x00E9; Num&#x00E9;rique Grenoble Intelligence Artificielle research program.</p></sec><sec><title>Data Availability</title><p>The data supporting the findings of this study are not publicly available due to ethical and privacy restrictions. Anonymized datasets can be made available from the corresponding author, SK, upon reasonable request and subject to approval by the institutional ethics committee.</p></sec></notes><fn-group><fn fn-type="con"><p>SK conceived and coordinated the study and drafted the first version of the manuscript. CMT and KG contributed to methodological development. Analyses were performed by SK, CMT, and MM. Data collection was conducted by SK, KG, and MM. Interpretation of the results was conducted by SK, CMT, KG, and MM. NV provided overall supervision and critical guidance throughout the project. 
All authors critically reviewed the manuscript and approved the definitive version.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CHERRIES</term><def><p>Checklist for Reporting Results of Internet E-Surveys</p></def></def-item><def-item><term id="abb3">GenAI</term><def><p>generative artificial intelligence</p></def></def-item><def-item><term id="abb4">UGA</term><def><p>Universit&#x00E9; Grenoble Alpes</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ganjavi</surname><given-names>C</given-names> </name><name name-style="western"><surname>Eppler</surname><given-names>M</given-names> </name><name name-style="western"><surname>O&#x2019;Brien</surname><given-names>D</given-names> </name><etal/></person-group><article-title>ChatGPT and large language models (LLMs) awareness and use. A prospective cross-sectional survey of U.S. 
medical students</article-title><source>PLOS Digit Health</source><year>2024</year><month>09</month><volume>3</volume><issue>9</issue><fpage>e0000596</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000596</pub-id><pub-id pub-id-type="medline">39236008</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moskovich</surname><given-names>L</given-names> </name><name name-style="western"><surname>Rozani</surname><given-names>V</given-names> </name></person-group><article-title>Health profession students&#x2019; perceptions of ChatGPT in healthcare and education: insights from a mixed-methods study</article-title><source>BMC Med Educ</source><year>2025</year><month>01</month><day>21</day><volume>25</volume><issue>1</issue><fpage>98</fpage><pub-id pub-id-type="doi">10.1186/s12909-025-06702-0</pub-id><pub-id pub-id-type="medline">39833868</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weidener</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence in medicine: cross-sectional study among medical students on application, education, and ethical aspects</article-title><source>JMIR Med Educ</source><year>2024</year><month>01</month><day>5</day><volume>10</volume><fpage>e51247</fpage><pub-id pub-id-type="doi">10.2196/51247</pub-id><pub-id pub-id-type="medline">38180787</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>El Arab</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Al Moosa</surname><given-names>OA</given-names> </name><name 
name-style="western"><surname>Abuadas</surname><given-names>FH</given-names> </name><name name-style="western"><surname>Somerville</surname><given-names>J</given-names> </name></person-group><article-title>The role of AI in nursing education and practice: umbrella review</article-title><source>J Med Internet Res</source><year>2025</year><month>04</month><day>4</day><volume>27</volume><fpage>e69881</fpage><pub-id pub-id-type="doi">10.2196/69881</pub-id><pub-id pub-id-type="medline">40072926</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yuan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Leng</surname><given-names>L</given-names> </name><etal/></person-group><article-title>The strengths, weaknesses, opportunities, and threats of generative artificial intelligence: a qualitative study of undergraduate nursing students</article-title><source>Front Public Health</source><year>2025</year><volume>13</volume><fpage>1672140</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2025.1672140</pub-id><pub-id pub-id-type="medline">40977793</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tortella</surname><given-names>F</given-names> </name><name name-style="western"><surname>Palese</surname><given-names>A</given-names> </name><name name-style="western"><surname>Turolla</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Knowledge and use, perceptions of benefits and limitations of artificial intelligence chatbots among Italian physiotherapy students: a cross-sectional national study</article-title><source>BMC Med 
Educ</source><year>2025</year><month>04</month><day>18</day><volume>25</volume><issue>1</issue><fpage>572</fpage><pub-id pub-id-type="doi">10.1186/s12909-025-07176-w</pub-id><pub-id pub-id-type="medline">40251635</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abd-Alrazaq</surname><given-names>A</given-names> </name><name name-style="western"><surname>AlSaad</surname><given-names>R</given-names> </name><name name-style="western"><surname>Alhuwail</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Large language models in medical education: opportunities, challenges, and future directions</article-title><source>JMIR Med Educ</source><year>2023</year><month>06</month><day>1</day><volume>9</volume><fpage>e48291</fpage><pub-id pub-id-type="doi">10.2196/48291</pub-id><pub-id pub-id-type="medline">37261894</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boscardin</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Gin</surname><given-names>B</given-names> </name><name name-style="western"><surname>Golde</surname><given-names>PB</given-names> </name><name name-style="western"><surname>Hauer</surname><given-names>KE</given-names> </name></person-group><article-title>ChatGPT and generative artificial intelligence for medical education: potential impact and opportunity</article-title><source>Acad Med</source><year>2024</year><month>01</month><day>1</day><volume>99</volume><issue>1</issue><fpage>22</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000005439</pub-id><pub-id pub-id-type="medline">37651677</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Divito</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Katchikian</surname><given-names>BM</given-names> </name><name name-style="western"><surname>Gruenwald</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Burgoon</surname><given-names>JM</given-names> </name></person-group><article-title>The tools of the future are the challenges of today: the use of ChatGPT in problem-based learning medical education</article-title><source>Med Teach</source><year>2024</year><month>03</month><volume>46</volume><issue>3</issue><fpage>320</fpage><lpage>322</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2023.2290997</pub-id><pub-id pub-id-type="medline">38149617</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pham</surname><given-names>TD</given-names> </name><name name-style="western"><surname>Karunaratne</surname><given-names>N</given-names> </name><name name-style="western"><surname>Exintaris</surname><given-names>B</given-names> </name><etal/></person-group><article-title>The impact of generative AI on health professional education: a systematic review in the context of student learning</article-title><source>Med Educ</source><year>2025</year><month>12</month><volume>59</volume><issue>12</issue><fpage>1280</fpage><lpage>1289</lpage><pub-id pub-id-type="doi">10.1111/medu.15746</pub-id><pub-id pub-id-type="medline">40533396</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Svendsen</surname><given-names>K</given-names> </name><name name-style="western"><surname>Askar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Umer</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Halvorsen</surname><given-names>KH</given-names> </name></person-group><article-title>Short-term learning effect of ChatGPT on pharmacy students&#x2019; learning</article-title><source>Explor Res Clin Soc Pharm</source><year>2024</year><month>09</month><volume>15</volume><fpage>100478</fpage><pub-id pub-id-type="doi">10.1016/j.rcsop.2024.100478</pub-id><pub-id pub-id-type="medline">39139501</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elnaem</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Okuyan</surname><given-names>B</given-names> </name><name name-style="western"><surname>Mubarak</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Students&#x2019; acceptance and use of generative AI in pharmacy education: international cross-sectional survey based on the extended unified theory of acceptance and use of technology</article-title><source>Int J Clin Pharm</source><year>2025</year><month>08</month><volume>47</volume><issue>4</issue><fpage>1097</fpage><lpage>1108</lpage><pub-id pub-id-type="doi">10.1007/s11096-025-01936-w</pub-id><pub-id pub-id-type="medline">40465181</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McDonald</surname><given-names>N</given-names> </name><name name-style="western"><surname>Johri</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ali</surname><given-names>A</given-names> </name><name name-style="western"><surname>Collier</surname><given-names>AH</given-names> </name></person-group><article-title>Generative artificial intelligence in higher education: evidence from an analysis of institutional policies and guidelines</article-title><source>Comput Hum Behav 
Artif Hum</source><year>2025</year><month>03</month><volume>3</volume><fpage>100121</fpage><pub-id pub-id-type="doi">10.1016/j.chbah.2025.100121</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharifi Kelarijani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Safdari</surname><given-names>A</given-names> </name><name name-style="western"><surname>Golitaleb</surname><given-names>M</given-names> </name></person-group><article-title>Every coin has two sides: ChatGPT poses a potential threat to nursing students&#x2019; education</article-title><source>Front Med (Lausanne)</source><year>2024</year><volume>11</volume><fpage>1415067</fpage><pub-id pub-id-type="doi">10.3389/fmed.2024.1415067</pub-id><pub-id pub-id-type="medline">39114822</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Galletta</surname><given-names>M</given-names> </name><name name-style="western"><surname>Portoghese</surname><given-names>I</given-names> </name><name name-style="western"><surname>Aviles Gonzales</surname><given-names>CI</given-names> </name><etal/></person-group><article-title>Lack of respect, role uncertainty and satisfaction with clinical practice among nursing students: the moderating role of supportive staff</article-title><source>Acta Biomed</source><year>2017</year><month>07</month><day>18</day><volume>88</volume><issue>3S</issue><fpage>43</fpage><lpage>50</lpage><pub-id pub-id-type="doi">10.23750/abm.v88i3-S.6613</pub-id><pub-id pub-id-type="medline">28752832</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mazalov&#x00E1;</surname><given-names>L</given-names> 
</name><name name-style="western"><surname>Gurkov&#x00E1;</surname><given-names>E</given-names> </name><name name-style="western"><surname>&#x0160;turekov&#x00E1;</surname><given-names>L</given-names> </name></person-group><article-title>Nursing students&#x2019; perceived stress and clinical learning experience</article-title><source>Nurse Educ Pract</source><year>2022</year><month>10</month><volume>64</volume><fpage>103457</fpage><pub-id pub-id-type="doi">10.1016/j.nepr.2022.103457</pub-id><pub-id pub-id-type="medline">36182730</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>YK</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CD</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>BYJ</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>DY</given-names> </name></person-group><article-title>Medical students&#x2019; resilience: a protective role on stress and quality of life in clerkship</article-title><source>BMC Med Educ</source><year>2019</year><month>12</month><day>27</day><volume>19</volume><issue>1</issue><fpage>473</fpage><pub-id pub-id-type="doi">10.1186/s12909-019-1912-4</pub-id><pub-id pub-id-type="medline">31881997</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ali</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Dobbs</surname><given-names>TD</given-names> </name><name name-style="western"><surname>Hutchings</surname><given-names>HA</given-names> </name><name name-style="western"><surname>Whitaker</surname><given-names>IS</given-names> </name></person-group><article-title>Using ChatGPT to write patient clinic letters</article-title><source>Lancet 
Digit Health</source><year>2023</year><month>04</month><volume>5</volume><issue>4</issue><fpage>e179</fpage><lpage>e181</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(23)00048-1</pub-id><pub-id pub-id-type="medline">36894409</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jeblick</surname><given-names>K</given-names> </name><name name-style="western"><surname>Schachtner</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dexl</surname><given-names>J</given-names> </name><etal/></person-group><article-title>ChatGPT makes medicine easy to swallow: an exploratory case study on simplified radiology reports</article-title><source>Eur Radiol</source><year>2024</year><month>05</month><volume>34</volume><issue>5</issue><fpage>2817</fpage><lpage>2825</lpage><pub-id pub-id-type="doi">10.1007/s00330-023-10213-1</pub-id><pub-id pub-id-type="medline">37794249</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>El Arab</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Al Moosa</surname><given-names>OA</given-names> </name><name name-style="western"><surname>Sagbakken</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Integrative review of artificial intelligence applications in nursing: education, clinical practice, workload management, and professional perceptions</article-title><source>Front Public Health</source><year>2025</year><volume>13</volume><fpage>1619378</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2025.1619378</pub-id><pub-id pub-id-type="medline">40823249</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Blease</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Locher</surname><given-names>C</given-names> </name><name name-style="western"><surname>Gaab</surname><given-names>J</given-names> </name><name name-style="western"><surname>H&#x00E4;gglund</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mandl</surname><given-names>KD</given-names> </name></person-group><article-title>Generative artificial intelligence in primary care: an online survey of UK general practitioners</article-title><source>BMJ Health Care Inform</source><year>2024</year><month>09</month><day>17</day><volume>31</volume><issue>1</issue><fpage>e101102</fpage><pub-id pub-id-type="doi">10.1136/bmjhci-2024-101102</pub-id><pub-id pub-id-type="medline">39288998</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rahmanti</surname><given-names>AR</given-names> </name><name name-style="western"><surname>Iqbal</surname><given-names>U</given-names> </name><name name-style="western"><surname>Reddy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>XW</given-names> </name><name name-style="western"><surname>Nguyen</surname><given-names>HX</given-names> </name><name name-style="western"><surname>Li</surname><given-names>YCJ</given-names> </name></person-group><article-title>Generative artificial intelligence (AI): a key innovation or just hype in primary care settings?</article-title><source>BMJ Health Care Inform</source><year>2024</year><month>12</month><day>31</day><volume>31</volume><issue>1</issue><fpage>e101367</fpage><pub-id pub-id-type="doi">10.1136/bmjhci-2024-101367</pub-id><pub-id pub-id-type="medline">39740857</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Lobet</surname><given-names>M</given-names> </name><name name-style="western"><surname>Honet</surname><given-names>A</given-names> </name><name name-style="western"><surname>Romainville</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wathelet</surname><given-names>V</given-names> </name></person-group><article-title>ChatGPT: quel en a &#x00E9;t&#x00E9; l&#x2019;usage spontan&#x00E9; d&#x2019;&#x00E9;tudiants de premi&#x00E8;re ann&#x00E9;e universitaire &#x00E0; son arriv&#x00E9;e? [Article in French]</article-title><source>Med Med</source><year>2024</year><month>10</month><day>30</day><issue>18</issue><fpage>67</fpage><lpage>90</lpage><pub-id pub-id-type="doi">10.52358/mm.vi18.379</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karaca</surname><given-names>O</given-names> </name><name name-style="western"><surname>&#x00C7;al&#x0131;&#x015F;kan</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Demir</surname><given-names>K</given-names> </name></person-group><article-title>Medical Artificial Intelligence Readiness Scale for Medical Students (MAIRS-MS) - development, validity and reliability study</article-title><source>BMC Med Educ</source><year>2021</year><month>02</month><day>18</day><volume>21</volume><issue>1</issue><fpage>112</fpage><pub-id pub-id-type="doi">10.1186/s12909-021-02546-6</pub-id><pub-id pub-id-type="medline">33602196</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boillat</surname><given-names>T</given-names> </name><name name-style="western"><surname>Nawaz</surname><given-names>FA</given-names> </name><name 
name-style="western"><surname>Rivas</surname><given-names>H</given-names> </name></person-group><article-title>Readiness to embrace artificial intelligence among medical doctors and students: questionnaire-based study</article-title><source>JMIR Med Educ</source><year>2022</year><month>04</month><day>12</day><volume>8</volume><issue>2</issue><fpage>e34973</fpage><pub-id pub-id-type="doi">10.2196/34973</pub-id><pub-id pub-id-type="medline">35412463</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eysenbach</surname><given-names>G</given-names> </name></person-group><article-title>Improving the quality of web surveys: the Checklist for Reporting Results of Internet E-Surveys (CHERRIES)</article-title><source>J Med Internet Res</source><year>2004</year><month>09</month><day>29</day><volume>6</volume><issue>3</issue><fpage>e34</fpage><pub-id pub-id-type="doi">10.2196/jmir.6.3.e34</pub-id><pub-id pub-id-type="medline">15471760</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rezvan</surname><given-names>PH</given-names> </name><name name-style="western"><surname>Comulada</surname><given-names>WS</given-names> </name><name name-style="western"><surname>Fern&#x00E1;ndez</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Belin</surname><given-names>TR</given-names> </name></person-group><article-title>Assessing alternative imputation strategies for infrequently missing items on multi-item scales</article-title><source>Commun Stat Case Stud Data Anal Appl</source><year>2022</year><volume>8</volume><issue>4</issue><fpage>682</fpage><lpage>713</lpage><pub-id pub-id-type="doi">10.1080/23737484.2022.2115430</pub-id><pub-id pub-id-type="medline">36467970</pub-id></nlm-citation></ref><ref 
id="ref28"><label>28</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Agresti</surname><given-names>A</given-names> </name></person-group><source>An Introduction to Categorical Data Analysis</source><year>2007</year><access-date>2026-03-31</access-date><edition>2</edition><publisher-name>John Wiley &#x0026; Sons</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/doi/book/10.1002/0470114754">https://onlinelibrary.wiley.com/doi/book/10.1002/0470114754</ext-link></comment><pub-id pub-id-type="other">9780471226185</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sullivan</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Artino</surname><given-names>AR</given-names> </name></person-group><article-title>Analyzing and interpreting data from Likert-type scales</article-title><source>J Grad Med Educ</source><year>2013</year><month>12</month><volume>5</volume><issue>4</issue><fpage>541</fpage><lpage>542</lpage><pub-id pub-id-type="doi">10.4300/JGME-5-4-18</pub-id><pub-id pub-id-type="medline">24454995</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Royston</surname><given-names>P</given-names> </name></person-group><article-title>The cost of dichotomising continuous variables</article-title><source>BMJ</source><year>2006</year><month>05</month><day>6</day><volume>332</volume><issue>7549</issue><fpage>1080</fpage><pub-id pub-id-type="doi">10.1136/bmj.332.7549.1080</pub-id><pub-id pub-id-type="medline">16675816</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Su&#x00E1;rez-Garc&#x00ED;a</surname><given-names>RX</given-names> </name><name name-style="western"><surname>Chavez-Casta&#x00F1;eda</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Orrico-P&#x00E9;rez</surname><given-names>R</given-names> </name><etal/></person-group><article-title>DIALOGUE: a generative AI-based pre-post simulation study to enhance diagnostic communication in medical students through virtual type 2 diabetes scenarios</article-title><source>Eur J Investig Health Psychol Educ</source><year>2025</year><month>08</month><day>7</day><volume>15</volume><issue>8</issue><fpage>152</fpage><pub-id pub-id-type="doi">10.3390/ejihpe15080152</pub-id><pub-id pub-id-type="medline">40863274</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Hicke</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Geathers</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rajashekar</surname><given-names>N</given-names> </name><etal/></person-group><article-title>MedSimAI: simulation and formative feedback generation to enhance deliberate practice in medical education</article-title><source>arXiv</source><comment>Preprint posted online on  Mar 1, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2503.05793</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wald</surname><given-names>HS</given-names> </name></person-group><article-title>Professional identity (trans)formation in medical education: reflection, relationship, resilience</article-title><source>Acad 
Med</source><year>2015</year><month>06</month><volume>90</volume><issue>6</issue><fpage>701</fpage><lpage>706</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000000731</pub-id><pub-id pub-id-type="medline">25881651</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lawrence</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mhlaba</surname><given-names>T</given-names> </name><name name-style="western"><surname>Stewart</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Moletsane</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gaede</surname><given-names>B</given-names> </name><name name-style="western"><surname>Moshabela</surname><given-names>M</given-names> </name></person-group><article-title>The hidden curricula of medical education: a scoping review</article-title><source>Acad Med</source><year>2018</year><month>04</month><volume>93</volume><issue>4</issue><fpage>648</fpage><lpage>656</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000002004</pub-id><pub-id pub-id-type="medline">29116981</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Monzon</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hays</surname><given-names>FA</given-names> </name></person-group><article-title>Leveraging generative artificial intelligence to improve motivation and retrieval in higher education learners</article-title><source>JMIR Med Educ</source><year>2025</year><month>03</month><day>11</day><volume>11</volume><fpage>e59210</fpage><pub-id pub-id-type="doi">10.2196/59210</pub-id><pub-id pub-id-type="medline">40068170</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdelwanis</surname><given-names>M</given-names> </name><name name-style="western"><surname>Alarafati</surname><given-names>HK</given-names> </name><name name-style="western"><surname>Tammam</surname><given-names>MMS</given-names> </name><name name-style="western"><surname>Simsekler</surname><given-names>MCE</given-names> </name></person-group><article-title>Exploring the risks of automation bias in healthcare artificial intelligence applications: a Bowtie analysis</article-title><source>J Saf Sci Resil</source><year>2024</year><month>12</month><volume>5</volume><issue>4</issue><fpage>460</fpage><lpage>469</lpage><pub-id pub-id-type="doi">10.1016/j.jnlssr.2024.06.001</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Janumpally</surname><given-names>R</given-names> </name><name name-style="western"><surname>Nanua</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ngo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Youens</surname><given-names>K</given-names> </name></person-group><article-title>Generative artificial intelligence in graduate medical education</article-title><source>Front Med (Lausanne)</source><year>2024</year><volume>11</volume><fpage>1525604</fpage><pub-id pub-id-type="doi">10.3389/fmed.2024.1525604</pub-id><pub-id pub-id-type="medline">39867924</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ford</surname><given-names>E</given-names> </name><name name-style="western"><surname>Pillinger</surname><given-names>S</given-names> </name><name name-style="western"><surname>Stewart</surname><given-names>R</given-names> 
</name><etal/></person-group><article-title>What is the patient re-identification risk from using de-identified clinical free text data for health research?</article-title><source>AI Ethics</source><year>2025</year><volume>5</volume><issue>5</issue><fpage>4441</fpage><lpage>4454</lpage><pub-id pub-id-type="doi">10.1007/s43681-025-00681-0</pub-id><pub-id pub-id-type="medline">40978336</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="report"><article-title>Regulation (EU) 2018/1725 of the European Parliament and of the Council of 23 October 2018 on the protection of natural persons with regard to the processing of personal data by the Union institutions, bodies, offices and agencies and on the free movement of such data, and repealing Regulation (EC) No 45/2001 and Decision No 1247/2002/EC</article-title><year>2018</year><access-date>2026-01-09</access-date><publisher-name>European Union</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.edps.europa.eu/data-protection/our-work/publications/legislation/regulation-eu-20181725_en">https://www.edps.europa.eu/data-protection/our-work/publications/legislation/regulation-eu-20181725_en</ext-link></comment></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="report"><article-title>Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation)</article-title><year>2016</year><access-date>2026-01-09</access-date><publisher-name>European Union</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://eur-lex.europa.eu/eli/reg/2016/679/oj">https://eur-lex.europa.eu/eli/reg/2016/679/oj</ext-link></comment></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nordlinger</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kirchner</surname><given-names>C</given-names> </name><name name-style="western"><surname>de Fresnoye</surname><given-names>O</given-names> </name></person-group><article-title>Rapport 24-03. Syst&#x00E8;mes d&#x2019;IA g&#x00E9;n&#x00E9;rative en sant&#x00E9;: enjeux et perspectives [Article in French]</article-title><source>Bulletin de l&#x2019;Acad&#x00E9;mie Nationale de M&#x00E9;decine</source><year>2024</year><month>05</month><volume>208</volume><issue>5</issue><fpage>536</fpage><lpage>547</lpage><pub-id pub-id-type="doi">10.1016/j.banm.2024.03.005</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Valois</surname><given-names>P</given-names> </name><name name-style="western"><surname>Marc</surname><given-names>J</given-names> </name><name name-style="western"><surname>Collignon</surname><given-names>C</given-names> </name></person-group><article-title>Premi&#x00E8;res clefs d&#x2019;usage de l&#x2019;IA g&#x00E9;n&#x00E9;rative en sant&#x00E9; [Report in French]</article-title><year>2025</year><access-date>2026-01-09</access-date><publisher-name>Haute Autorit&#x00E9; de Sant&#x00E9;</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.has-sante.fr/jcms/p_3703115/fr/premieres-clefs-d-usage-de-l-ia-generative-en-sante">https://www.has-sante.fr/jcms/p_3703115/fr/premieres-clefs-d-usage-de-l-ia-generative-en-sante</ext-link></comment></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Komasawa</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Yokohira</surname><given-names>M</given-names> </name></person-group><article-title>Generative artificial intelligence (AI) in medical education: a narrative review of the challenges and possibilities for future professionalism</article-title><source>Cureus</source><year>2025</year><month>06</month><volume>17</volume><issue>6</issue><fpage>e86316</fpage><pub-id pub-id-type="doi">10.7759/cureus.86316</pub-id><pub-id pub-id-type="medline">40688936</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Busch</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hoffmann</surname><given-names>L</given-names> </name><name name-style="western"><surname>Truhn</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Global cross-sectional student survey on AI in medical, dental, and veterinary education and practice at 192 faculties</article-title><source>BMC Med Educ</source><year>2024</year><month>09</month><day>28</day><volume>24</volume><issue>1</issue><fpage>1066</fpage><pub-id pub-id-type="doi">10.1186/s12909-024-06035-4</pub-id><pub-id pub-id-type="medline">39342231</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blanco</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Nelson</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Ramesh</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Integrating artificial intelligence into medical education: a roadmap informed by a survey of faculty and students</article-title><source>Med Educ Online</source><year>2025</year><month>12</month><volume>30</volume><issue>1</issue><fpage>2531177</fpage><pub-id 
pub-id-type="doi">10.1080/10872981.2025.2531177</pub-id><pub-id pub-id-type="medline">40660466</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCoy</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ganesan</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rajagopalan</surname><given-names>V</given-names> </name><name name-style="western"><surname>McKell</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ni&#x00F1;o</surname><given-names>DF</given-names> </name><name name-style="western"><surname>Swaim</surname><given-names>MC</given-names> </name></person-group><article-title>A training needs analysis for AI and generative AI in medical education: perspectives of faculty and students</article-title><source>J Med Educ Curric Dev</source><year>2025</year><volume>12</volume><fpage>23821205251339226</fpage><pub-id pub-id-type="doi">10.1177/23821205251339226</pub-id><pub-id pub-id-type="medline">40376309</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rinc&#x00F3;n</surname><given-names>EHH</given-names> </name><name name-style="western"><surname>Jimenez</surname><given-names>D</given-names> </name><name name-style="western"><surname>Aguilar</surname><given-names>LAC</given-names> </name><name name-style="western"><surname>Fl&#x00F3;rez</surname><given-names>JMP</given-names> </name><name name-style="western"><surname>Tapia</surname><given-names>&#x00C1;ER</given-names> </name><name name-style="western"><surname>Pe&#x00F1;uela</surname><given-names>CLJ</given-names> </name></person-group><article-title>Mapping the use of artificial intelligence in medical education: a scoping review</article-title><source>BMC Med 
Educ</source><year>2025</year><month>04</month><day>12</day><volume>25</volume><issue>1</issue><fpage>526</fpage><pub-id pub-id-type="doi">10.1186/s12909-025-07089-8</pub-id><pub-id pub-id-type="medline">40221725</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shishehgar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Murray-Parahi</surname><given-names>P</given-names> </name><name name-style="western"><surname>Alsharaydeh</surname><given-names>E</given-names> </name><name name-style="western"><surname>Mills</surname><given-names>S</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name></person-group><article-title>Artificial intelligence in health education and practice: a systematic review of health students&#x2019; and academics&#x2019; knowledge, perceptions and experiences</article-title><source>Int Nurs Rev</source><year>2025</year><month>06</month><volume>72</volume><issue>2</issue><fpage>e70045</fpage><pub-id pub-id-type="doi">10.1111/inr.70045</pub-id><pub-id pub-id-type="medline">40545441</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garc&#x00ED;a-L&#x00F3;pez</surname><given-names>IM</given-names> </name><name name-style="western"><surname>Trujillo-Li&#x00F1;&#x00E1;n</surname><given-names>L</given-names> </name></person-group><article-title>Ethical and regulatory challenges of generative AI in education: a systematic review</article-title><source>Front Educ</source><year>2025</year><month>06</month><day>30</day><volume>10</volume><fpage>1565938</fpage><pub-id pub-id-type="doi">10.3389/feduc.2025.1565938</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Symeou</surname><given-names>L</given-names> </name><name name-style="western"><surname>Louca</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kavadella</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mackay</surname><given-names>J</given-names> </name><name name-style="western"><surname>Danidou</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Raffay</surname><given-names>V</given-names> </name></person-group><article-title>Development of evidence-based guidelines for the integration of generative AI in university education through a multidisciplinary, consensus-based approach</article-title><source>Eur J Dent Educ</source><year>2025</year><month>05</month><volume>29</volume><issue>2</issue><fpage>285</fpage><lpage>303</lpage><pub-id pub-id-type="doi">10.1111/eje.13069</pub-id><pub-id pub-id-type="medline">39949032</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Full survey questionnaire (French version with English translation) and supplementary tables (Tables S1-S11) reporting the generative artificial intelligence maturity items and detailed response distributions across key study domains.</p><media xlink:href="mededu_v12i1e85243_app1.docx" xlink:title="DOCX File, 79 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>CHERRIES checklist for the web-based survey reporting.</p><media xlink:href="mededu_v12i1e85243_app2.docx" xlink:title="DOCX File, 584 KB"/></supplementary-material></app-group></back></article>