<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v11i1e58801</article-id><article-id pub-id-type="doi">10.2196/58801</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Global Health care Professionals&#x2019; Perceptions of Large Language Model Use In Practice: Cross-Sectional Survey Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Ozkan</surname><given-names>Ecem</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Tekin</surname><given-names>Aysun</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ozkan</surname><given-names>Mahmut Can</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Cabrera</surname><given-names>Daniel</given-names></name><degrees>MD</degrees><xref ref-type="aff" 
rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Niven</surname><given-names>Alexander</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Dong</surname><given-names>Yue</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Medicine, Jersey Shore University Medical Center</institution><addr-line>1945 NJ-33</addr-line><addr-line>Neptune</addr-line><addr-line>NJ</addr-line><country>United States</country></aff><aff id="aff2"><institution>Department of Anesthesiology, Mayo Clinic College of Medicine</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Emergency Medicine, Mayo Clinic College of Medicine</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff4"><institution>Department of Pulmonary and Critical Care Medicine, Mayo Clinic College of Medicine</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Lesselroth</surname><given-names>Blake</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Chen</surname><given-names>Fangyuan</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Achara</surname><given-names>Kosisochi</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Mao</surname><given-names>Siqi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ecem Ozkan, MD, Department of Medicine, Jersey Shore University Medical 
Center, 1945 NJ-33, Neptune, NJ, 07753, United States, 1 5078843064; <email>ecemozkanmd@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>12</day><month>5</month><year>2025</year></pub-date><volume>11</volume><elocation-id>e58801</elocation-id><history><date date-type="received"><day>25</day><month>03</month><year>2024</year></date><date date-type="rev-recd"><day>11</day><month>04</month><year>2025</year></date><date date-type="accepted"><day>19</day><month>04</month><year>2025</year></date></history><copyright-statement>&#x00A9; Ecem Ozkan, Aysun Tekin, Mahmut Can Ozkan, Daniel Cabrera, Alexander Niven, Yue Dong. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 12.5.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2025/1/e58801"/><abstract><sec><title>Background</title><p>ChatGPT is a large language model-based chatbot developed by OpenAI. 
ChatGPT has many potential applications to health care, including enhanced diagnostic accuracy and efficiency, improved treatment planning, and better patient outcomes. However, health care professionals&#x2019; perceptions of ChatGPT and similar artificial intelligence tools are not well known. Understanding these attitudes is important to inform the best approaches to exploring their use in medicine.</p></sec><sec><title>Objective</title><p>Our aim was to evaluate the health care professionals&#x2019; awareness and perceptions regarding potential applications of ChatGPT in the medical field, including potential benefits and challenges of adoption.</p></sec><sec sec-type="methods"><title>Methods</title><p>We designed a 33-question online survey that was distributed among health care professionals via targeted emails and professional Twitter and LinkedIn accounts. The survey included a range of questions to define respondents&#x2019; demographic characteristics, familiarity with ChatGPT, perceptions of this tool&#x2019;s usefulness and reliability, and opinions on its potential to improve patient care, research, and education efforts.</p></sec><sec sec-type="results"><title>Results</title><p>One hundred and fifteen health care professionals from 21 countries responded to the survey, including physicians, nurses, researchers, and educators. Of these, 101 (87.8%) had heard of ChatGPT, mainly from peers, social media, and news, and 77 (76.2%) had used ChatGPT at least once. Participants found ChatGPT to be helpful for writing manuscripts (n=31, 45.6%), emails (n=25, 36.8%), and grants (n=12, 17.6%); accessing the latest research and evidence-based guidelines (n=21, 30.9%); providing suggestions on diagnosis or treatment (n=15, 22.1%); and improving patient communication (n=12, 17.6%). 
Respondents also felt that the ability of ChatGPT to access and summarize research articles (n=22, 46.8%), provide quick answers to clinical questions (n=15, 31.9%), and generate patient education materials (n=10, 21.3%) was helpful. However, there are concerns regarding the use of ChatGPT, for example, the accuracy of responses (n=14, 29.8%), limited applicability in specific practices (n=18, 38.3%), and legal and ethical considerations (n=6, 12.8%), mainly related to plagiarism or copyright violations. Participants stated that safety protocols such as data encryption (n=63, 62.4%) and access control (n=52, 51.5%) could assist in ensuring patient privacy and data security.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Our findings show that ChatGPT use is widespread among health care professionals in daily clinical, research, and educational activities. The majority of our participants found ChatGPT to be useful; however, there are concerns about patient privacy, data security, and its legal and ethical issues as well as the accuracy of its information. 
Further studies are required to understand the impact of ChatGPT and other large language models on clinical, educational, and research outcomes, and the concerns regarding its use must be addressed systematically and through appropriate methods.</p></sec></abstract><kwd-group><kwd>ChatGPT</kwd><kwd>LLM</kwd><kwd>global</kwd><kwd>health care professionals</kwd><kwd>large language model</kwd><kwd>language model</kwd><kwd>chatbot</kwd><kwd>AI</kwd><kwd>diagnostic accuracy</kwd><kwd>efficiency</kwd><kwd>treatment planning</kwd><kwd>patient outcome</kwd><kwd>patient care</kwd><kwd>survey</kwd><kwd>physicians</kwd><kwd>nurses</kwd><kwd>educators</kwd><kwd>patient communication</kwd><kwd>clinical</kwd><kwd>educational</kwd><kwd>utilization</kwd><kwd>artificial intelligence</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Large language model (LLM) refers to advanced artificial intelligence (AI) models designed for natural language processing tasks. LLMs are trained on vast amounts of text data and use deep learning techniques to understand and generate human-like language. They have helped transform various fields, including medicine [<xref ref-type="bibr" rid="ref1">1</xref>]. Some examples of the most popular LLMs are LLaMA by Meta, Orca and Phi-1 by Microsoft, BLOOM, PaLM2 by Google, and GPT by OpenAI. ChatGPT, a chatbot powered by GPT-3/4, was released by OpenAI in November 2022, incorporating billions of parameters that enable it to comprehend and generate human-like text with the capability of context creation. Its intuitive interface and capacity for prompt engineering have enabled diverse applications across domains [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>In medicine, recent studies have demonstrated ChatGPT&#x2019;s potential to support clinical decision-making, summarize complex medical data, and streamline documentation processes. 
For instance, ChatGPT has been evaluated for its ability to generate discharge summaries, assist in developing differential diagnoses, and simplify patient communication [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Its role in medical education has also been explored, demonstrating its utility in preparing students for licensing exams like the United States Medical Licensing Examination (USMLE) and enhancing self-directed learning through case-based scenarios [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. ChatGPT was also shown to be capable of defining and answering clinical vignettes and achieved &#x003E;60% of the threshold on the USMLE, which is the passing score for all three exams [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Additionally, its ability to provide personalized health education and assist in chronic disease management has been highlighted as a promising avenue for improving patient outcomes [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>The integration of ChatGPT into health care settings is accelerating, with a growing body of literature examining its applications. Despite these advancements, significant challenges remain. Concerns about data privacy, ethical implications, and the accuracy of AI-generated content persist as barriers to widespread adoption [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Additionally, little is known regarding global health care professionals&#x2019; perspectives and the extent and impact of ChatGPT&#x2019;s integration in health care settings [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Most studies to date, have been limited to localized settings or specific subgroups. 
Yet, successful and ethical integration of ChatGPT into health care workflows depends heavily on end-user acceptance, awareness of limitations, and perceptions regarding safety, usability, and value [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>This study aimed to evaluate health care professionals&#x2019; awareness and perceptions of ChatGPT, with a focus on its applications, challenges, and utility across clinical, educational, and research settings. We surveyed a diverse group of health care professionals&#x2014;including physicians, nurses, researchers, and educators&#x2014;from multiple countries and practice settings. Using a cross-sectional survey design, we collected data on their familiarity with ChatGPT, how and why they used it, and their concerns about its integration. Our a priori hypothesis was that while many health care professionals would recognize ChatGPT&#x2019;s potential benefits, such as improving efficiency, communication, and access to knowledge, they would also express concerns regarding ethical, legal, and accuracy-related issues.</p><p>This study offers timely insights for health care leaders, educators, and policymakers considering the responsible adoption of generative AI tools. By reflecting on global perspectives from frontline users, our findings may help shape discussions on how to balance innovation with safety and trust in clinical AI applications.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><p>This study was conducted as a cross-sectional survey between April 20 and July 3, 2023 (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p><sec id="s2-1"><title>Survey Instrument Development and Validation</title><p>The questionnaire used in this study was developed de novo by the research team. 
The design process was informed by the research team&#x2019;s multidisciplinary experience in medicine, education, and digital health, as well as the evolving discourse around AI in health care. To assist with rapid prototyping, the research team used ChatGPT (OpenAI) to generate the first draft of the questionnaire. This initial draft provided a foundation for question phrasing and thematic organization. The final survey was iteratively refined by the study investigators to ensure clinical and contextual relevance.</p><p>To enhance clarity and assess feasibility, the questionnaire was piloted informally among five health care researchers affiliated with our institution. Their feedback informed improvements in question wording, branching logic, and estimated completion time (approximately 5 minutes). No formal psychometric validation was conducted.</p><p>The final survey included 33 questions and was distributed electronically using Research Electronic Data Capture (REDCap) (version 13.1.30; Vanderbilt University) [<xref ref-type="bibr" rid="ref13">13</xref>]. The questionnaire was structured around six thematic domains: (1) respondent demographics and work environment, (2) awareness and familiarity with ChatGPT, (3) frequency and purpose of use, (4) perceived benefits and challenges of ChatGPT in daily practice, (5) views on ethical, legal, and data security concerns, and (6) future expectations and training needs. The questionnaire incorporated branching logic to adapt follow-up questions based on initial responses&#x2014;for example, only respondents who reported using ChatGPT were asked about specific applications or frequency of use. A visual summary of the questionnaire flow and branching logic is provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. 
The final instrument has been reported in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-2"><title>Participants and Sampling Strategy</title><p>We used a convenience sampling approach. The questionnaire was distributed to health care professionals via targeted emails, and professional Twitter, LinkedIn, and Instagram accounts using a snowball technique [<xref ref-type="bibr" rid="ref14">14</xref>]. No predefined inclusion or exclusion criteria were applied beyond the requirement of being a health care professional (eg, physician, nurse, educator, researcher). There were no regional or institutional restrictions. As the survey was open and anonymous, we did not estimate a denominator or calculate a response rate. For the purposes of this study, we defined the application of ChatGPT in the medical field broadly to include its use in clinical care, research, medical education, and health care&#x2013;related administrative tasks. This inclusive definition reflects the multifaceted roles that health care professionals fulfill and acknowledges that tools such as ChatGPT may support a wide range of activities beyond direct patient care, such as writing grants, academic correspondence, and synthesizing medical literature. Survey items were designed to capture this broad spectrum of use across domains relevant to daily professional practice.</p><p>Demographic information of participants was summarized. Among those familiar with ChatGPT, opinions on the tool and potential dissemination resources were assessed. For those who had not used it, barriers to usage were examined (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). Participants with experience using ChatGPT were also asked about perceived challenges and approaches for enhancing usability. Summary statistics were provided as numbers and frequencies. 
Comparative analyses were conducted using the <italic>&#x03C7;</italic><sup>2</sup> test, with a two-sided <italic>P</italic> value &#x003C;.05 considered statistically significant. JMP Pro (version 14.1.0 software; SAS Institute Inc.) was used for the analyses.</p></sec><sec id="s2-3"><title>Ethical Considerations</title><p>The study protocol was evaluated by the Mayo Clinic institutional review board and was determined to be exempt under 45 CFR 46.102 of the Code of Federal Regulations (2/28/2023). No personally identifying information was collected, and all data were fully anonymous. Study participation was voluntary and survey completion was considered consent. All survey responses were stored on secure, access-restricted servers in compliance with institutional data protection policies.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Main Findings</title><p>A total of 115 health care professionals from 21 countries responded to the survey. 
<xref ref-type="table" rid="table1">Table 1</xref> displays a summary of their demographic information (<xref ref-type="fig" rid="figure1">Figures 1</xref>&#x2013;<xref ref-type="fig" rid="figure2">2</xref>).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Baseline characteristics.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variables</td><td align="left" valign="bottom">Participants (N=115), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Age (years)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>20&#x2010;29</td><td align="left" valign="top">30 (26.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>30&#x2010;39</td><td align="left" valign="top">27 (23.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>40&#x2010;49</td><td align="left" valign="top">26 (22.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>50&#x2010;59</td><td align="left" valign="top">10 (8.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;60</td><td align="left" valign="top">22 (19.1)</td></tr><tr><td align="left" valign="top">Sex<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">45 (39.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" 
valign="top">68 (59.6)</td></tr><tr><td align="left" valign="top">Profession<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Educator</td><td align="left" valign="top">16 (14.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>NP/PA<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">5 (4.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physician</td><td align="left" valign="top">62 (54.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Researcher</td><td align="left" valign="top">25 (21.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>RN<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td><td align="left" valign="top">5 (4.4)</td></tr><tr><td align="left" valign="top">Area/ Unit</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Internal medicine</td><td align="left" valign="top">20 (17.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Surgery</td><td align="left" valign="top">15 (13)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Emergency medicine</td><td align="left" valign="top">10 (8.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Psychiatry and Neurology</td><td 
align="left" valign="top">8 (7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Anesthesiology/ICU<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">10 (8.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Obstetrics and Gynecology</td><td align="left" valign="top">7 (6.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Radiology</td><td align="left" valign="top">6 (5.2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Others<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="left" valign="top">39 (33.9)</td></tr><tr><td align="left" valign="top">Years since graduation</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;5</td><td align="left" valign="top">43 (37.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>5&#x2010;10</td><td align="left" valign="top">27 (23.5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>11&#x2010;20</td><td align="left" valign="top">16 (13.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;20</td><td align="left" valign="top">29 (25.2)</td></tr><tr><td align="left" valign="top">Work length in hospital (years)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;5</td><td align="left" valign="top">66 (57.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>5&#x2010;10</td><td align="left" valign="top">11 (9.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>11&#x2010;20</td><td align="left" valign="top">19 (16.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;20</td><td align="left" valign="top">18 (15.8)</td></tr><tr><td align="left" valign="top">Country of work</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>United States</td><td align="left" valign="top">53 (46.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Turkey</td><td align="left" valign="top">24 (20.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Tanzania</td><td align="left" valign="top">7 (6.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>China</td><td align="left" valign="top">6 (5.2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Croatia</td><td align="left" valign="top">3 (2.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Russia</td><td align="left" valign="top">2 (1.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>France</td><td align="left" 
valign="top">2 (1.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Canada</td><td align="left" valign="top">2 (1.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Italy</td><td align="left" valign="top">2 (1.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Saudi Arabia</td><td align="left" valign="top">2 (1.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Others<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="left" valign="top">12 (10.4)</td></tr><tr><td align="left" valign="top" colspan="2">Native language</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>English</td><td align="left" valign="top">28 (24.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Turkish</td><td align="left" valign="top">32 (27.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Spanish</td><td align="left" valign="top">10 (8.7)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Chinese (Mandarin)</td><td align="left" valign="top">9 (7.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Arabic</td><td align="left" valign="top">5 (4.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Others<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="left" 
valign="top">31 (26.8)</td></tr><tr><td align="left" valign="top">Place of employment<sup><xref ref-type="table-fn" rid="table1fn6">f</xref></sup></td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Academic hospitals and medical centers</td><td align="left" valign="top">72 (64.2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Community hospitals</td><td align="left" valign="top">9 (8.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Private hospitals</td><td align="left" valign="top">13 (11.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Public hospitals</td><td align="left" valign="top">15 (13.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Free clinics</td><td align="left" valign="top">6 (5.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Others<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="left" valign="top">6 (5.3)</td></tr><tr><td align="left" valign="top">Frequency of ChatGPT usage (n=68)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multiple times per day</td><td align="left" valign="top">14 (20.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Once per day</td><td align="left" valign="top">3 (4.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Three 
to five times per week</td><td align="left" valign="top">14 (20.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Less than three times a week</td><td align="left" valign="top">13 (19.1)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Only tried it few times</td><td align="left" valign="top">24 (35.3)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Due to lack of responses, missing data are not included in the reported totals; as a result, some category counts may not sum to the overall sample size.</p></fn><fn id="table1fn2"><p><sup>b</sup>NP/PA: nurse practitioner/physician assistant.</p></fn><fn id="table1fn3"><p><sup>c</sup>RN: registered nurse.</p></fn><fn id="table1fn4"><p><sup>d</sup>ICU: intensive care unit.</p></fn><fn id="table1fn5"><p><sup>e</sup>For Others see <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p></fn><fn id="table1fn6"><p><sup>f</sup>The subcategories are not mutually exclusive.</p></fn></table-wrap-foot></table-wrap><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>ChatGPT usage based on participants&#x2019; age, gender, and country of work.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v11i1e58801_fig01.png"/></fig><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>ChatGPT usage based on participants&#x2019; years since graduation, length of work in the current unit, and profession.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v11i1e58801_fig02.png"/></fig><p>Of the 115 participants, 101 (87.8%) had heard of ChatGPT, mainly from social media (n=33, 32.7%) and peers or colleagues (n=43, 42.6%). 
Of those, 77 (76.2%) had used ChatGPT before, with 18 (23.4%) using it multiple times per day and 23 (29.9%) having tried it only a few times. Moreover, 71 out of 77 (92.2%) participants used it in English. Among these, 50 were not native English speakers, and only 16/50 (32%) speakers used it both in English and their native language (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Furthermore, variations in ChatGPT usage in daily practice were observed between participants using ChatGPT in English versus those who used it in their native language (<xref ref-type="fig" rid="figure4">Figure 4</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Ratio of native language use versus English use among participants while using ChatGPT.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v11i1e58801_fig03.png"/></fig><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Main reasons for using ChatGPT in daily practice based on the language used by the participants.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v11i1e58801_fig04.png"/></fig><p>The most common reasons to use ChatGPT included writing papers (n=29, 44.6%) and emails (n=25, 38.5%), and obtaining suggestions on diagnosis or treatment (n=14, 21.5%) (<xref ref-type="table" rid="table2">Table 2</xref>). 
Additional reasons for ChatGPT usage by health care professionals in daily practice are shared in <xref ref-type="table" rid="table3">Table 3</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>ChatGPT usefulness based on used features in daily practice.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">ChatGPT features</td><td align="left" valign="bottom">Participants (n=68), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Usefulness in daily practice</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;Not important</td><td align="left" valign="top">14 (20.6)</td></tr><tr><td align="left" valign="top">&#x2003;Slightly important</td><td align="left" valign="top">21 (30.9)</td></tr><tr><td align="left" valign="top">&#x2003;Moderately important</td><td align="left" valign="top">13 (19.1)</td></tr><tr><td align="left" valign="top">&#x2003;Important</td><td align="left" valign="top">13 (19.1)</td></tr><tr><td align="left" valign="top">&#x2003;Very important</td><td align="left" valign="top">7 (10.3)</td></tr><tr><td align="left" valign="top">ChatGPT&#x2019;s usefulness, 0 (most negative experience) to 10 (most positive experience)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;&#x2265;7</td><td align="left" valign="top">42 (61.8)</td></tr><tr><td align="left" valign="top">&#x2003;4-5-6</td><td align="left" valign="top">19 (27.9)</td></tr><tr><td align="left" valign="top">&#x2003;&#x2264;3</td><td align="left" valign="top">7 (10.3)</td></tr><tr><td align="left" valign="top">Most useful features</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">&#x2003;To access and summarize research articles efficiently</td><td align="left" valign="top">22 (46.8)</td></tr><tr><td align="left" valign="top">&#x2003;To provide quick answers to clinical questions</td><td align="left" valign="top">15 
(31.9)</td></tr><tr><td align="left" valign="top">&#x2003;To provide patient education materials</td><td align="left" valign="top">10 (21.3)</td></tr><tr><td align="left" valign="top">&#x2003;To write emails, grants, and papers</td><td align="left" valign="top">25 (53.2)</td></tr></tbody></table></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Percentage of participants&#x2019; main reasons for using ChatGPT in daily practice (multiple choice questions).</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Main reason for using ChatGPT in daily practice</td><td align="left" valign="bottom">Participants (n=68), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Writing papers</td><td align="left" valign="top">31 (45.6)</td></tr><tr><td align="left" valign="top">Writing emails</td><td align="left" valign="top">25 (36.8)</td></tr><tr><td align="left" valign="top">To access the latest research and evidence-based guidelines</td><td align="left" valign="top">21 (30.9)</td></tr><tr><td align="left" valign="top">To access suggestions on diagnosis or treatment</td><td align="left" valign="top">15 (22.1)</td></tr><tr><td align="left" valign="top">To improve patient communication</td><td align="left" valign="top">12 (17.6)</td></tr><tr><td align="left" valign="top">To write grants</td><td align="left" valign="top">12 (17.6)</td></tr></tbody></table></table-wrap></sec><sec id="s3-2"><title>Incorporation of ChatGPT Into Daily Practice</title><p>Of the 77 participants who used ChatGPT, 36 (46.8%) used ChatGPT in their clinical practice, 58 (75.3%) used it for research, and 56 out of 77 (72.7%) used it for educational activities (<xref ref-type="fig" rid="figure5">Figure 5</xref>).</p><p>Among all respondents, 42/101 (43.6%) participants agreed that they would not be concerned if their clinician used ChatGPT while providing care to them if they were the patient, whereas 32 (32.7%) 
disagreed and preferred that their clinician not use ChatGPT during care.</p><p>The majority (n=79, 78.2%) of participants agreed that ChatGPT could be useful for medical or health care professional education. In nonclinical settings, participants stated that ChatGPT could help to reduce workload (n=57, 73.1%), improve efficiency by automating certain tasks (n=51, 65.4%), offer greater access and efficiently summarize research articles (n=52, 66.7%), create patient educational materials (n=49, 62.8%), provide quick answers to questions (n=48, 61.5%), and enhance the ability to write papers (n=37, 47.4%).</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Factors contributing to use and nonuse of ChatGPT. The activities are not mutually exclusive and therefore, the total number of participants may exceed 115.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v11i1e58801_fig05.png"/></fig></sec><sec id="s3-3"><title>Challenges for Integrating ChatGPT Into Daily Practice</title><p>The main reasons for respondents not using ChatGPT included concerns about the accuracy of ChatGPT responses (n=14, 29.8%), limited applicability to their practice (n=18, 38.3%), legal and ethical considerations (n=6, 12.8%), limited diagnostic capabilities (n=4, 8.5%), lack of time (n=3, 6.4%), and lack of interest (n=2, 4.3%).</p><p>As one of the significant barriers is legal and ethical considerations, participants were asked to define plagiarism or copyright violations. 
Participants defined it as copying text or ideas from ChatGPT and using it for another source without citation (n=64, 63.4%), paraphrasing or summarizing content from ChatGPT and using it for another source without citation (n=41, 40.6%), using images from ChatGPT without permission (n=36, 35.6%), and reusing or repurposing content from ChatGPT that was previously created for another purpose without permission (n=44, 43.6%).</p><p>In response to the legal and ethical challenges, participants proposed several solutions for integrating ChatGPT into daily practice. Participants stated that data encryption (n=63, 62.4%), access control (n=52, 51.5%), user authentication such as two-factor authentication (n=48, 47.5%), compliance with regulations such as the Health Insurance Portability and Accountability Act or the General Data Protection Regulation (n=62, 61.4%), transparency and informed consent (n=53, 52.5%), and regular training and awareness for health care professionals (n=58, 57.4%) are necessary to ensure patient privacy and data security.</p></sec><sec id="s3-4"><title>Views on ChatGPT&#x2019;s Success and Other Possible Uses</title><p>When asked whether the participants knew ChatGPT had performed with &#x2265;60% accuracy on the USMLE, 52 (51.5%) participants indicated they had heard this before. 
Additionally, 76 (68.5%) participants reported that they had not used any other AI platform.</p><p>Participants stated that ChatGPT can improve patient outcomes through personalized health education by providing tailored information and support (n=76, 75.2%); assisting with medication management through reminders and refill prescriptions, and providing information on side effects and interactions (n=55, 54.5%); providing telemedicine support for health care professionals to conduct virtual consultations, collect patient data, and provide decision support (n=48, 50%); aiding in symptom triage for patients (n=49, 48.5%); and offering mental health support by providing guidance on self-management techniques and coping strategies (n=49, 48.5%).</p><p>The distribution of responses based on different levels of postgraduate experience is reported in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>. This distribution was largely balanced between the participants with fewer than 10 years and those with 10 or more years of experience.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study offers a global perspective on how health care professionals perceive and use ChatGPT in clinical, research, and educational contexts. Our findings demonstrate that awareness and adoption of ChatGPT are already widespread, with 76.2% of respondents having used the tool at least once. Participants primarily reported using ChatGPT for manuscript and email writing, grant application preparation, accessing research articles, clinical guideline support, diagnostic suggestions, and improving patient communication. Notably, more than three-quarters of participants agreed that ChatGPT holds potential utility in medical education, highlighting its ability to enhance learning experiences and facilitate task automation. 
Moreover, our study indicates that health care professionals endorse its use among colleagues. However, concerns about data privacy, ethical risks such as plagiarism, and the accuracy of AI-generated content remained as significant barriers to broader adoption. Proposed solutions included implementing safety protocols such as data encryption, access control, and regulatory compliance. In exploratory analyses comparing ChatGPT use, we did not identify significant differences across professional experience levels, which might be due to the limited sample size. Due to the wide range and uneven distribution of medical subspecialties represented, we were not able to conduct a formal comparison across specialties.</p></sec><sec id="s4-2"><title>Implications of Findings</title><p>Our findings highlight the broad and flexible potential of ChatGPT in health care workflows. In clinical practice, ChatGPT is perceived as a tool that can enhance efficiency by automating routine documentation tasks, such as generating draft discharge summaries and patient letters. It also supports decision-making by offering fast access to evidence summaries and aids communication through the creation of patient-friendly materials [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. In medical education, participants identified ChatGPT as a valuable educational supplement&#x2014;one that could be incorporated into curricula to simulate real-world clinical scenarios and assist in preparing students for standardized exams like the USMLE [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. It can also support personalized learning experiences tailored to individual needs and self-directed learning pathways. 
In research, ChatGPT was valued for its ability in grant writing, literature synthesis, and ideation, especially in the early stages of manuscript development or protocol design [<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>These findings underscore the need for structured training programs and ethical guidelines to support responsible integration of AI tools. Implementing human-in-the-loop systems, in which clinicians oversee and validate AI outputs, may enhance safety, and build user confidence while mitigating risks associated with biases or inaccuracies in AI-generated content [<xref ref-type="bibr" rid="ref17">17</xref>].</p></sec><sec id="s4-3"><title>Comparison to the Literature</title><p>Our findings align with prior studies that underscore ChatGPT&#x2019;s potential in health care. Cascella et al [<xref ref-type="bibr" rid="ref2">2</xref>] described ChatGPT&#x2019;s potential to reduce administrative burden and assist with clinical reasoning, which mirrors participants&#x2019; reported use of ChatGPT for documentation and clinical queries. In medical education, Gilson et al [<xref ref-type="bibr" rid="ref8">8</xref>] showed that ChatGPT achieved passing scores on all three components of the USMLE, highlighting its utility in medical education. Similarly, Kung et al [<xref ref-type="bibr" rid="ref9">9</xref>] emphasized its role in creating standardized templates for patient education materials. These findings also align with our participants&#x2019; views on its usefulness for both learners and patients alike. Sallam [<xref ref-type="bibr" rid="ref18">18</xref>] highlighted ChatGPT&#x2019;s capacity to process and summarize complex medical data efficiently, which our participants also leveraged for research and evidence access.</p><p>However, our study adds unique insights by capturing global perspectives from diverse practice settings. 
Unlike prior reports focused on specific institutions or national populations, our results reflect a cross-disciplinary, international sample, offering a broader view of how generative AI is being perceived across diverse practice settings.</p><p>The main reasons behind the lack of use of ChatGPT in daily practice were the nonapplicability to their practice, lack of information regarding its use, concerns about the accuracy of ChatGPT&#x2019;s responses, and legal and ethical considerations. The reason behind not using ChatGPT due to lack of information may be partially attributed to insufficient training opportunities for health care professionals in the use of generative AI. Previous studies have also indicated similar concerns regarding its implementation [<xref ref-type="bibr" rid="ref19">19</xref>]. For instance, the concern for the spread of wrong information is a major obstacle, and different languages may have inconsistent results [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. Many studies have shown that up to 96.7% of users are concerned about ethical and legal obstacles [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref18">18</xref>], particularly plagiarism [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>], and copyright issues [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. In a study conducted by a university in Sweden, 62% of students considered the use of chatbots for assignments and exams as cheating [<xref ref-type="bibr" rid="ref24">24</xref>]. Our study showed that 86 out of 101 participants defined copying from ChatGPT as plagiarism. These concerns show that the implementation of ChatGPT into clinical settings will require a transition period supported by extensive safety measures. 
Health care professional leaders need to work with technology experts to develop learning objectives, curricula, assessments and evaluations, and safety protocols for this emerging technology.</p><p>Regarding the accuracy of ChatGPT&#x2019;s responses, our study shows that health care professionals identified this as having a paramount importance. Similar studies have shown that ChatGPT should be used with caution due to potential biases of AI, which may lead to the generation of inaccurate information. When used in the health care system, this could potentially lead to harmful consequences [<xref ref-type="bibr" rid="ref25">25</xref>].</p></sec><sec id="s4-4"><title>Educational Implications</title><p>The educational relevance of our findings is especially important. Our study suggests several opportunities:</p><list list-type="bullet"><list-item><p>Curriculum design: Educators can incorporate ChatGPT into simulation- and case-based learning modules to foster clinical reasoning and application of evidence-based medicine.</p></list-item><list-item><p>Needs Assessment: Educators may use baseline familiarity and usage patterns to tailor AI training initiatives and address gaps in knowledge or ethical understanding.</p></list-item><list-item><p>Institutional Strategies: ChatGPT may serve as a tool in flipped classrooms, interactive tutorials, and self-directed learning, offering real-time feedback and access to guideline-driven responses.</p></list-item><list-item><p>Learner Outcomes: By providing immediate feedback and access to evidence-based guidelines, ChatGPT has the potential to improve learner performance on standardized assessments [<xref ref-type="bibr" rid="ref16">16</xref>].</p></list-item></list><p>Additionally, ChatGPT&#x2019;s ability to generate accessible explanations for patients could enhance health literacy and improve communication between physicians and patients.</p></sec><sec id="s4-5"><title>Strengths and Limitations</title><p>This study has 
several strengths. We examined ChatGPT adoption from a global perspective. By including participants from 21 countries and various clinical and academic backgrounds, the study provides a valuable overview of current usage patterns and attitudes toward generative AI tools in health care. The survey instrument was comprehensive, capturing a wide range of use cases and concerns across clinical, research, and educational domains.</p><p>However, several limitations must be acknowledged. Although participants were from diverse countries, they are unlikely to represent the full range of health care professionals within their regions. The sample was likely skewed toward individuals with greater access to technology and academic networks, especially in countries where access to ChatGPT or certain social media platforms may be restricted or limited. Therefore, findings should be interpreted with caution and may not be generalized to all health care professionals in low-resource or digitally restricted settings. The use of convenience and snowball sampling likely introduced self-selection bias, attracting participants with preexisting interest in technology or AI. Because of this sampling method, we could not calculate a response rate. Most respondents were from academic hospital settings in the United States, which may limit applicability to other regions or practice environments. Conducting the survey in English may have limited the global inclusivity. Given the swift pace of technological advancements, particularly in generative AI applications such as ChatGPT and the continuous process of learning and integration by health care professionals, the present survey may not accurately capture the current perceptions and attitudes of doctors and nurses toward these technologies [<xref ref-type="bibr" rid="ref26">26</xref>], limiting the temporal relevance of our findings. 
Lastly, although our survey included open-ended questions, multiple-choice questions may have led participants to an available answer.</p></sec><sec id="s4-6"><title>Future Directions</title><p>Further research is needed to address unanswered questions:</p><list list-type="order"><list-item><p>Long-term impact: Studies should evaluate how ChatGPT influences clinical outcomes, patient satisfaction, and educational performance over time.</p></list-item><list-item><p>Ethical frameworks: There is a pressing need for the development of institutional and regulatory guidelines governing AI use in health care [<xref ref-type="bibr" rid="ref17">17</xref>].</p></list-item><list-item><p>Cross-language applications: Investigating how ChatGPT performs across different languages could help improve accessibility for non-English-speaking populations.</p></list-item><list-item><p>Training programs: Evidence-based strategies are needed to guide health care professionals in the ethical and effective use of generative AI technologies.</p></list-item></list></sec><sec id="s4-7"><title>Conclusion</title><p>ChatGPT usage is expanding within health care settings due to its variety of capabilities, and the majority of health care professionals are likely aware of its availability. It can improve the caliber of writing papers, grants, and emails; help health care professionals in accessing the latest guidelines, diagnosis, and treatment suggestions; and possibly improve patient communication. There are several concerns related to the implementation of LLMs in clinical practice, including legal, ethical, and operational issues. Further research is necessary to clarify the role of ChatGPT and LLM-based generative AI tools in health care education, research, and clinical practice.</p></sec></sec></body><back><ack><p>We thank Dr. 
Ognjen Gajic for critically reviewing the manuscript.</p></ack><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb3">USMLE</term><def><p>United States Medical Licensing Examination</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>K</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>X</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Revolutionizing health care: the transformative impact of large language models in medicine</article-title><source>J Med Internet Res</source><year>2025</year><month>01</month><day>7</day><volume>27</volume><fpage>e59069</fpage><pub-id pub-id-type="doi">10.2196/59069</pub-id><pub-id pub-id-type="medline">39773666</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cascella</surname><given-names>M</given-names> </name><name name-style="western"><surname>Montomoli</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bellini</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bignami</surname><given-names>E</given-names> </name></person-group><article-title>Evaluating the feasibility of ChatGPT in healthcare: an analysis of multiple clinical and research scenarios</article-title><source>J Med 
Syst</source><year>2023</year><month>03</month><day>4</day><volume>47</volume><issue>1</issue><fpage>33</fpage><pub-id pub-id-type="doi">10.1007/s10916-023-01925-4</pub-id><pub-id pub-id-type="medline">36869927</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dave</surname><given-names>T</given-names> </name><name name-style="western"><surname>Athaluri</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name></person-group><article-title>ChatGPT in medicine: an overview of its applications, advantages, limitations, future prospects, and ethical considerations</article-title><source>Front Artif Intell</source><year>2023</year><volume>6</volume><fpage>1169595</fpage><pub-id pub-id-type="doi">10.3389/frai.2023.1169595</pub-id><pub-id pub-id-type="medline">37215063</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>He</surname><given-names>D</given-names> </name></person-group><article-title>The potential applications and challenges of chatgpt in the medical field</article-title><source>Int J Gen Med</source><year>2024</year><volume>17</volume><fpage>817</fpage><lpage>826</lpage><pub-id pub-id-type="doi">10.2147/IJGM.S456659</pub-id><pub-id pub-id-type="medline">38476626</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tangsrivimol</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Darzidehkalani</surname><given-names>E</given-names> </name><name 
name-style="western"><surname>Virk</surname><given-names>HUH</given-names> </name><etal/></person-group><article-title>Benefits, limits, and risks of ChatGPT in medicine</article-title><source>Front Artif Intell</source><year>2025</year><volume>8</volume><fpage>1518049</fpage><pub-id pub-id-type="doi">10.3389/frai.2025.1518049</pub-id><pub-id pub-id-type="medline">39949509</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sallam</surname><given-names>M</given-names> </name><name name-style="western"><surname>Salim</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Barakat</surname><given-names>M</given-names> </name><name name-style="western"><surname>Al-Tammemi</surname><given-names>AB</given-names> </name></person-group><article-title>ChatGPT applications in medical, dental, pharmacy, and public health education: a descriptive study highlighting the advantages and limitations</article-title><source>Narra J</source><year>2023</year><month>04</month><volume>3</volume><issue>1</issue><fpage>e103</fpage><pub-id pub-id-type="doi">10.52225/narra.v3i1.103</pub-id><pub-id pub-id-type="medline">38450035</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thomae</surname><given-names>AV</given-names> </name><name name-style="western"><surname>Witt</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Barth</surname><given-names>J</given-names> </name></person-group><article-title>Integration of ChatGPT into a course for medical students: explorative study on teaching scenarios, students&#x2019; perception, and applications</article-title><source>JMIR Med Educ</source><year>2024</year><month>08</month><day>22</day><volume>10</volume><fpage>e50545</fpage><pub-id 
pub-id-type="doi">10.2196/50545</pub-id><pub-id pub-id-type="medline">39177012</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gilson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Safranek</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>T</given-names> </name><etal/></person-group><article-title>How does ChatGPT perform on the United States Medical Licensing Examination (USMLE)? The implications of large language models for medical education and knowledge assessment</article-title><source>JMIR Med Educ</source><year>2023</year><month>02</month><day>8</day><volume>9</volume><fpage>e45312</fpage><pub-id pub-id-type="doi">10.2196/45312</pub-id><pub-id pub-id-type="medline">36753318</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kung</surname><given-names>TH</given-names> </name><name name-style="western"><surname>Cheatham</surname><given-names>M</given-names> </name><name name-style="western"><surname>Medenilla</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Performance of ChatGPT on USMLE: potential for ai-assisted medical education using large language models</article-title><source>PLOS Digit Health</source><year>2023</year><month>02</month><volume>2</volume><issue>2</issue><fpage>e0000198</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000198</pub-id><pub-id pub-id-type="medline">36812645</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>SY</given-names> </name><name 
name-style="western"><surname>Kuo</surname><given-names>HY</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>SH</given-names> </name></person-group><article-title>Perceptions of ChatGPT in healthcare: usefulness, trust, and risk</article-title><source>Front Public Health</source><year>2024</year><volume>12</volume><fpage>1457131</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2024.1457131</pub-id><pub-id pub-id-type="medline">39346584</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Chua</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rickard</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lorenzo</surname><given-names>A</given-names> </name></person-group><article-title>ChatGPT and large language model (LLM) chatbots: the current state of acceptability and a proposal for guidelines on utilization in academic medicine</article-title><source>J Pediatr Urol</source><year>2023</year><month>10</month><volume>19</volume><issue>5</issue><fpage>598</fpage><lpage>604</lpage><pub-id pub-id-type="doi">10.1016/j.jpurol.2023.05.018</pub-id><pub-id pub-id-type="medline">37328321</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mesk&#x00F3;</surname><given-names>B</given-names> </name><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>The imperative for regulatory oversight of large language models (or generative AI) in healthcare</article-title><source>NPJ Digit Med</source><year>2023</year><month>07</month><day>6</day><volume>6</volume><issue>1</issue><fpage>120</fpage><pub-id 
pub-id-type="doi">10.1038/s41746-023-00873-0</pub-id><pub-id pub-id-type="medline">37414860</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Harris</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Taylor</surname><given-names>R</given-names> </name><name name-style="western"><surname>Minor</surname><given-names>BL</given-names> </name><etal/></person-group><article-title>The REDCap consortium: building an international community of software platform partners</article-title><source>J Biomed Inform</source><year>2019</year><month>07</month><volume>95</volume><fpage>103208</fpage><pub-id pub-id-type="doi">10.1016/j.jbi.2019.103208</pub-id><pub-id pub-id-type="medline">31078660</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rossi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Socci</surname><given-names>V</given-names> </name><name name-style="western"><surname>Pacitti</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Mental health outcomes among frontline and second-line health care workers during the Coronavirus Disease 2019 (COVID-19) pandemic in Italy</article-title><source>JAMA Netw Open</source><year>2020</year><month>05</month><day>1</day><volume>3</volume><issue>5</issue><fpage>e2010185</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2020.10185</pub-id><pub-id pub-id-type="medline">32463467</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Healthcare professionals and the public sentiment analysis of ChatGPT in clinical practice</article-title><source>Sci Rep</source><year>2025</year><volume>15</volume><issue>1</issue><fpage>1223</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-84512-y</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khan</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>AR</given-names> </name><name name-style="western"><surname>Munshi</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Assessing the performance of ChatGPT in medical ethical decision-making: a comparative study with USMLE-based scenarios</article-title><source>J Med Ethics</source><year>2025</year><month>01</month><day>25</day><fpage>jme-2024-110240</fpage><pub-id pub-id-type="doi">10.1136/jme-2024-110240</pub-id><pub-id pub-id-type="medline">39863417</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name></person-group><article-title>Ethical considerations of using ChatGPT in health care</article-title><source>J Med Internet 
Res</source><year>2023</year><month>08</month><day>11</day><volume>25</volume><fpage>e48009</fpage><pub-id pub-id-type="doi">10.2196/48009</pub-id><pub-id pub-id-type="medline">37566454</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sallam</surname><given-names>M</given-names> </name></person-group><article-title>ChatGPT utility in healthcare education, research, and practice: systematic review on the promising perspectives and valid concerns</article-title><source>Healthcare (Basel)</source><year>2023</year><month>03</month><day>19</day><volume>11</volume><issue>6</issue><fpage>887</fpage><pub-id pub-id-type="doi">10.3390/healthcare11060887</pub-id><pub-id pub-id-type="medline">36981544</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>De Angelis</surname><given-names>L</given-names> </name><name name-style="western"><surname>Baglivo</surname><given-names>F</given-names> </name><name name-style="western"><surname>Arzilli</surname><given-names>G</given-names> </name><etal/></person-group><article-title>ChatGPT and the rise of large language models: the new AI-driven infodemic threat in public health</article-title><source>Front Public Health</source><year>2023</year><volume>11</volume><fpage>1166120</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2023.1166120</pub-id><pub-id pub-id-type="medline">37181697</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chatterjee</surname><given-names>J</given-names> </name><name name-style="western"><surname>Dethlefs</surname><given-names>N</given-names> </name></person-group><article-title>This new conversational AI model can be your friend, 
philosopher, and guide... and even your worst enemy</article-title><source>Patterns (N Y)</source><year>2023</year><month>01</month><day>13</day><volume>4</volume><issue>1</issue><fpage>100676</fpage><pub-id pub-id-type="doi">10.1016/j.patter.2022.100676</pub-id><pub-id pub-id-type="medline">36699746</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stokel-Walker</surname><given-names>C</given-names> </name><name name-style="western"><surname>Van Noorden</surname><given-names>R</given-names> </name></person-group><article-title>What ChatGPT and generative AI mean for science</article-title><source>Nature</source><year>2023</year><month>02</month><volume>614</volume><issue>7947</issue><fpage>214</fpage><lpage>216</lpage><pub-id pub-id-type="doi">10.1038/d41586-023-00340-6</pub-id><pub-id pub-id-type="medline">36747115</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Howard</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Markov</surname><given-names>NS</given-names> </name><etal/></person-group><article-title>Comparing scientific abstracts generated by ChatGPT to real abstracts with detectors and blinded human reviewers</article-title><source>NPJ Digit Med</source><year>2023</year><month>04</month><day>26</day><volume>6</volume><issue>1</issue><fpage>75</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00819-6</pub-id><pub-id pub-id-type="medline">37100871</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rahimi</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Talebi Bezmin Abadi</surname><given-names>A</given-names> </name></person-group><article-title>ChatGPT and publication ethics</article-title><source>Arch Med Res</source><year>2023</year><month>04</month><volume>54</volume><issue>3</issue><fpage>272</fpage><lpage>274</lpage><pub-id pub-id-type="doi">10.1016/j.arcmed.2023.03.004</pub-id><pub-id pub-id-type="medline">36990890</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>St&#x00F6;hr</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ou</surname><given-names>AW</given-names> </name><name name-style="western"><surname>Malmstr&#x00F6;m</surname><given-names>H</given-names> </name></person-group><article-title>Perceptions and usage of AI chatbots among students in higher education across genders, academic levels and fields of study</article-title><source>Computers and Education: Artificial Intelligence</source><year>2024</year><month>12</month><volume>7</volume><fpage>100259</fpage><pub-id pub-id-type="doi">10.1016/j.caeai.2024.100259</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sivarajah</surname><given-names>U</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Olya</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mathew</surname><given-names>S</given-names> </name></person-group><article-title>Responsible artificial intelligence (AI) for digital health and medical analytics</article-title><source>Inf Syst Front</source><year>2023</year><month>06</month><day>5</day><volume>2023</volume><fpage>1</fpage><lpage>6</lpage><pub-id 
pub-id-type="doi">10.1007/s10796-023-10412-7</pub-id><pub-id pub-id-type="medline">37361886</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boscardin</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Gin</surname><given-names>B</given-names> </name><name name-style="western"><surname>Golde</surname><given-names>PB</given-names> </name><name name-style="western"><surname>Hauer</surname><given-names>KE</given-names> </name></person-group><article-title>ChatGPT and generative artificial intelligence for medical education: potential impact and opportunity</article-title><source>Acad Med</source><year>2024</year><month>01</month><day>1</day><volume>99</volume><issue>1</issue><fpage>22</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000005439</pub-id><pub-id pub-id-type="medline">37651677</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>ChatGPT Survey.</p><media xlink:href="mededu_v11i1e58801_app1.pdf" xlink:title="PDF File, 59 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Diagram explaining survey flow.</p><media xlink:href="mededu_v11i1e58801_app2.png" xlink:title="PNG File, 98 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Others within the demographic information table.</p><media xlink:href="mededu_v11i1e58801_app3.docx" xlink:title="DOCX File, 12 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>The distribution of answers to respondents with different levels of post-graduate experience.</p><media xlink:href="mededu_v11i1e58801_app4.docx" xlink:title="DOCX File, 24 KB"/></supplementary-material></app-group></back></article>