<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JME</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id>
      <journal-title>JMIR Medical Education</journal-title>
      <issn pub-type="epub">2369-3762</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e50658</article-id>
      <article-id pub-id-type="pmid">38133908</article-id>
      <article-id pub-id-type="doi">10.2196/50658</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Using ChatGPT for Clinical Practice and Medical Education: Cross-Sectional Survey of Medical Students’ and Physicians’ Perceptions</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Venkatesh</surname>
            <given-names>Kaushik</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chen</surname>
            <given-names>Tzeng-Ji</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Knoedler</surname>
            <given-names>Leonard</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Dergaa</surname>
            <given-names>Ismail</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Thirunavukarasu</surname>
            <given-names>Arun</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Tangadulrat</surname>
            <given-names>Pasin</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0346-7135</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Sono</surname>
            <given-names>Supinya</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3145-1907</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Tangtrakulwanich</surname>
            <given-names>Boonsin</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Orthopedics</institution>
            <institution>Faculty of Medicine</institution>
            <institution>Prince of Songkla University</institution>
            <addr-line>Floor 9 Rattanacheewarak Building</addr-line>
            <addr-line>15 Kanchanavanich Rd</addr-line>
            <addr-line>Hatyai, 90110</addr-line>
            <country>Thailand</country>
            <phone>66 74451601</phone>
            <email>boonsin.b@psu.ac.th</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0933-1669</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Orthopedics</institution>
        <institution>Faculty of Medicine</institution>
        <institution>Prince of Songkla University</institution>
        <addr-line>Hatyai</addr-line>
        <country>Thailand</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Division of Family and Preventive Medicine</institution>
        <institution>Faculty of Medicine</institution>
        <institution>Prince of Songkla University</institution>
        <addr-line>Hatyai</addr-line>
        <country>Thailand</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Boonsin Tangtrakulwanich <email>boonsin.b@psu.ac.th</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>22</day>
        <month>12</month>
        <year>2023</year>
      </pub-date>
      <volume>9</volume>
      <elocation-id>e50658</elocation-id>
      <history>
        <date date-type="received">
          <day>8</day>
          <month>7</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>28</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>17</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>11</day>
          <month>12</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Pasin Tangadulrat, Supinya Sono, Boonsin Tangtrakulwanich. Originally published in JMIR Medical Education (https://mededu.jmir.org), 22.12.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on https://mededu.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mededu.jmir.org/2023/1/e50658" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>ChatGPT is a well-known large language model–based chatbot. It could be used in the medical field in many aspects. However, some physicians are still unfamiliar with ChatGPT and are concerned about its benefits and risks.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We aim to evaluate the perception of physicians and medical students toward using ChatGPT in the medical field.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A web-based questionnaire was sent to medical students, interns, residents, and attending staff with questions regarding their perception toward using ChatGPT in clinical practice and medical education. Participants were also asked to rate their perception of ChatGPT’s generated response about knee osteoarthritis.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Participants included 124 medical students, 46 interns, 37 residents, and 32 attending staff. After reading ChatGPT’s response, 132 of the 239 (55.2%) participants had a positive rating about using ChatGPT for clinical practice. The proportion of positive answers was significantly lower in graduated physicians (48/115, 42%) compared with medical students (84/124, 68%; <italic>P</italic>&#60;.001). Participants listed a lack of a patient-specific treatment plan, updated evidence, and a language barrier as ChatGPT’s pitfalls. Regarding using ChatGPT for medical education, the proportion of positive responses was also significantly lower in graduate physicians (71/115, 62%) compared to medical students (103/124, 83.1%; <italic>P</italic>&#60;.001). Participants were concerned that ChatGPT’s response was too superficial, might lack scientific evidence, and might need expert verification.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Medical students generally had a positive perception of using ChatGPT for guiding treatment and medical education, whereas graduated doctors were more cautious in this regard. Nonetheless, both medical students and graduated doctors positively perceived using ChatGPT for creating patient educational materials.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT</kwd>
        <kwd>AI</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>medical education</kwd>
        <kwd>medical students</kwd>
        <kwd>student</kwd>
        <kwd>students</kwd>
        <kwd>intern</kwd>
        <kwd>interns</kwd>
        <kwd>resident</kwd>
        <kwd>residents</kwd>
        <kwd>knee osteoarthritis</kwd>
        <kwd>survey</kwd>
        <kwd>surveys</kwd>
        <kwd>questionnaire</kwd>
        <kwd>questionnaires</kwd>
        <kwd>chatbot</kwd>
        <kwd>chatbots</kwd>
        <kwd>conversational agent</kwd>
        <kwd>conversational agents</kwd>
        <kwd>attitude</kwd>
        <kwd>attitudes</kwd>
        <kwd>opinion</kwd>
        <kwd>opinions</kwd>
        <kwd>perception</kwd>
        <kwd>perceptions</kwd>
        <kwd>perspective</kwd>
        <kwd>perspectives</kwd>
        <kwd>acceptance</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Artificial intelligence (AI) is a new technology that has changed various industries, including medicine. AI refers to the development of computer systems capable of performing complex tasks that normally require human intelligence, such as understanding conversation, recognizing patterns or images, and making decisions. Traditionally, AI in medicine was used in areas such as medical imaging, diagnostics tests, and prediction tools. However, it evolved and became involved in other aspects of the medical field, for example, helping physicians gather patient data before the visit [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>One of the most remarkable developments in AI is the advancement of large language models and natural language processing, which aim to facilitate the automatic analysis of language, mimicking human language understanding. ChatGPT is an application built based on large language models, namely, GPT-3.5 or GPT-4. This newly developed AI technology enables users to engage in interactive conversations and receive humanlike responses, thereby creating a more dynamic and engaging user experience [<xref ref-type="bibr" rid="ref2">2</xref>]. ChatGPT fascinates many people in a variety of fields. In the medical field, it has been used to help write manuscripts [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. However, researchers were still concerned about the contents’ ethical consideration and validity [<xref ref-type="bibr" rid="ref6">6</xref>]. Many researchers have also evaluated ChatGPT for medical education, such as taking examinations and comparing the results to medical students [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. The use of ChatGPT to help in the patient care process has also been reported [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>].</p>
      <p>The potential of using AI in the medical field, especially orthopedics, is promising. For example, deep learning AI has been used for detecting and classifying many orthopedic conditions, such as degenerative spinal conditions, rotator cuff injury, and implant loosening [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. ChatGPT itself has been tested with the American Board of Orthopaedic Surgery Examination, but it cannot pass the exam [<xref ref-type="bibr" rid="ref17">17</xref>]. One of the challenges encountered in medical practice is the high volume of patients, which may sometimes prevent physicians from providing detailed information to patients. Given that ChatGPT is a language model focused on communication, it could help provide appropriate treatment plans and patient education.</p>
      <p>Therefore, we aim to investigate how medical students and practicing doctors perceive the use of ChatGPT in clinical settings and medical education. Additionally, we will explore whether there are differences in perception between medical students and doctors at various levels of experience regarding ChatGPT’s responses to a clinical question. We hypothesized that different levels of clinical experience would change participants’ perceptions of ChatGPT.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was approved by the institutional review board (REC.66-125-11-1) at the Faculty of Medicine, Prince of Songkla University.</p>
      </sec>
      <sec>
        <title>Study Design</title>
        <p>This was a cross-sectional study investigating the perceptions of medical students, interns, residents, and attending staff toward using an AI chatbot (ChatGPT) in clinical practice and medical education. Specifically, we asked participants to rate their opinions on the ChatGPT-generated treatment plan and advice using knee osteoarthritis as an example.</p>
      </sec>
      <sec>
        <title>Instrument</title>
        <p>We developed a web-based questionnaire. The first part inquired about participants’ demographic data, including age, sex, and status. The second part explored participants’ general experience and perception toward using an AI system in medicine. The responses for the second and third parts used a Likert-scale system with five levels: strongly agree, agree, neither agree nor disagree, disagree, and strongly disagree.</p>
        <p>The third part of the questionnaire explored the perception of the AI-generated response to a clinical question. We first gave ChatGPT (version 3.5) a question prompt: “Please act as a doctor and give me general knowledge, natural history and detailed treatment plan for a 65-year-old woman with knee osteoarthritis.” The response was shown in a questionnaire. We then asked participants to rate their perception of ChatGPT’s response validity, clinical reasoning, clinical application, and use as a patient education tool. Participants were asked if they could provide a better response than ChatGPT, and lastly, participants were asked to rate their perception of using ChatGPT’s response for medical education. In addition, we included open-ended questions for participants to express their opinions about the potential benefits and pitfalls of using ChatGPT for clinical practice and medical education.</p>
        <p>A pilot test using a developed questionnaire was performed with 20 participants as the pilot group. The Cronbach α for internal consistency was .86.</p>
      </sec>
      <sec>
        <title>Participant Recruitment</title>
        <p>The study was set in a university-affiliated teaching hospital. We recruited two groups of participants. The first group consisted of fifth-year medical students who had completed an orthopedics rotation. The second comprised graduated physicians of various levels, including interns, family medicine and orthopedic residents, and family medicine and orthopedic attendings. The questionnaire’s link was emailed according to the email list registered with the hospital.</p>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <p>All participants’ responses were exported as an Excel file (Microsoft Corporation) from the Google Form website. It was then imported and analyzed using the R program (version 4.2.3; R Foundation for Statistical Computing). Strongly agree and agree responses were grouped as a positive perception. Neither agree nor disagree responses were categorized as a neutral perception. Disagree and strongly disagree were grouped as a negative perception. Answers to the open-ended question were reviewed and discussed between investigators. Data distribution patterns were examined by histogram and Shapiro-Wilk test. Normally distributed continuous data were presented as means (SDs) and tested with an independent <italic>t</italic> test. Nonnormally distributed continuous variables were presented as medians (IQRs) and were tested with the Mann-Whitney <italic>U</italic> test. Categorical data were presented with count and percentage and tested with the Fisher exact probability test. Statistical significance was set at <italic>P</italic>&#60;.05.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>We sent out 350 questionnaires and received 239 (68.2%) responses. A total of 124 of 185 (67%) medical students, 46 of 78 (59%) interns, 37 of 43 (86%) residents, and 32 of 44 (73%) attending staff responded. The median age of medical students, interns, residents, and attending staff were 23 (IQR 22-24), 25 (IQR 25-26), 29 (IQR 27-31), and 38 (IQR 35-47) years, respectively. Of the 239 respondents, 132 (55%) were female. Female respondents made up 79 of 124 (64%) medical students, 24 of 46 (52%) interns, 16 of 37 (43%) residents, and 13 of 32 (41%) attending staff.</p>
        <p>Only 9 of the 239 (4%) respondents stated that they did not know about the concept of AI. When asked whether they used AI in their daily life, we found that 113 (47%) respondents rarely used it. Equal numbers of respondents reported using AI often and using it sometimes (n=39, 16% each). Of the 239 respondents, 28 (12%) never used AI, and only 20 (8%) used AI regularly.</p>
        <p>We specified the question further and inquired about the experience using an AI chatbot or ChatGPT in the medical field. Of the 239 respondents, 158 (66.1%) had never heard of AI in medicine or heard of it but never used it (<xref ref-type="table" rid="table1">Table 1</xref>). Even though there was a higher percentage of attending staff (13/32, 41%) and residents (10/37, 27%) who had never heard of AI chatbots or ChatGPT compared to interns (10/46, 22%) and medical students (18/124, 15%), the proportion of answers tested by Fisher exact test did not differ significantly between groups (<italic>P</italic>=.07).</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>What is your experience using an AI chatbot or ChatGPT in the medical field?</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="230"/>
            <col width="150"/>
            <col width="170"/>
            <col width="130"/>
            <col width="160"/>
            <col width="160"/>
            <thead>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Use regularly, n (%)</td>
                <td>Use sometimes, n (%)</td>
                <td>Use rarely, n (%)</td>
                <td>Heard of it but never use, n (%)</td>
                <td>Never heard of it, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Medical student (n=124)</td>
                <td>5 (4.0)</td>
                <td>15 (12.1)</td>
                <td>27 (21.8)</td>
                <td>59 (47.6)</td>
                <td>18 (14.5)</td>
              </tr>
              <tr valign="top">
                <td>Intern (n=46)</td>
                <td>2 (4.3)</td>
                <td>7 (15.2)</td>
                <td>8 (17.4)</td>
                <td>19 (41.3)</td>
                <td>10 (21.7)</td>
              </tr>
              <tr valign="top">
                <td>Resident (n=37)</td>
                <td>5 (13.5)</td>
                <td>1 (2.7)</td>
                <td>4 (10.8)</td>
                <td>17 (46.0)</td>
                <td>10 (27.0)</td>
              </tr>
              <tr valign="top">
                <td>Staff (n=32)</td>
                <td>1 (3.1)</td>
                <td>2 (6.3)</td>
                <td>4 (12.5)</td>
                <td>12 (37.5)</td>
                <td>13 (40.6)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Next, we evaluated respondents’ perceptions toward AI chatbots or ChatGPT use in clinical settings (part A of <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). We found that a lower proportion of attending staff (16/32, 50%) and residents (20/37, 54%) had a positive perception toward the use of ChatGPT for clinical practice when compared to medical students (94/124, 76%) and interns (32/46, 70%). The difference between groups did not reach statistical significance (Fisher exact test <italic>P</italic>=.06). One attending who disagreed with using ChatGPT for clinical practice commented that patients prefer human interaction over a computer program. When asked whether ChatGPT could benefit medical education, most respondents had a positive perception (part B of <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>), with no significant difference between groups (<italic>P</italic>=.46).</p>
        <p>Participants were asked to rate whether they agreed with the statement regarding the response from ChatGPT about treatment and patient education for knee osteoarthritis. We found that most participants agreed that the response from ChatGPT was valid and well reasoned (part A in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The proportion of responses did not differ significantly between medical students, interns, residents, and attending staff (Fisher exact test <italic>P</italic>=.24). However, when asked whether they agreed that the responses were useful for clinical application, there was a statistical difference between the responses of each group (Fisher exact test <italic>P</italic>&#60;.001). While medical students mostly agreed that it could be used in clinical practice, some attending staff, residents, and interns disagreed (part B in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The result shows that some participants changed their minds after reading ChatGPT’s response. Of the 162 participants who felt positive toward using ChatGPT for patient care (part A of <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>), only 99 (61%) kept the same answer, while 54 (33%) changed to neutral and 9 (6%) changed to negative (part B in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p>
        <p>Most participants agreed that the response from ChatGPT could be used to make educational media for patients (part C in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The answer did not differ significantly between groups (Fisher exact test <italic>P</italic>=.83). When asked whether the participant could give a better treatment plan and patient education compared to the response from ChatGPT, we found a significant difference in answers between groups (Fisher exact test <italic>P</italic>&#60;.001). While most medical students neither agreed nor disagreed with the statement, most residents and attending staff felt they could formulate a better treatment plan and give better advice (part D in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). Interestingly, some interns even rated ChatGPT’s response better than theirs. They explained that they could not provide advice as comprehensive as ChatGPT due to the time limit for each patient visit.</p>
        <p>Lastly, we asked if the participants agreed that the responses from ChatGPT could be used as educational materials for medical students. Most medical students and residents agreed with the statement, but only about half of the attending staff and interns agreed. Of the 32 attending staff, 4 (13%) disagreed with the statement (part E in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The proportional difference in answers between participant groups was statistically significant by Fisher exact test (<italic>P</italic>&#60;.001).</p>
        <p>A total of 32 participants gave additional comments about ChatGPT use for clinical practice and medical education. These responses could be categorized as the potential benefits, limitations, and pitfalls of using ChatGPT.</p>
      </sec>
      <sec>
        <title>ChatGPT in Medical Education</title>
        <sec>
          <title>Potential Benefits</title>
          <p>Some medical students commented that the responses generated could be used to prepare for the objective structured clinical examination (OSCE), especially for the question that asks the student to give advice and a general treatment plan. Some attending staff and residents stated that it could be used to review and conceptualize the understanding of each disease.</p>
        </sec>
        <sec>
          <title>Limitations and Pitfalls</title>
          <p>Medical students did not give any comment regarding limitations. However, there were many concerns from attending staff, residents, and interns. Many respondents felt that the response generated by ChatGPT was superficial and too general. They believed that medical students should pursue a deeper understanding of the disease.</p>
          <p>Several participants also commented that the knowledge, even though it is valid, may lack proper supporting scientific evidence, and medical students should learn to acquire and evaluate new knowledge from standard and trustworthy sources. The reliability of the answers was another concerning point. Respondents still doubted whether ChatGPT could produce a valid response for all diseases. One attending staff who disagreed about using ChatGPT for medical education stated that the lack of content verification by experts was another major concern.</p>
        </sec>
      </sec>
      <sec>
        <title>ChatGPT in Clinical Practice</title>
        <sec>
          <title>Potential Benefits</title>
          <p>The majority of respondents agreed that the answers from ChatGPT are suitable for general treatment planning. Many also stated that the answer could be used as a template for making patient education media.</p>
        </sec>
        <sec>
          <title>Limitations and Pitfalls</title>
          <p>Respondents raised several limitations. First, the treatment plan was too generalized and may not be suitable for different patients. They also stated that physicians need to make an individualized treatment plan for each patient according to many factors, such as disease severity, lifestyle, and patient expectations. Second, respondents were also concerned about whether the AI could provide up-to-date treatment information and suggested that physicians must regularly update their knowledge from trustworthy sources. Third, many worried about the language barrier. ChatGPT was created using English as the primary language. The meaning and correctness must be re-evaluated when the information is translated to make patient education media. Lastly, almost all respondents were concerned about data bias. ChatGPT was trained from massive internet data; however, the sources were not always from an appropriate scientific database. Therefore, the resulting answer may not be correct.</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>This study reflected how medical students and various levels of physicians felt about medical answers from ChatGPT and its applications. We found that participants with different clinical experience levels had different perceptions toward ChatGPT’s use for clinical practice and medical education. Medical students generally had a positive perception, while practicing physicians were more neutral.</p>
      <p>For clinical practice, a higher proportion of attending staff and residents disagreed with using ChatGPT. While medical students were satisfied with responses that followed textbooks and sounded authentic, more experienced physicians could detect the pitfalls of the responses. They had shared their concerns, which had both supporting and conflicting literature.</p>
      <p>The first concern was the lack of patient-specific treatment plans. ChatGPT seemed to provide accurate and reproducible advice for general knowledge. For example, bariatric surgeons rated responses of ChatGPT as “comprehensive” for 86.8% of the questions asked [<xref ref-type="bibr" rid="ref18">18</xref>]. Gastroenterologists also rated ChatGPT’s response to common patient questions with a score of 3.9 (SD 0.8), 3.9 (SD 0.9), and 3.3 (SD 0.9) out of 5 for accuracy, clarity, and efficacy, respectively [<xref ref-type="bibr" rid="ref12">12</xref>]. It could provide a well-structured and comprehensive response to common breast augmentation surgery questions [<xref ref-type="bibr" rid="ref19">19</xref>]. The responses to common questions about retinal detachments were rated appropriate in 80%-90% of the questions asked [<xref ref-type="bibr" rid="ref20">20</xref>]. However, patient-specific conditions should also be included in treatment planning. The most appropriate treatment method selection may need clinical reasoning and experience. Therefore, ChatGPT’s answer could be used as a general outline for treatment, but currently, it could not replace a physician’s clinical reasoning and judgment. If the model is further explicitly trained for some medical conditions, it might be able to provide more specific treatment recommendations.</p>
      <p>Another concern about using ChatGPT in clinical practice was its evidence-based element. It seemed that ChatGPT gathered resources from reasonably reliable sources. For example, in responding to public health questions, 91% of the answers given were determined to be based on evidence [<xref ref-type="bibr" rid="ref21">21</xref>]. However, there were reports of ChatGPT citing nonexistent publications when asked [<xref ref-type="bibr" rid="ref22">22</xref>]. Data validity was another point of concern. Due to increasing numbers of publications and emerging predatory publishers, ChatGPT might have relied on references that it deemed valid but were, in fact, fraudulent. Therefore, physicians may still have advantages over AI because they can assess and choose the most valid, reliable, and up-to-date knowledge for their clinical practice.</p>
      <p>Most participants agreed that ChatGPT could be used for patient education. Some research also supported this opinion. ChatGPT had the potential to be used as a diabetic educator [<xref ref-type="bibr" rid="ref23">23</xref>]. It could also provide an effective diet plan for people with food allergies, albeit with minor errors [<xref ref-type="bibr" rid="ref13">13</xref>]. ChatGPT correctly answered 61% of basic public medical consultations, but only 39% of questions asked by health care personnel were correctly answered [<xref ref-type="bibr" rid="ref24">24</xref>]. It seemed that for general medical questions, ChatGPT could generate appropriate advice. However, for more specific topics, the development of a dedicated chatbot might be more beneficial. For example, the SnehAI chatbot was developed to educate adolescents in India about sexual health and showed promising results [<xref ref-type="bibr" rid="ref25">25</xref>]. Another chatbot, “VIRA,” was created to communicate and ensure COVID-19 vaccine safety with young adults and minority populations [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
      <p>In medical education, ChatGPT could be used in various aspects [<xref ref-type="bibr" rid="ref27">27</xref>]. Using ChatGPT for preparing for OSCE and other exams was mentioned by participants and in the literature [<xref ref-type="bibr" rid="ref28">28</xref>]. For OSCE, it could help by generating example scenarios, suggesting a proper physical examination, and giving appropriate medical advice. Surprisingly, it could score even higher than humans for a virtual OSCE in obstetrics and gynecology [<xref ref-type="bibr" rid="ref29">29</xref>]. However, it should be noted that ChatGPT responses were compared to only two human candidates and might not represent the whole picture. For multiple-choice question examinations, ChatGPT could answer some questions correctly and give explanations with acceptable insights and reasoning. However, the results of using ChatGPT were quite varied, from passing the exam to failing some [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref33">33</xref>]. When explored in detail, the passing score of ChatGPT in most tests was at average or slightly above minimal passing level. Therefore, it supported the fact that many attending staff and residents felt that the response by ChatGPT was superficial and did not show a deep understanding of the topic. For more advanced examination levels, such as resident-level examinations, ChatGPT performed more poorly [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. For example, ChatGPT’s score in the plastic surgery in-training examination was ranked at the 49th percentile compared with first-year residents but significantly worse than fifth- and sixth-year residents at the zeroth percentile [<xref ref-type="bibr" rid="ref9">9</xref>]. 
However, more recent research using an updated GPT-4 model capable of advanced reasoning and complex problem-solving showed remarkable results, and the GPT-4 model consistently outperformed GPT-3.5. GPT-4 was able to pass the Peruvian National Licensing Examination, the Japanese Medical Licensing Examination, German medical state examinations, and the Family Medicine Residency Progress Test with exceptional scores [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>].</p>
      <p>Our study tried to gather information from different levels of students and physicians and contrasted their results. We found that less experienced medical students might overlook some potential pitfalls of using ChatGPT in clinical practice and medical education. Even though there were many benefits of using ChatGPT, medical teachers needed to be aware of the risks and warn their medical students accordingly.</p>
      <p>The limitation of our study was that we used only one scenario of knee osteoarthritis. If there were more scenarios of other diseases, the perception might differ; however, we felt that knee osteoarthritis was a good representation of a condition commonly encountered by various levels of physicians and would generate a diverse response. Moreover, ChatGPT has been known to answer according to the prompt and may change its answer depending on how the question was asked. In our study, the question contained the phrase “general knowledge,” which might affect how the respondent rates the answer. The participants also came from one center, which could limit the generalizability of the results. Additionally, the response rate of 68.2% might indicate a selection bias toward people who were already interested in AI, thereby boosting the positive perception toward ChatGPT. Furthermore, besides the limited representativeness of doctors and medical students within the survey setting, the omission of patient perspectives neglected the input of arguably the most crucial stakeholder in health care. Lastly, the latest ChatGPT model is GPT-4, which is more advanced and may be able to provide more detailed responses. However, the superiority of ChatGPT-4 compared to ChatGPT-3.5 has mainly been proven in a scenario of multiple-choice examinations.</p>
      <p>In conclusion, medical students generally had a positive perception of using ChatGPT for guiding treatment and medical education, whereas graduated doctors were more cautious in this regard. Nonetheless, both medical students and graduated doctors positively perceived using ChatGPT for creating patient educational materials.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>(A) Perceptions toward using artificial intelligence (AI) chatbot for patient care. (B) Perception toward AI for medical education.</p>
        <media xlink:href="mededu_v9i1e50658_app1.png" xlink:title="PNG File , 188 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>(A) Perception toward validity and clinical reasoning of ChatGPT's response. (B) Perception toward using ChatGPT's response in clinical practice. (C) Perception toward using ChatGPT's response for patient education material. (D) Perception of self-advice compared to ChatGPT. (E) Perception toward using ChatGPT's response for medical education.</p>
        <media xlink:href="mededu_v9i1e50658_app2.png" xlink:title="PNG File , 201 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">OSCE</term>
          <def>
            <p>objective structured clinical examination</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Investigating patients' continuance intention toward conversational agents in outpatient departments: cross-sectional field survey</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <month>11</month>
          <day>07</day>
          <volume>24</volume>
          <issue>11</issue>
          <fpage>e40681</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/11/e40681/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/40681</pub-id>
          <pub-id pub-id-type="medline">36342768</pub-id>
          <pub-id pub-id-type="pii">v24i11e40681</pub-id>
          <pub-id pub-id-type="pmcid">PMC9679947</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thirunavukarasu</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSJ</given-names>
            </name>
            <name name-style="western">
              <surname>Elangovan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
          </person-group>
          <article-title>Large language models in medicine</article-title>
          <source>Nat Med</source>
          <year>2023</year>
          <month>08</month>
          <volume>29</volume>
          <issue>8</issue>
          <fpage>1930</fpage>
          <lpage>1940</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-023-02448-8</pub-id>
          <pub-id pub-id-type="medline">37460753</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-023-02448-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dergaa</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Chamari</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zmijewski</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ben Saad</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>From human writing to artificial intelligence generated text: examining the prospects and potential threats of ChatGPT in academic writing</article-title>
          <source>Biol Sport</source>
          <year>2023</year>
          <month>04</month>
          <volume>40</volume>
          <issue>2</issue>
          <fpage>615</fpage>
          <lpage>622</lpage>
          <pub-id pub-id-type="doi">10.5114/biolsport.2023.125623</pub-id>
          <pub-id pub-id-type="medline">37077800</pub-id>
          <pub-id pub-id-type="pii">125623</pub-id>
          <pub-id pub-id-type="pmcid">PMC10108763</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Švab</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Klemenc-Ketiš</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zupanič</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>New challenges in scientific publications: referencing, artificial intelligence and ChatGPT</article-title>
          <source>Zdr Varst</source>
          <year>2023</year>
          <month>09</month>
          <volume>62</volume>
          <issue>3</issue>
          <fpage>109</fpage>
          <lpage>112</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37327133"/>
          </comment>
          <pub-id pub-id-type="doi">10.2478/sjph-2023-0015</pub-id>
          <pub-id pub-id-type="medline">37327133</pub-id>
          <pub-id pub-id-type="pii">sjph-2023-0015</pub-id>
          <pub-id pub-id-type="pmcid">PMC10263368</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salimi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Saheb</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Large language models in ophthalmology scientific writing: ethical considerations blurred lines or not at all?</article-title>
          <source>Am J Ophthalmol</source>
          <year>2023</year>
          <month>10</month>
          <volume>254</volume>
          <fpage>177</fpage>
          <lpage>181</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ajo.2023.06.004</pub-id>
          <pub-id pub-id-type="medline">37348667</pub-id>
          <pub-id pub-id-type="pii">S0002-9394(23)00237-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Májovský</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Černý</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kasal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Komarc</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Netuka</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence can generate fraudulent but authentic-looking scientific medical articles: Pandora's box has been opened</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <month>05</month>
          <day>31</day>
          <volume>25</volume>
          <fpage>e46924</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e46924/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46924</pub-id>
          <pub-id pub-id-type="medline">37256685</pub-id>
          <pub-id pub-id-type="pii">v25i1e46924</pub-id>
          <pub-id pub-id-type="pmcid">PMC10267787</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hoch</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Wollenberg</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lüers</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Knoedler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Knoedler</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Frank</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cotofana</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Alfertshofer</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT's quiz skills in different otolaryngology subspecialties: an analysis of 2576 single-choice and multiple-choice board certification preparation questions</article-title>
          <source>Eur Arch Otorhinolaryngol</source>
          <year>2023</year>
          <month>09</month>
          <volume>280</volume>
          <issue>9</issue>
          <fpage>4271</fpage>
          <lpage>4278</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37285018"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00405-023-08051-4</pub-id>
          <pub-id pub-id-type="medline">37285018</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00405-023-08051-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC10382366</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Are ChatGPT’s knowledge and interpretation ability comparable to those of medical students in Korea for taking a parasitology examination?: a descriptive study</article-title>
          <source>J Educ Eval Health Prof</source>
          <year>2023</year>
          <volume>20</volume>
          <fpage>1</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36627845"/>
          </comment>
          <pub-id pub-id-type="doi">10.3352/jeehp.2023.20.1</pub-id>
          <pub-id pub-id-type="medline">36627845</pub-id>
          <pub-id pub-id-type="pii">jeehp.2023.20.1</pub-id>
          <pub-id pub-id-type="pmcid">PMC9905868</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Humar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Asaad</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bengur</surname>
              <given-names>FB</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT is equivalent to first-year plastic surgery residents: evaluation of ChatGPT on the plastic surgery in-service examination</article-title>
          <source>Aesthet Surg J</source>
          <year>2023</year>
          <month>11</month>
          <day>16</day>
          <volume>43</volume>
          <issue>12</issue>
          <fpage>NP1085</fpage>
          <lpage>NP1089</lpage>
          <pub-id pub-id-type="doi">10.1093/asj/sjad130</pub-id>
          <pub-id pub-id-type="medline">37140001</pub-id>
          <pub-id pub-id-type="pii">7151262</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kung</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Cheatham</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Medenilla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sillos</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>De Leon</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Elepaño</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Madriaga</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aggabao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Diaz-Candido</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Maningo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <month>02</month>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>e0000198</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812645"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000198</pub-id>
          <pub-id pub-id-type="medline">36812645</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-22-00371</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Flores-Cohaila</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>García-Vicente</surname>
              <given-names>Abigaíl</given-names>
            </name>
            <name name-style="western">
              <surname>Vizcarra-Jiménez</surname>
              <given-names>Sonia F</given-names>
            </name>
            <name name-style="western">
              <surname>De la Cruz-Galán</surname>
              <given-names>Janith P</given-names>
            </name>
            <name name-style="western">
              <surname>Gutiérrez-Arratia</surname>
              <given-names>Jesús D</given-names>
            </name>
            <name name-style="western">
              <surname>Quiroga Torres</surname>
              <given-names>BG</given-names>
            </name>
            <name name-style="western">
              <surname>Taype-Rondan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT on the Peruvian National Licensing Medical Examination: Cross-Sectional Study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>09</month>
          <day>28</day>
          <volume>9</volume>
          <fpage>e48039</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e48039/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48039</pub-id>
          <pub-id pub-id-type="medline">37768724</pub-id>
          <pub-id pub-id-type="pii">v9i1e48039</pub-id>
          <pub-id pub-id-type="pmcid">PMC10570896</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lahat</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shachar</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Avidan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Glicksberg</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Klang</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Evaluating the utility of a large language model in answering common patients' gastrointestinal health-related questions: are we there yet?</article-title>
          <source>Diagnostics (Basel)</source>
          <year>2023</year>
          <month>06</month>
          <day>02</day>
          <volume>13</volume>
          <issue>11</issue>
          <fpage>1950</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=diagnostics13111950"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/diagnostics13111950</pub-id>
          <pub-id pub-id-type="medline">37296802</pub-id>
          <pub-id pub-id-type="pii">diagnostics13111950</pub-id>
          <pub-id pub-id-type="pmcid">PMC10252924</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Niszczota</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rybicka</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>The credibility of dietary advice formulated by ChatGPT: robo-diets for people with food allergies</article-title>
          <source>Nutrition</source>
          <year>2023</year>
          <month>08</month>
          <volume>112</volume>
          <fpage>112076</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0899-9007(23)00105-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.nut.2023.112076</pub-id>
          <pub-id pub-id-type="medline">37269717</pub-id>
          <pub-id pub-id-type="pii">S0899-9007(23)00105-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coppock</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmer</surname>
              <given-names>NE</given-names>
            </name>
            <name name-style="western">
              <surname>Spritzer</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Goode</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>DeFrate</surname>
              <given-names>LE</given-names>
            </name>
          </person-group>
          <article-title>Automated segmentation and prediction of intervertebral disc morphology and uniaxial deformations from MRI</article-title>
          <source>Osteoarthr Cartil Open</source>
          <year>2023</year>
          <month>09</month>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>100378</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2665-9131(23)00045-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ocarto.2023.100378</pub-id>
          <pub-id pub-id-type="medline">37388644</pub-id>
          <pub-id pub-id-type="pii">S2665-9131(23)00045-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC10302207</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Benhenneda</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Brouard</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Charousset</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Berhouet</surname>
              <given-names>J</given-names>
            </name>
            <collab>Francophone Arthroscopy Society (SFA)</collab>
          </person-group>
          <article-title>Can artificial intelligence help decision-making in arthroscopy? Part 2: The IA-RTRHO model - a decision-making aid for long head of the biceps diagnoses in small rotator cuff tears</article-title>
          <source>Orthop Traumatol Surg Res</source>
          <year>2023</year>
          <month>12</month>
          <volume>109</volume>
          <issue>8S</issue>
          <fpage>103652</fpage>
          <pub-id pub-id-type="doi">10.1016/j.otsr.2023.103652</pub-id>
          <pub-id pub-id-type="medline">37380127</pub-id>
          <pub-id pub-id-type="pii">S1877-0568(23)00143-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hur</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>In</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Machine learning for detecting total knee arthroplasty implant loosening on plain radiographs</article-title>
          <source>Bioengineering (Basel)</source>
          <year>2023</year>
          <month>05</month>
          <day>23</day>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>632</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bioengineering10060632"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bioengineering10060632</pub-id>
          <pub-id pub-id-type="medline">37370563</pub-id>
          <pub-id pub-id-type="pii">bioengineering10060632</pub-id>
          <pub-id pub-id-type="pmcid">PMC10295184</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lum</surname>
              <given-names>ZC</given-names>
            </name>
          </person-group>
          <article-title>Can artificial intelligence pass the American Board of Orthopaedic Surgery Examination? Orthopaedic residents versus ChatGPT</article-title>
          <source>Clin Orthop Relat Res</source>
          <year>2023</year>
          <month>08</month>
          <day>01</day>
          <volume>481</volume>
          <issue>8</issue>
          <fpage>1623</fpage>
          <lpage>1630</lpage>
          <pub-id pub-id-type="doi">10.1097/CORR.0000000000002704</pub-id>
          <pub-id pub-id-type="medline">37220190</pub-id>
          <pub-id pub-id-type="pii">00003086-990000000-01207</pub-id>
          <pub-id pub-id-type="pmcid">PMC10344569</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Samaan</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Yeo</surname>
              <given-names>YH</given-names>
            </name>
            <name name-style="western">
              <surname>Rajeev</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hawley</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Abel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>WH</given-names>
            </name>
            <name name-style="western">
              <surname>Srinivasan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Burch</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Watson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Liran</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Samakar</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Assessing the accuracy of responses by the language model ChatGPT to questions regarding bariatric surgery</article-title>
          <source>Obes Surg</source>
          <year>2023</year>
          <month>06</month>
          <volume>33</volume>
          <issue>6</issue>
          <fpage>1790</fpage>
          <lpage>1796</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37106269"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11695-023-06603-5</pub-id>
          <pub-id pub-id-type="medline">37106269</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11695-023-06603-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC10234918</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seth</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Cox</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bulloch</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hunter-Smith</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Rozen</surname>
              <given-names>WM</given-names>
            </name>
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Evaluating Chatbot efficacy for answering frequently asked questions in plastic surgery: a ChatGPT case study focused on breast augmentation</article-title>
          <source>Aesthet Surg J</source>
          <year>2023</year>
          <month>09</month>
          <day>14</day>
          <volume>43</volume>
          <issue>10</issue>
          <fpage>1126</fpage>
          <lpage>1135</lpage>
          <pub-id pub-id-type="doi">10.1093/asj/sjad140</pub-id>
          <pub-id pub-id-type="medline">37158147</pub-id>
          <pub-id pub-id-type="pii">7157259</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Momenaei</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Wakabayashi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Shahlaee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Durrani</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Pandit</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mansour</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Abishek</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sridhar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yonekawa</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kuriyan</surname>
              <given-names>AE</given-names>
            </name>
          </person-group>
          <article-title>Appropriateness and readability of ChatGPT-4-generated responses for surgical treatment of retinal diseases</article-title>
          <source>Ophthalmol Retina</source>
          <year>2023</year>
          <month>10</month>
          <volume>7</volume>
          <issue>10</issue>
          <fpage>862</fpage>
          <lpage>868</lpage>
          <pub-id pub-id-type="doi">10.1016/j.oret.2023.05.022</pub-id>
          <pub-id pub-id-type="medline">37277096</pub-id>
          <pub-id pub-id-type="pii">S2468-6530(23)00246-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayers</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Poliak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Leas</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hogarth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Evaluating artificial intelligence responses to public health questions</article-title>
          <source>JAMA Netw Open</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>6</volume>
          <issue>6</issue>
          <fpage>e2317517</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37285160"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.17517</pub-id>
          <pub-id pub-id-type="medline">37285160</pub-id>
          <pub-id pub-id-type="pii">2805756</pub-id>
          <pub-id pub-id-type="pmcid">PMC10248742</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hueber</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kleyer</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Quality of citation data using the natural language processing tool ChatGPT in rheumatology: creation of false references</article-title>
          <source>RMD Open</source>
          <year>2023</year>
          <month>06</month>
          <volume>9</volume>
          <issue>2</issue>
          <fpage>e003248</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://rmdopen.bmj.com/lookup/pmidlookup?view=long&#38;pmid=37286300"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/rmdopen-2023-003248</pub-id>
          <pub-id pub-id-type="medline">37286300</pub-id>
          <pub-id pub-id-type="pii">rmdopen-2023-003248</pub-id>
          <pub-id pub-id-type="pmcid">PMC10254965</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pajai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Prasad</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wanjari</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Munjewar</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pathade</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A Critical Review of ChatGPT as a Potential Substitute for Diabetes Educators</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <month>05</month>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>e38380</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37265899"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.38380</pub-id>
          <pub-id pub-id-type="medline">37265899</pub-id>
          <pub-id pub-id-type="pmcid">PMC10231273</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hsu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hsu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hsieh</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Examining real-world medication consultations and drug-herb interactions: ChatGPT performance evaluation</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>08</month>
          <day>21</day>
          <volume>9</volume>
          <fpage>e48433</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e48433/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48433</pub-id>
          <pub-id pub-id-type="medline">37561097</pub-id>
          <pub-id pub-id-type="pii">v9i1e48433</pub-id>
          <pub-id pub-id-type="pmcid">PMC10477918</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Singhal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Muttreja</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Piterova</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>An artificial intelligence chatbot for young people's sexual and reproductive health in India (SnehAI): instrumental case study</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <month>01</month>
          <day>03</day>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>e29969</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/1/e29969/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29969</pub-id>
          <pub-id pub-id-type="medline">34982034</pub-id>
          <pub-id pub-id-type="pii">v24i1e29969</pub-id>
          <pub-id pub-id-type="pmcid">PMC8764609</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sangha</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sedoc</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Toledo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gretz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lahav</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Slonim</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bar-Zeev</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Chatbot-Delivered COVID-19 Vaccine Communication Message Preferences of Young Adults and Public Health Workers in Urban American Communities: Qualitative Study</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <month>07</month>
          <day>06</day>
          <volume>24</volume>
          <issue>7</issue>
          <fpage>e38418</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/7/e38418/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/38418</pub-id>
          <pub-id pub-id-type="medline">35737898</pub-id>
          <pub-id pub-id-type="pii">v24i7e38418</pub-id>
          <pub-id pub-id-type="pmcid">PMC9301547</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abd-Alrazaq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>AlSaad</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alhuwail</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Healy</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Latifi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aziz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Damseh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alabed Alrazak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Large Language Models in Medical Education: Opportunities, Challenges, and Future Directions</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>9</volume>
          <fpage>e48291</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e48291/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48291</pub-id>
          <pub-id pub-id-type="medline">37261894</pub-id>
          <pub-id pub-id-type="pii">v9i1e48291</pub-id>
          <pub-id pub-id-type="pmcid">PMC10273039</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tsang</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Practical Applications of ChatGPT in Undergraduate Medical Education</article-title>
          <source>J Med Educ Curric Dev</source>
          <year>2023</year>
          <volume>10</volume>
          <fpage>23821205231178449</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/23821205231178449?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/23821205231178449</pub-id>
          <pub-id pub-id-type="medline">37255525</pub-id>
          <pub-id pub-id-type="pii">10.1177_23821205231178449</pub-id>
          <pub-id pub-id-type="pmcid">PMC10226299</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Kemp</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Logan</surname>
              <given-names>SJS</given-names>
            </name>
            <name name-style="western">
              <surname>Dimri</surname>
              <given-names>PS</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mattar</surname>
              <given-names>CNZ</given-names>
            </name>
            <name name-style="western">
              <surname>Dashraath</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ramlal</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mahyuddin</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Kanayan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>SWD</given-names>
            </name>
            <name name-style="western">
              <surname>Thain</surname>
              <given-names>SPT</given-names>
            </name>
            <name name-style="western">
              <surname>Fee</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Illanes</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Choolani</surname>
              <given-names>MA</given-names>
            </name>
            <collab>National University of Singapore Obstetrics and Gynecology Artificial Intelligence (NUS OBGYN-AI) Collaborative Group</collab>
          </person-group>
          <article-title>ChatGPT outscored human candidates in a virtual objective structured clinical examination in obstetrics and gynecology</article-title>
          <source>Am J Obstet Gynecol</source>
          <year>2023</year>
          <month>08</month>
          <volume>229</volume>
          <issue>2</issue>
          <fpage>172.e1</fpage>
          <lpage>172.e12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0002-9378(23)00251-X"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ajog.2023.04.020</pub-id>
          <pub-id pub-id-type="medline">37088277</pub-id>
          <pub-id pub-id-type="pii">S0002-9378(23)00251-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Duong</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Solomon</surname>
              <given-names>BD</given-names>
            </name>
          </person-group>
          <article-title>Analysis of large-language model versus human performance for genetics questions</article-title>
          <source>Eur J Hum Genet</source>
          <year>2023</year>
          <month>05</month>
          <day>29</day>
          <fpage>1</fpage>
          <pub-id pub-id-type="doi">10.1038/s41431-023-01396-8</pub-id>
          <pub-id pub-id-type="medline">37246194</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41431-023-01396-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fijačko</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gosak</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Štiglic</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>John Douma</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Can ChatGPT pass the life support exams without entering the American heart association course?</article-title>
          <source>Resuscitation</source>
          <year>2023</year>
          <month>04</month>
          <volume>185</volume>
          <fpage>109732</fpage>
          <pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.109732</pub-id>
          <pub-id pub-id-type="medline">36775020</pub-id>
          <pub-id pub-id-type="pii">S0300-9572(23)00045-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gilson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Safranek</surname>
              <given-names>CW</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Socrates</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Chartash</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>How Does ChatGPT Perform on the United States Medical Licensing Examination? The Implications of Large Language Models for Medical Education and Knowledge Assessment</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>02</month>
          <day>08</day>
          <volume>9</volume>
          <fpage>e45312</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e45312/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/45312</pub-id>
          <pub-id pub-id-type="medline">36753318</pub-id>
          <pub-id pub-id-type="pii">v9i1e45312</pub-id>
          <pub-id pub-id-type="pmcid">PMC9947764</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thirunavukarasu</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmood</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sanghera</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Barzangi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>El Mukashfi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Trialling a Large Language Model (ChatGPT) in General Practice With the Applied Knowledge Test: Observational Study Demonstrating Opportunities and Limitations in Primary Care</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>04</month>
          <day>21</day>
          <volume>9</volume>
          <fpage>e46599</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e46599/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46599</pub-id>
          <pub-id pub-id-type="medline">37083633</pub-id>
          <pub-id pub-id-type="pii">v9i1e46599</pub-id>
          <pub-id pub-id-type="pmcid">PMC10163403</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weng</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT failed Taiwan's Family Medicine Board Exam</article-title>
          <source>J Chin Med Assoc</source>
          <year>2023</year>
          <month>08</month>
          <day>01</day>
          <volume>86</volume>
          <issue>8</issue>
          <fpage>762</fpage>
          <lpage>766</lpage>
          <pub-id pub-id-type="doi">10.1097/JCMA.0000000000000946</pub-id>
          <pub-id pub-id-type="medline">37294147</pub-id>
          <pub-id pub-id-type="pii">02118582-990000000-00224</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huynh</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Bonebrake</surname>
              <given-names>BT</given-names>
            </name>
            <name name-style="western">
              <surname>Schultis</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Quach</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Deibert</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>New Artificial Intelligence ChatGPT Performs Poorly on the 2022 Self-assessment Study Program for Urology</article-title>
          <source>Urol Pract</source>
          <year>2023</year>
          <month>07</month>
          <volume>10</volume>
          <issue>4</issue>
          <fpage>409</fpage>
          <lpage>415</lpage>
          <pub-id pub-id-type="doi">10.1097/UPJ.0000000000000406</pub-id>
          <pub-id pub-id-type="medline">37276372</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>KJQ</given-names>
            </name>
            <name name-style="western">
              <surname>Meaney</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kemppainen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Punnett</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Leung</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Assessment of Resident and AI Chatbot Performance on the University of Toronto Family Medicine Residency Progress Test: Comparative Study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>09</month>
          <day>19</day>
          <volume>9</volume>
          <fpage>e50514</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e50514/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/50514</pub-id>
          <pub-id pub-id-type="medline">37725411</pub-id>
          <pub-id pub-id-type="pii">v9i1e50514</pub-id>
          <pub-id pub-id-type="pmcid">PMC10548315</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Takagi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Watari</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Erabi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sakaguchi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Performance of GPT-3.5 and GPT-4 on the Japanese Medical Licensing Examination: comparison study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>06</month>
          <day>29</day>
          <volume>9</volume>
          <fpage>e48002</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e48002/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48002</pub-id>
          <pub-id pub-id-type="medline">37384388</pub-id>
          <pub-id pub-id-type="pii">v9i1e48002</pub-id>
          <pub-id pub-id-type="pmcid">PMC10365615</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roos</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kasapovic</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jansen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kaczmarczyk</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Artificial Intelligence in Medical Education: Comparative Analysis of ChatGPT, Bing, and Medical Students in Germany</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>09</month>
          <day>04</day>
          <volume>9</volume>
          <fpage>e46482</fpage>
          <pub-id pub-id-type="doi">10.2196/46482</pub-id>
          <pub-id pub-id-type="medline">37665620</pub-id>
          <pub-id pub-id-type="pii">v9i1e46482</pub-id>
          <pub-id pub-id-type="pmcid">PMC10507517</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
