<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v10i1e52746</article-id><article-id pub-id-type="doi">10.2196/52746</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Performance of ChatGPT on Nursing Licensure Examinations in the United States and China: Cross-Sectional Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wu</surname><given-names>Zelin</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Gan</surname><given-names>Wenyi</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Xue</surname><given-names>Zhaowen</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" 
rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Ni</surname><given-names>Zhengxin</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zheng</surname><given-names>Xiaofei</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Zhang</surname><given-names>Yiyi</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Bone and Joint Surgery and Sports Medicine Center, The First Affiliated Hospital</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff><aff id="aff2"><institution>Department of Joint Surgery and Sports Medicine, Zhuhai People&#x2019;s Hospital</institution>, <addr-line>Zhuhai City</addr-line>, <country>China</country></aff><aff id="aff3"><institution>School of Nursing, Yangzhou University</institution>, <addr-line>Yangzhou</addr-line>, <country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Cardoso</surname><given-names>Taiane de Azevedo</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Kabir</surname><given-names>Humayun</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Bojic</surname><given-names>Iva</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Beunza</surname><given-names>Juan-Jose</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Yiyi Zhang, PhD, Department of Bone and Joint Surgery and Sports Medicine Center, The First 
Affiliated Hospital, No.613, Huangpu Avenue West, Tianhe District, Guangzhou, 510630, China, 86 18002255355; <email>yiyizjun@126.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2024</year></pub-date><pub-date pub-type="epub"><day>3</day><month>10</month><year>2024</year></pub-date><volume>10</volume><elocation-id>e52746</elocation-id><history><date date-type="received"><day>14</day><month>09</month><year>2023</year></date><date date-type="rev-recd"><day>12</day><month>06</month><year>2024</year></date><date date-type="accepted"><day>15</day><month>06</month><year>2024</year></date></history><copyright-statement>&#x00A9; Zelin Wu, Wenyi Gan, Zhaowen Xue, Zhengxin Ni, Xiaofei Zheng, Yiyi Zhang. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 3.10.2024. </copyright-statement><copyright-year>2024</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2024/1/e52746"/><abstract><sec><title>Background</title><p>The creation of large language models (LLMs) such as ChatGPT is an important step in the development of artificial intelligence, which shows great potential in medical education due to its powerful language understanding and generative capabilities. The purpose of this study was to quantitatively evaluate and comprehensively analyze ChatGPT&#x2019;s performance in handling questions for the National Nursing Licensure Examination (NNLE) in China and the United States, including the National Council Licensure Examination for Registered Nurses (NCLEX-RN) and the NNLE.</p></sec><sec><title>Objective</title><p>This study aims to examine how well LLMs respond to the NCLEX-RN and the NNLE multiple-choice questions (MCQs) in various language inputs. To evaluate whether LLMs can be used as multilingual learning assistance for nursing, and to assess whether they possess a repository of professional knowledge applicable to clinical nursing practice.</p></sec><sec sec-type="methods"><title>Methods</title><p>First, we compiled 150 NCLEX-RN Practical MCQs, 240 NNLE Theoretical MCQs, and 240 NNLE Practical MCQs. Then, the translation function of ChatGPT 3.5 was used to translate NCLEX-RN questions from English to Chinese and NNLE questions from Chinese to English. Finally, the original version and the translated version of the MCQs were inputted into ChatGPT 4.0, ChatGPT 3.5, and Google Bard. 
Different LLMs were compared according to the accuracy rate, and the differences between different language inputs were compared.</p></sec><sec sec-type="results"><title>Results</title><p>The accuracy rates of ChatGPT 4.0 for NCLEX-RN practical questions and Chinese-translated NCLEX-RN practical questions were 88.7% (133/150) and 79.3% (119/150), respectively. Despite the statistical significance of the difference (<italic>P</italic>=.03), the correct rate was generally satisfactory. Around 71.9% (169/235) of NNLE Theoretical MCQs and 69.1% (161/233) of NNLE Practical MCQs were correctly answered by ChatGPT 4.0. The accuracy of ChatGPT 4.0 in processing NNLE Theoretical MCQs and NNLE Practical MCQs translated into English was 71.5% (168/235; <italic>P</italic>=.92) and 67.8% (158/233; <italic>P</italic>=.77), respectively, and there was no statistically significant difference between the results of text input in different languages. ChatGPT 3.5 (NCLEX-RN <italic>P</italic>=.003, NNLE Theoretical <italic>P</italic>&#x003C;.001, NNLE Practical <italic>P</italic>=.12) and Google Bard (NCLEX-RN <italic>P</italic>&#x003C;.001, NNLE Theoretical <italic>P</italic>&#x003C;.001, NNLE Practical <italic>P</italic>&#x003C;.001) had lower accuracy rates for nursing-related MCQs than ChatGPT 4.0 in English input. English accuracy was higher when compared with ChatGPT 3.5&#x2019;s Chinese input, and the difference was statistically significant (NCLEX-RN <italic>P</italic>=.02, NNLE Practical <italic>P</italic>=.02). Whether submitted in Chinese or English, the MCQs from the NCLEX-RN and NNLE demonstrated that ChatGPT 4.0 had the highest number of unique correct responses and the lowest number of unique incorrect responses among the 3 LLMs.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This study, focusing on 618 nursing MCQs including NCLEX-RN and NNLE exams, found that ChatGPT 4.0 outperformed ChatGPT 3.5 and Google Bard in accuracy. 
It excelled in processing English and Chinese inputs, underscoring its potential as a valuable tool in nursing education and clinical decision-making.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>ChatGPT</kwd><kwd>nursing licensure examination</kwd><kwd>nursing</kwd><kwd>LLMs</kwd><kwd>large language models</kwd><kwd>nursing education</kwd><kwd>AI</kwd><kwd>nursing student</kwd><kwd>large language model</kwd><kwd>licensing</kwd><kwd>observation</kwd><kwd>observational study</kwd><kwd>China</kwd><kwd>USA</kwd><kwd>United States of America</kwd><kwd>auxiliary tool</kwd><kwd>accuracy rate</kwd><kwd>theoretical</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The large language model (LLM) technology is a stepping stone in the evolution of artificial intelligence (AI) [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Through the analysis of a large database, the primary module generates a logical and plain text response to the user&#x2019;s query promptly following the user&#x2019;s textual input [<xref ref-type="bibr" rid="ref3">3</xref>]. Currently, popular AI software includes ChatGPT 4.0, ChatGPT 3.5, and Google Bard, and research indicates that these 3 AI algorithms perform well when answering queries about lung cancer [<xref ref-type="bibr" rid="ref4">4</xref>]. AI tools are the result of the advancement of science and technology, and the advent of revolutionary tools will alter the way people learn and work, which is an irreversible trend.</p><p>ChatGPT has been controversial since its public release in November 2022 due to its powerful text generation capabilities, and attention has been focused on students using ChatGPT for essay writing and assignment plagiarism [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. 
With the birth of regulatory software such as GPTZero, AI-Text-Classifier, and ChatGPT Detector, people gradually focused on the application of ChatGPT, trying to explore and expand the application field of ChatGPT. The study found that ChatGPT showed both professionalism and empathy in answering general public health questions [<xref ref-type="bibr" rid="ref8">8</xref>]. ChatGPT not only showed strong expertise in answering basic research directions but also followed evidence-based clinical decision-making [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Nevertheless, there may be some ethical problems in clinical application, and it is necessary to consider whether the use of ChatGPT will violate the rights and interests of patients [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref13">13</xref>]. Therefore, more and more researchers have placed the application field of ChatGPT in education [<xref ref-type="bibr" rid="ref14">14</xref>]. The studies found that ChatGPT performed well on multiple-choice questions (MCQs) about otolaryngology and gynecology [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. In addition, ChatGPT software can pass the Plastic Surgery Inservice Training Examination [<xref ref-type="bibr" rid="ref17">17</xref>], the American Heart Association Basic Life Support Examinations [<xref ref-type="bibr" rid="ref18">18</xref>], and the Taiwanese Pharmacist Licensing Examination [<xref ref-type="bibr" rid="ref19">19</xref>]. ChatGPT is also able to solve higher-order problems related to medical biochemistry while also achieving satisfactory performance in surgical education and training [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. 
However, ChatGPT is not a training tool for all exams, with the exception of the American Heart Association&#x2019;s Advanced Cardiovascular Life Support (ACLS) exams and Taiwan&#x2019;s Family Medicine Board Exam [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. This might suggest that ChatGPT&#x2019;s application areas may be limited by language and region in addition to speciality.</p><p>Both the United States and China have instituted licensing exams to regulate the qualifications of registered nurses [<xref ref-type="bibr" rid="ref23">23</xref>]. China uses the National Nursing Licensure Examination (NNLE) [<xref ref-type="bibr" rid="ref23">23</xref>], whereas the United States uses the National Council Licensure Examination for Registered Nurses (NCLEX-RN) [<xref ref-type="bibr" rid="ref24">24</xref>], both of which seek to standardize the theoretical and practical foundations of nurses through standardized assessment procedures to ensure the professionalism of nurses who are entering the medical field. The content of nursing studies is not medically specialized but rather interdisciplinary and multidisciplinary [<xref ref-type="bibr" rid="ref25">25</xref>]. On the basis of their nursing work, nurses are frequently required to comprehend clinical decisions made by physicians. As a result, it is easy for society to disregard the difficulty of nursing education and training, that is, the necessity of a medical foundation for the development of nursing expertise [<xref ref-type="bibr" rid="ref26">26</xref>]. Presently, there are no professional nursing learning aids to assist nurses in gaining a better understanding of the professional medical issues encountered during the clinical learning process. Huge and intricate, the medical knowledge system necessitates repeated learning, even for specialists, in order to master specialized knowledge [<xref ref-type="bibr" rid="ref27">27</xref>]. 
Despite the fact that many researchers attempt to implement various review strategies to increase the passage rate of nursing professional examinations, it is frequently difficult to popularize a single review strategy due to varying local practical policies [<xref ref-type="bibr" rid="ref28">28</xref>]. No single revision method is appropriate for all individuals. How to assist nurses in gaining a deeper understanding of medical knowledge, enhancing their stockpile of professional theoretical knowledge, and increasing their exam pass rate is a pressing issue for nurses today.</p><p>The design of this research is cross-sectional. By incorporating NCLEX-RN and NNLE questions, we evaluated the precision of responses from ChatGPT 4.0, ChatGPT 3.5, and Google Bard. Concurrently, the translation feature of ChatGPT 3.5 was used to convert between Chinese and English, while an examination was conducted into the disparity in the rate of accurate responses provided by ChatGPT across various languages. The aim of this study is to offer a conceptual framework that supports the implementation of ChatGPT and advances nursing education and clinical application.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Design</title><p>With reference to Zong et al [<xref ref-type="bibr" rid="ref29">29</xref>], we designed a cross-sectional study. The experimental data from our study had been recorded in an Excel file and uploaded as <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. 
The STROBE Initiative [<xref ref-type="bibr" rid="ref30">30</xref>] was used in this study and the STROBE Initiative checklist is available in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec><sec id="s2-2"><title>Ethical Considerations</title><p>As this study does not involve interventional experiments on humans or animals, the research does not require approval per the Ethics Committee of the First Affiliated Hospital of Jinan University guidelines.</p></sec><sec id="s2-3"><title>Data Source</title><p>NCLEX-RN practice questions were compiled at the website &#x201C;nurseslabs&#x201D; [<xref ref-type="bibr" rid="ref31">31</xref>]. There were no set questions on the official NCLEX-RN test; instead, a computer produced new questions with a minimum of 75 and a maximum of 265 depending on how accurate the preceding questions were. Thus, we got the most recent 2 sets of practice questions for the NCLEX-RN exam from the internet. In 2 practice sets, we compiled a total of 150 MCQs.</p><p>The NNLE question categories were divided into 2 sections: nursing theory and nursing practice, each containing 120 MCQs. On the website &#x201C;baidu&#x201D; [<xref ref-type="bibr" rid="ref32">32</xref>], we used the most current 480 NNLE-MCQs from the 2022 and 2021 exams that were accessible. According to the classification of nursing theory examination and nursing practice, the questions for 2022 and 2021 were merged and then separated into NNLE Theoretical MCQs (n=240) and NNLE Practical MCQs (n=240).</p></sec><sec id="s2-4"><title>Procedures</title><p>According to the research stages (<xref ref-type="fig" rid="figure1">Figure 1</xref>), we translated the original English NCLEX-RN-MCQs into the Chinese version of the NCLEX-RN-MCQs. The original NNLE queries were written in Chinese, and we also translated them into English. 
To avoid systematic errors induced by differences in translation quality during the translation process, ChatGPT 3.5 was used to translate both from Chinese to English and from English to Chinese. We checked the language both before and after translating using ChatGPT 3.5 to translate between Chinese and English, as well as English and Chinese. For some clear translation mistakes, we entered the incorrect translation points in ChatGPT 3.5&#x2019;s dialog box and requested that ChatGPT 3.5 retranslate the text.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Diagrammatic representation of the progression of exploratory application experiments. MCQ: multiple-choice question; NCLEX-RN: National Council Licensure Examination for Registered Nurses; NNLE: National Nursing Licensure Examination.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v10i1e52746_fig01.png"/></fig><p>We entered all questions into ChatGPT 4.0 (<xref ref-type="fig" rid="figure2">Figure 2A</xref> and <xref ref-type="fig" rid="figure2">C</xref>) [<xref ref-type="bibr" rid="ref33">33</xref>] and ChatGPT 3.5 (<xref ref-type="fig" rid="figure2">Figure 2B</xref> and <xref ref-type="fig" rid="figure2">D</xref>) [<xref ref-type="bibr" rid="ref34">34</xref>] as well as Google Bard (<xref ref-type="fig" rid="figure2">Figure 2E</xref>) [<xref ref-type="bibr" rid="ref35">35</xref>], then recorded the responses. Both ChatGPT 4.0 and ChatGPT 3.5 support text input in non-English languages, whereas Google Bard only supports text input in English at this time. The use of &#x201C;New chat&#x201D; for each inquiry ensured the independence of each response because it prevented the AI from using context from previous interactions, thereby eliminating any learning or bias that may have been carried over from earlier questions. 
Additionally, no plugins were used with ChatGPT, and the &#x201C;Chat history &#x0026; training&#x201D; option was deactivated to preserve the objectivity of each response.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>(A) English multiple-choice questions (MCQs) input in ChatGPT 4.0. (B) English MCQs input in ChatGPT 3.5. (C) Chinese MCQs input in ChatGPT 4.0. (D) Chinese MCQs input in ChatGPT 3.5. (E) English MCQs input in Google Bard.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v10i1e52746_fig02.png"/></fig></sec><sec id="s2-5"><title>Data Analysis</title><p>SPSS program (version 26.0; IBM Corp) was used for statistical analysis. With reference to Zong et al [<xref ref-type="bibr" rid="ref29">29</xref>], we collected the responses from ChatGPT 4.0, ChatGPT 3.5, and Google Bard and converted them to the binary variables &#x201C;true&#x201D; or &#x201C;false.&#x201D; The Pearson <italic>&#x03C7;</italic><sup><italic>2</italic></sup> test was used to compare the differences between various LLM software or the same software input in various languages. A difference was considered statistically significant when the <italic>P</italic> value was less than .05. We used the web-based VENN diagram drawing website &#x201C;bioinfogp&#x201D; [<xref ref-type="bibr" rid="ref36">36</xref>] to draw VENN diagrams to display different AI software&#x2019;s results for the same type of subject with various linguistic inputs. Last, bar charts were constructed from a portion of the data using GraphPad Prism 8.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>We collected 150 NCLEX-RN-MCQs in total. We excluded the image questions from the compiled NNLE-MCQs because the picture analysis of ChatGPT and Google Bard required the use of external plug-ins. 
After eliminating the image questions, there were a total of 235 NNLE Theoretical MCQs and 233 NNLE Practical MCQs left. Then, ChatGPT 3.5 converted NCLEX-RN-MCQs for English questions into the Chinese version and NNLE-MCQs into the English version.</p></sec><sec id="s3-2"><title>Performance of LLMs in Responding to English NCLEX-RN MCQs</title><p>ChatGPT 4.0 had an accuracy rate of 88.67% (133/150) when answering NCLEX-RN MCQs in English, which was higher than ChatGPT 3.5 (113/150, 75.3%) and Google Bard (96/150, 64%) (<xref ref-type="fig" rid="figure3">Figure 3C</xref>). Statistically, ChatGPT 4.0 performed significantly better than the other 2 categories (ChatGPT 4.0 vs ChatGPT 3.5, <italic>P</italic>=.003; ChatGPT 4.0 vs Google Bard, <italic>P</italic>&#x003C;.001) (<xref ref-type="fig" rid="figure3">Figure 3C</xref>). ChatGPT 3.5 was more accurate than Google Bard and the difference was statistically significant (<italic>P</italic>=.03) (<xref ref-type="fig" rid="figure3">Figure 3C</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>(A,B) VENN diagram shows the correct and incorrect intersection of NCLEX-RN practical questions in different large language models. (C) The correct rate of NCLEX-RN practical questions in various large language models. MCQ: multiple-choice question; NCLEX-RN: National Council Licensure Examination for Registered Nurses.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v10i1e52746_fig03.png"/></fig></sec><sec id="s3-3"><title>Performance of LLMs in Responding to Chinese NNLE-MCQs</title><p>The difference between the correct rates of ChatGPT 4.0 and ChatGPT 3.5 in answering the Chinese version of NNLE theoretical MCQs (<italic>P</italic>&#x003C;.001) and NNLE practical MCQs (<italic>P</italic>&#x003C;.001) was statistically significant (<xref ref-type="fig" rid="figure4">Figure 4E</xref> and <xref ref-type="fig" rid="figure4">F</xref>). 
The correct rates of ChatGPT 4.0 answering NNLE theoretical MCQs and NNLE practical MCQs were 71.9% (169/235) and 69.1% (161/233), respectively, compared with 53.2% (125/235) and 50.2% (117/233) for ChatGPT 3.5 (<xref ref-type="fig" rid="figure4">Figure 4E</xref> and <xref ref-type="fig" rid="figure4">F</xref>).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>(A,B) VENN diagram shows the correct and incorrect intersection of NNLE theoretical MCQs in different large language models (LLMs). (C,D) VENN diagram shows the correct and incorrect intersection of NNLE practical MCQs in different LLMs. (E) The correct rate of NNLE theoretical MCQs in various LLMs. (F) The correct rate of NNLE practical MCQs in various LLMs. MCQ: multiple-choice question; NNLE: National Nursing Licensure Examination.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mededu_v10i1e52746_fig04.png"/></fig></sec><sec id="s3-4"><title>Performance and Variations of MCQs Input Into LLMs in Various Languages</title><p>After entering the Chinese-translated version of NCLEX-RN-MCQs into ChatGPT 4.0 and ChatGPT 3.5, we discovered that the accuracy rates were 79.3% (119/150) and 63.3% (95/150), respectively, with a statistically significant difference between the two (<italic>P</italic>=.002) (<xref ref-type="fig" rid="figure3">Figure 3C</xref>).</p><p>Then, we fed the English-translated version of NNLE Theoretical MCQs into ChatGPT 4.0, ChatGPT 3.5, and Google Bard and determined that their respective accuracy rates were 71.5% (168/235), 55.7% (131/235), and 49.8% (117/235) (<xref ref-type="fig" rid="figure4">Figure 4E</xref>). 
ChatGPT 4.0 had a higher accuracy rate than ChatGPT 3.5 (<italic>P</italic>&#x003C;.001) and Google Bard (<italic>P</italic>&#x003C;.001) for the English-translated version of NNLE Theoretical MCQs while the difference was statistically significant (<xref ref-type="fig" rid="figure4">Figure 4E</xref>). ChatGPT 3.5 had a higher accuracy rate than Google Bard, but the difference was not statistically significant (<italic>P</italic>=.20) (<xref ref-type="fig" rid="figure4">Figure 4E</xref>).</p><p>The accuracy rates of ChatGPT 4.0, ChatGPT 3.5, and Google Bard were 67.8% (158/233), 60.9% (142/233), and 46.8% (109/233), respectively, when the English-translated version of NNLE Practical MCQs was inputted (<xref ref-type="fig" rid="figure4">Figure 4F</xref>). In terms of the English-translated version of NNLE Practical MCQs, the accuracy rates of both ChatGPT 4.0 (<italic>P</italic>&#x003C;.001) and ChatGPT 3.5 (<italic>P</italic>=.002) were higher than those of Google Bard, and the difference was statistically significant; however, unlike before, the difference in accuracy rates between ChatGPT 4.0 and ChatGPT 3.5 was not statistically significant (<italic>P</italic>=.12) (<xref ref-type="fig" rid="figure4">Figure 4F</xref>).</p><p>When processing NCLEX-RN-MCQs, the accuracy of inputs in the original English version was statistically significantly higher than that of inputs translated into Chinese for both ChatGPT 4.0 (<italic>P</italic>=.03) and ChatGPT 3.5 (<italic>P</italic>=.02) (<xref ref-type="fig" rid="figure3">Figure 3C</xref>). The difference was not statistically significant between the accuracy of inputs in the original Chinese version and the inputs of the translated English version for both ChatGPT 4.0 (<italic>P</italic>=.92) and ChatGPT 3.5 (<italic>P</italic>=.58) when processing NNLE Theoretical MCQs (<xref ref-type="fig" rid="figure4">Figure 4E</xref>). 
The accuracy of ChatGPT 4.0&#x2019;s inputs in the original Chinese version was higher than that of inputs translated into English when processing NNLE Practical MCQs, but this difference was not statistically significant (<italic>P</italic>=.77) (<xref ref-type="fig" rid="figure4">Figure 4F</xref>). Surprisingly, the accuracy of ChatGPT 3.5&#x2019;s inputs in the original Chinese version was lower than that of inputs translated into English while dealing with NNLE Practical MCQs, and this difference was statistically significant (<italic>P</italic>=.02) (<xref ref-type="fig" rid="figure4">Figure 4F</xref>).</p><p><xref ref-type="fig" rid="figure3">Figure 3A</xref> and <xref ref-type="fig" rid="figure3">B</xref> depicts, respectively, the intersection of correct and incorrect questions when NCLEX-RN practical questions were inputted into various LLMs in various languages. Similarly, <xref ref-type="fig" rid="figure4">Figure 4A</xref> and <xref ref-type="fig" rid="figure4">B</xref> depicts NNLE Theoretical MCQs, while <xref ref-type="fig" rid="figure4">Figure 4C</xref> and <xref ref-type="fig" rid="figure4">D</xref> depicts NNLE Practical MCQs. When the same questions were input into ChatGPT 4.0, ChatGPT 3.5, and Google Bard in English, ChatGPT 4.0 had the highest number (n for NCLEX-RN MCQs=14; n for NNLE Theoretical MCQs=33; n for NNLE Practical MCQs=26) of uniquely correct answers and the lowest number (n for NCLEX-RN MCQs=2; n for NNLE Theoretical MCQs=6; n for NNLE Practical MCQs=7) of uniquely incorrect answers among the 3 engines. Instead, Google Bard had a lower number (n for NCLEX-RN MCQs=2; n for NNLE Theoretical MCQs=10; n for NNLE Practical MCQs=6) of uniquely correct answers than ChatGPT 4.0 and the highest number (n for NCLEX-RN MCQs=26; n for NNLE Theoretical MCQs=34; n for NNLE Practical MCQs=36) of uniquely incorrect answers among the 3 engines when the MCQs were input into 3 engines in English. 
Likewise, after the questions were submitted in Chinese, we found that ChatGPT 4.0 (n for NCLEX-RN MCQs=35; n for NNLE Theoretical MCQs=61; n for NNLE Practical MCQs=63) gave more uniquely accurate responses than ChatGPT 3.5 (n for NCLEX-RN MCQs=11; n for NNLE Theoretical MCQs=17; n for NNLE Practical MCQs=19) did.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study is a cross-sectional study that collected a total of 618 nursing-related MCQs, including 150 NCLEX-RN practice questions and 468 NNLE actual exam questions. To observe differences between inputs in different languages, ChatGPT 3.5 was used exclusively for Chinese-to-English and English-to-Chinese translations. The results revealed that ChatGPT 4.0 had a significantly higher accuracy rate when handling English input for NCLEX-RN practical MCQs compared with ChatGPT 3.5 and Google Bard. Similarly, ChatGPT 4.0 also outperformed ChatGPT 3.5 in accuracy when processing the Chinese input of NNLE exam MCQs. Therefore, ChatGPT 4.0 has the potential to be an effective learning assistance software for ChatGPT users, and due to its powerful real-time text generation capabilities, it can also provide additional sources of information and reference for nursing decisions in clinical nursing work.</p><p>Despite being a tool that accepts input in different languages, ChatGPT has linguistic bias while processing text input, as this research has shown. ChatGPT 3.5 translated NCLEX-RN practical MCQs from English to Chinese. Following input, it was discovered that while interacting with English, ChatGPT 4.0 and ChatGPT 3.5 had accuracy rates that were noticeably greater than with Chinese. When NNLE MCQs were input into ChatGPT in English, ChatGPT 4.0&#x2019;s responses were only somewhat less accurate than with the Chinese input, while ChatGPT 3.5&#x2019;s English input was even more accurate than the Chinese input. 
Although there may be some linguistic distortion when translating between languages using software, the findings of our cross-sectional investigation indicated that ChatGPT processes English input more accurately than Chinese input. We asked ChatGPT, an AI program that facilitates real-time communication, questions in an attempt to comprehend the logic behind handling input in various languages. In response, ChatGPT said that it can assess and respond to queries in several languages depending on the language of input. This capability stems from its training on various input kinds in various languages. As a result, the current discrepancy in accuracy caused by input in Chinese and English may be the result of ChatGPT receiving different amounts of training in different languages. This discrepancy may disappear with an increase in language training once ChatGPT becomes more well-known worldwide.</p><p>The low passage rate of nursing examinations is partly attributed to the lack of fundamental theoretical and clinical knowledge among nursing staff [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Researchers have tried to reform and innovate nursing education models within certain limits to improve knowledge levels and exam pass rates [<xref ref-type="bibr" rid="ref28">28</xref>]. However, due to differences in language and local policies, it is challenging to widely implement a single educational model. MCQs are an effective method to assess student knowledge [<xref ref-type="bibr" rid="ref38">38</xref>], but existing learning resources often require students to conduct independent searches to expand knowledge, adding to learning pressure and affecting the coherence of the learning process. ChatGPT&#x2019;s big data analysis and rapid text feedback can help students consolidate and expand knowledge points while completing MCQ exercises [<xref ref-type="bibr" rid="ref39">39</xref>]. 
Besides, ChatGPT 4.0 not only enhances the efficiency of nursing education [<xref ref-type="bibr" rid="ref40">40</xref>] but also provides clinicians and nurses with objective information support based on evidence-based medicine and big data analysis in complex clinical scenarios [<xref ref-type="bibr" rid="ref41">41</xref>]. For instance, the research discovered that ChatGPT 4.0 not only analyzed imaging data with acceptable accuracy and sensitivity but also assisted physicians in thinking outside the box and offering several helpful recommendations when making individualized clinical treatment choices for tumor patients [<xref ref-type="bibr" rid="ref41">41</xref>]. Furthermore, ChatGPT may provide nurses with a customized and immersive learning experience, bolster their competence and self-assurance in overseeing remote patient care, and furnish them with the necessary abilities for remote patient monitoring, all of which can contribute to the enhancement of patient outcomes and care quality [<xref ref-type="bibr" rid="ref42">42</xref>]. Additionally, ChatGPT may assist doctors in streamlining patient data organization and easing the burden of interpreting medical records in order to improve patient communication while doing therapeutic procedures [<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>According to this study and previous research findings, ChatGPT 4.0 is currently the most accurate and repeatable AI software among many LLMs. In answering questions related to electrocardiogram images [<xref ref-type="bibr" rid="ref44">44</xref>], the Multi-Specialty Recruitment Assessment exam [<xref ref-type="bibr" rid="ref45">45</xref>], dental professional issues [<xref ref-type="bibr" rid="ref46">46</xref>], and analyzing radiology data [<xref ref-type="bibr" rid="ref47">47</xref>], ChatGPT 4.0 provides more accurate and comprehensive responses compared with ChatGPT 3.5 and Google Bard. 
Since ChatGPT 4.0 is currently the only paid AI software compared with free-to-use LLMs like ChatGPT 3.5, Google Bard, and Bing, it is essential to compare its functionality with these free LLMs when exploring its real-world application value. The economic cost of use is also a factor that must be considered in the popularization and promotion of its application [<xref ref-type="bibr" rid="ref48">48</xref>].</p><p>Assessing ChatGPT&#x2019;s clinical application value can follow the same approach as the training of experienced clinical workers: upon passing the theory test, candidates will be deemed to possess fundamental medical theoretical knowledge and be capable of managing simple clinical scenarios [<xref ref-type="bibr" rid="ref49">49</xref>]. The intricacy of clinical issues will then continuously increase as a result of ongoing training that corrects incorrect theoretical knowledge and clinical reasoning. Finally, they are trained to become highly repeatable and capable self-correcting clinical practitioners. ChatGPT has shown that it has a theoretical foundation for supporting clinical practice with its outstanding success in the qualifying exams of many clinical professions [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. However, whether it is used as an auxiliary tool for self-learning and education, to support patient communication, or to aid in the analysis of complicated clinical circumstances, a commensurate regulatory system must be developed. In order to limit the circumstances in which ChatGPT is used, schools, hospitals, and publishing companies must first create pertinent policies [<xref ref-type="bibr" rid="ref50">50</xref>]. 
Some examples of these policies include forbidding the use of ChatGPT during exams [<xref ref-type="bibr" rid="ref51">51</xref>] and obtaining patient consent before using ChatGPT as an auxiliary tool in real clinical settings [<xref ref-type="bibr" rid="ref52">52</xref>]. Authors must state that ChatGPT was not directly engaged in the creation of the text for the paper and are forbidden from claiming ChatGPT as an independent author [<xref ref-type="bibr" rid="ref53">53</xref>]. Furthermore, the most immediate regulators of ChatGPT are its users. ChatGPT can assist with data collection and content integration, but the user has to take part in the quality review process of the content that ChatGPT generates, identify any problems in the responses that ChatGPT generates, and finish training ChatGPT via error correction and continuous input and output. Although many companies developing LLMs claim to avoid the collection and leakage of private information, as users of these software, it is also essential to ensure the content and quality of the input information. Users should intentionally avoid and delete personal and private information, thereby enhancing their personal oversight function during the use of the software. It is also crucial to seek the informed permission of other participants and make suitable declarations while using ChatGPT in public to prevent unwanted confrontations between doctors and patients, moral and ethical disagreements, and concerns with writing integrity.</p></sec><sec id="s4-2"><title>Implication</title><p>Our study has demonstrated that ChatGPT 4.0 exhibits a satisfactory accuracy rate in handling MCQs for the NCLEX-RN and NNLE exams, outperforming 2 other AI engines, ChatGPT 3.5 and Google Bard. Although there were differences in accuracy rates when the same questions were inputted in different languages, the overall accuracy of ChatGPT 4.0 remains commendable. 
Combined with conclusions from previous research, it can be inferred that ChatGPT 4.0 possesses the knowledge reserve necessary for application in medical education, learning, and clinical scenarios, with the potential to assist in managing complex clinical situations. To promote the rational application of ChatGPT 4.0 in the medical field, it is imperative for relevant authorities to develop effective and reasonable regulatory mechanisms and supervisory bodies in the future. This will ensure that ChatGPT 4.0, a powerful auxiliary AI software, is used appropriately within the health care sector.</p></sec><sec id="s4-3"><title>Limitation</title><p>This study is a cross-sectional analysis, and the findings suggest that ChatGPT 4.0 possesses a certain level of nursing professional knowledge. However, high-quality prospective randomized controlled trials are still required to validate the actual effectiveness of ChatGPT 4.0 in nursing education, learning, and clinical application. Besides, since the logic behind how AI processes questions is part of the company&#x2019;s &#x201C;black box,&#x201D; we can only understand its logic in processing inputs in different languages by interacting with the AI software. Therefore, we infer that the differences in handling Chinese and English inputs are due to variations in the amount of training between languages.</p></sec><sec id="s4-4"><title>Conclusion</title><p>This cross-sectional study collected and analyzed 618 nursing-related MCQs, including NCLEX-RN practice questions and NNLE actual exam questions, to evaluate the performance of ChatGPT 4.0 in processing different language inputs. The study exclusively used ChatGPT 3.5 for Chinese-to-English and English-to-Chinese translations and found that ChatGPT 4.0 demonstrated a significantly higher accuracy rate than ChatGPT 3.5 and Google Bard, particularly in handling English input for NCLEX-RN Practice MCQs and Chinese input for NNLE exam MCQs. 
These findings suggest that ChatGPT 4.0 has substantial potential as an effective learning assistance tool for nursing education and can provide valuable information and reference in clinical nursing settings due to its advanced real-time text generation capabilities.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The data that support the findings of this study are available on request from the corresponding author.</p></sec></notes><fn-group><fn fn-type="con"><p>ZW, WG, ZX, and ZN contributed equally. ZW, WG, ZX, and ZN conceived the study, performed the statistical analysis, interpreted the results, and drafted the manuscript. YZ and XZ supervised the entire study. All authors read and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb3">MCQ</term><def><p>multiple-choice question</p></def></def-item><def-item><term id="abb4">NCLEX-RN</term><def><p>National Council Licensure Examination for Registered Nurses</p></def></def-item><def-item><term id="abb5">NNLE</term><def><p>National Nursing Licensure Examination</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mesko</surname><given-names>B</given-names> </name></person-group><article-title>The ChatGPT (generative artificial intelligence) revolution has made artificial intelligence approachable for medical professionals</article-title><source>J Med Internet Res</source><year>2023</year><month>06</month><day>22</day><volume>25</volume><fpage>e48392</fpage><pub-id pub-id-type="doi">10.2196/48392</pub-id><pub-id 
pub-id-type="medline">37347508</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sorin</surname><given-names>V</given-names> </name><name name-style="western"><surname>Klang</surname><given-names>E</given-names> </name><name name-style="western"><surname>Sklair-Levy</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Large language model (ChatGPT) as a support tool for breast tumor board</article-title><source>NPJ Breast Cancer</source><year>2023</year><month>05</month><day>30</day><volume>9</volume><issue>1</issue><fpage>44</fpage><pub-id pub-id-type="doi">10.1038/s41523-023-00557-8</pub-id><pub-id pub-id-type="medline">37253791</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perera Molligoda Arachchige</surname><given-names>AS</given-names> </name></person-group><article-title>Large language models (LLM) and ChatGPT: a medical student perspective</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2023</year><month>07</month><volume>50</volume><issue>8</issue><fpage>2248</fpage><lpage>2249</lpage><pub-id pub-id-type="doi">10.1007/s00259-023-06227-y</pub-id><pub-id pub-id-type="medline">37046082</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rahsepar</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Tavakoli</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>GHJ</given-names> </name><name name-style="western"><surname>Hassani</surname><given-names>C</given-names> </name><name name-style="western"><surname>Abtin</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Bedayat</surname><given-names>A</given-names> </name></person-group><article-title>How AI responds to common lung cancer questions: ChatGPT vs Google Bard</article-title><source>Radiology</source><year>2023</year><month>06</month><volume>307</volume><issue>5</issue><fpage>e230922</fpage><pub-id pub-id-type="doi">10.1148/radiol.230922</pub-id><pub-id pub-id-type="medline">37310252</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Graham</surname><given-names>A</given-names> </name></person-group><article-title>ChatGPT and other AI tools put students at risk of plagiarism allegations, MDU warns</article-title><source>BMJ</source><year>2023</year><month>05</month><day>17</day><volume>381</volume><fpage>1133</fpage><pub-id pub-id-type="doi">10.1136/bmj.p1133</pub-id><pub-id pub-id-type="medline">37197782</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stokel-Walker</surname><given-names>C</given-names> </name></person-group><article-title>AI bot ChatGPT writes smart essays - should professors worry?</article-title><source>Nature</source><year>2022</year><month>12</month><day>9</day><pub-id pub-id-type="doi">10.1038/d41586-022-04397-7</pub-id><pub-id pub-id-type="medline">36494443</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><collab>The Lancet Digital Health</collab></person-group><article-title>ChatGPT: friend or foe?</article-title><source>Lancet Digit Health</source><year>2023</year><month>03</month><volume>5</volume><issue>3</issue><fpage>e102</fpage><pub-id pub-id-type="doi">10.1016/S2589-7500(23)00023-7</pub-id><pub-id pub-id-type="medline">36754723</pub-id></nlm-citation></ref><ref 
id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayers</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Poliak</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title><source>JAMA Intern Med</source><year>2023</year><month>06</month><day>1</day><volume>183</volume><issue>6</issue><fpage>589</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id><pub-id pub-id-type="medline">37115527</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liao</surname><given-names>L</given-names> </name></person-group><article-title>Is ChatGPT an evidence-based doctor?</article-title><source>Eur Urol</source><year>2023</year><month>09</month><volume>84</volume><issue>3</issue><fpage>355</fpage><lpage>356</lpage><pub-id pub-id-type="doi">10.1016/j.eururo.2023.03.037</pub-id><pub-id pub-id-type="medline">37061445</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Miao</surname><given-names>H</given-names> </name><name name-style="western"><surname>Ahn</surname><given-names>H</given-names> </name></person-group><article-title>Impact of ChatGPT on interdisciplinary nursing education and 
research</article-title><source>Asian Pac Isl Nurs J</source><year>2023</year><month>04</month><day>24</day><volume>7</volume><fpage>e48136</fpage><pub-id pub-id-type="doi">10.2196/48136</pub-id><pub-id pub-id-type="medline">37093625</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kao</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Chien</surname><given-names>TW</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>WC</given-names> </name><name name-style="western"><surname>Chou</surname><given-names>W</given-names> </name><name name-style="western"><surname>Chow</surname><given-names>JC</given-names> </name></person-group><article-title>Assessing ChatGPT&#x2019;s capacity for clinical decision support in pediatrics: a comparative study with pediatricians using KIDMAP of rasch analysis</article-title><source>Medicine (Baltimore)</source><year>2023</year><month>06</month><day>23</day><volume>102</volume><issue>25</issue><fpage>e34068</fpage><pub-id pub-id-type="doi">10.1097/MD.0000000000034068</pub-id><pub-id pub-id-type="medline">37352054</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Tabata</surname><given-names>H</given-names> </name></person-group><article-title>Revolutionary potential of ChatGPT in constructing intelligent clinical decision support systems</article-title><source>Ann Biomed 
Eng</source><year>2024</year><month>02</month><volume>52</volume><issue>2</issue><fpage>125</fpage><lpage>129</lpage><pub-id pub-id-type="doi">10.1007/s10439-023-03288-w</pub-id><pub-id pub-id-type="medline">37332008</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Secor</surname><given-names>AM</given-names> </name><name name-style="western"><surname>C&#x00E9;lestin</surname><given-names>K</given-names> </name><name name-style="western"><surname>Jasmin</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Electronic medical record data missingness and interruption in antiretroviral therapy among adults and children living with HIV in Haiti: retrospective longitudinal study</article-title><source>JMIR Pediatr Parent</source><year>2024</year><month>03</month><day>6</day><volume>7</volume><fpage>e51574</fpage><pub-id pub-id-type="doi">10.2196/51574</pub-id><pub-id pub-id-type="medline">38488632</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Torales</surname><given-names>J</given-names> </name><name name-style="western"><surname>O&#x2019;Higgins</surname><given-names>M</given-names> </name></person-group><article-title>ChatGPT and social psychiatry: a commentary on the article &#x2018;Old dog, new tricks? 
exploring the potential functionalities of ChatGPT in supporting educational methods in social psychiatry&#x2019;.</article-title><source>Int J Soc Psychiatry</source><year>2023</year><month>06</month><day>30</day><fpage>207640231178488</fpage><pub-id pub-id-type="doi">10.1177/00207640231178488</pub-id><pub-id pub-id-type="medline">37392002</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hoch</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Wollenberg</surname><given-names>B</given-names> </name><name name-style="western"><surname>L&#x00FC;ers</surname><given-names>JC</given-names> </name><etal/></person-group><article-title>ChatGPT&#x2019;s quiz skills in different otolaryngology subspecialties: an analysis of 2576 single-choice and multiple-choice board certification preparation questions</article-title><source>Eur Arch Otorhinolaryngol</source><year>2023</year><month>09</month><volume>280</volume><issue>9</issue><fpage>4271</fpage><lpage>4278</lpage><pub-id pub-id-type="doi">10.1007/s00405-023-08051-4</pub-id><pub-id pub-id-type="medline">37285018</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Kemp</surname><given-names>MW</given-names> </name><name name-style="western"><surname>Logan</surname><given-names>SJS</given-names> </name><etal/></person-group><article-title>ChatGPT outscored human candidates in a virtual objective structured clinical examination in obstetrics and gynecology</article-title><source>Am J Obstet Gynecol</source><year>2023</year><month>08</month><volume>229</volume><issue>2</issue><fpage>172</fpage><pub-id pub-id-type="doi">10.1016/j.ajog.2023.04.020</pub-id><pub-id 
pub-id-type="medline">37088277</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gupta</surname><given-names>R</given-names> </name><name name-style="western"><surname>Herzog</surname><given-names>I</given-names> </name><name name-style="western"><surname>Park</surname><given-names>JB</given-names> </name><etal/></person-group><article-title>Performance of ChatGPT on the plastic surgery inservice training examination</article-title><source>Aesthet Surg J</source><year>2023</year><month>11</month><day>16</day><volume>43</volume><issue>12</issue><fpage>NP1078</fpage><lpage>NP1082</lpage><pub-id pub-id-type="doi">10.1093/asj/sjad128</pub-id><pub-id pub-id-type="medline">37128784</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Mou</surname><given-names>W</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>T</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>R</given-names> </name></person-group><article-title>ChatGPT can pass the AHA exams: open-ended questions outperform multiple-choice format</article-title><source>Resuscitation</source><year>2023</year><month>07</month><volume>188</volume><fpage>109783</fpage><pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.109783</pub-id><pub-id pub-id-type="medline">37349064</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>YM</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>HW</given-names> </name><name 
name-style="western"><surname>Chen</surname><given-names>TJ</given-names> </name></person-group><article-title>Performance of ChatGPT on the pharmacist licensing examination in Taiwan</article-title><source>J Chin Med Assoc</source><year>2023</year><month>07</month><day>1</day><volume>86</volume><issue>7</issue><fpage>653</fpage><lpage>658</lpage><pub-id pub-id-type="doi">10.1097/JCMA.0000000000000942</pub-id><pub-id pub-id-type="medline">37227901</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghosh</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bir</surname><given-names>A</given-names> </name></person-group><article-title>Evaluating ChatGPT&#x2019;s ability to solve higher-order questions on the competency-based medical education curriculum in medical biochemistry</article-title><source>Cureus</source><year>2023</year><month>04</month><volume>15</volume><issue>4</issue><fpage>e37023</fpage><pub-id pub-id-type="doi">10.7759/cureus.37023</pub-id><pub-id pub-id-type="medline">37143631</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oh</surname><given-names>N</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>WY</given-names> </name></person-group><article-title>ChatGPT goes to the operating room: evaluating GPT-4 performance and its potential in surgical education and training in the era of large language models</article-title><source>Ann Surg Treat Res</source><year>2023</year><month>05</month><volume>104</volume><issue>5</issue><fpage>269</fpage><lpage>273</lpage><pub-id pub-id-type="doi">10.4174/astr.2023.104.5.269</pub-id><pub-id 
pub-id-type="medline">37179699</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weng</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>YM</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Hwang</surname><given-names>SJ</given-names> </name></person-group><article-title>ChatGPT failed Taiwan&#x2019;s family medicine board exam</article-title><source>J Chin Med Assoc</source><year>2023</year><month>08</month><day>1</day><volume>86</volume><issue>8</issue><fpage>762</fpage><lpage>766</lpage><pub-id pub-id-type="doi">10.1097/JCMA.0000000000000946</pub-id><pub-id pub-id-type="medline">37294147</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hou</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sabharwal</surname><given-names>S</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>V</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>W</given-names> </name></person-group><article-title>Comparison of RN licensure examination: China and the United States</article-title><source>Int J Nurs Sci</source><year>2019</year><month>01</month><day>10</day><volume>6</volume><issue>1</issue><fpage>111</fpage><lpage>116</lpage><pub-id pub-id-type="doi">10.1016/j.ijnss.2018.11.002</pub-id><pub-id 
pub-id-type="medline">31406876</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Muirhead</surname><given-names>L</given-names> </name><name name-style="western"><surname>Cimiotti</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Hayes</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Diversity in nursing and challenges with the NCLEX-RN</article-title><source>Nurs Outlook</source><year>2022</year><volume>70</volume><issue>5</issue><fpage>762</fpage><lpage>771</lpage><pub-id pub-id-type="doi">10.1016/j.outlook.2022.06.003</pub-id><pub-id pub-id-type="medline">35933180</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>O&#x2019;Reilly</surname><given-names>P</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SH</given-names> </name><name name-style="western"><surname>O&#x2019;Sullivan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cullen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Kennedy</surname><given-names>C</given-names> </name><name name-style="western"><surname>MacFarlane</surname><given-names>A</given-names> </name></person-group><article-title>Assessing the facilitators and barriers of interdisciplinary team working in primary care using normalisation process theory: an integrative review</article-title><source>PLoS One</source><year>2017</year><month>05</month><day>18</day><volume>12</volume><issue>5</issue><fpage>e0177026</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0177026</pub-id><pub-id pub-id-type="medline">28545038</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Horsley</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Reed</surname><given-names>T</given-names> </name><name name-style="western"><surname>Muccino</surname><given-names>K</given-names> </name><name name-style="western"><surname>Quinones</surname><given-names>D</given-names> </name><name name-style="western"><surname>Siddall</surname><given-names>VJ</given-names> </name><name name-style="western"><surname>McCarthy</surname><given-names>J</given-names> </name></person-group><article-title>Developing a foundation for interprofessional education within nursing and medical curricula</article-title><source>Nurse Educ</source><year>2016</year><volume>41</volume><issue>5</issue><fpage>234</fpage><lpage>238</lpage><pub-id pub-id-type="doi">10.1097/NNE.0000000000000255</pub-id><pub-id pub-id-type="medline">26963036</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gan</surname><given-names>W</given-names> </name><name name-style="western"><surname>Mok</surname><given-names>TN</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Researching the application of virtual reality in medical education: one-year follow-up of a randomized trial</article-title><source>BMC Med Educ</source><year>2023</year><month>01</month><day>3</day><volume>23</volume><issue>1</issue><fpage>3</fpage><pub-id pub-id-type="doi">10.1186/s12909-022-03992-6</pub-id><pub-id pub-id-type="medline">36597093</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cobourne</surname><given-names>K</given-names> </name></person-group><article-title>Strategies to increase NCLEX pass 
rates: from 68% to 92% in 1 year</article-title><source>Nurse Educ</source><year>2023</year><volume>48</volume><issue>4</issue><fpage>220</fpage><lpage>222</lpage><pub-id pub-id-type="doi">10.1097/NNE.0000000000001382</pub-id><pub-id pub-id-type="medline">36857572</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zong</surname><given-names>H</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>E</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>B</given-names> </name></person-group><article-title>Performance of ChatGPT on Chinese national medical licensing examinations: a five-year examination evaluation study for physicians, pharmacists and nurses</article-title><source>BMC Med Educ</source><year>2024</year><month>02</month><day>14</day><volume>24</volume><issue>1</issue><fpage>143</fpage><pub-id pub-id-type="doi">10.1186/s12909-024-05125-7</pub-id><pub-id pub-id-type="medline">38355517</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>von Elm</surname><given-names>E</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Egger</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pocock</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>G&#x00F8;tzsche</surname><given-names>PC</given-names> </name><name 
name-style="western"><surname>Vandenbroucke</surname><given-names>JP</given-names> </name></person-group><article-title>The Strengthening the Reporting of Observational Studies in Epidemiology (STROBE) statement: guidelines for reporting observational studies</article-title><source>Lancet</source><year>2007</year><month>10</month><volume>370</volume><issue>9596</issue><fpage>1453</fpage><lpage>1457</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(07)61602-X</pub-id><pub-id pub-id-type="medline">18064739</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="web"><article-title>NCLEX practice questions test bank for free</article-title><source>Nurseslabs</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://nurseslabs.com/nclex-practice-questions">https://nurseslabs.com/nclex-practice-questions</ext-link></comment></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="web"><article-title>National nursing licensure examination</article-title><source>Baidu</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://wenku.baidu.com/search?word=%E5%9B%BD%E5%AE%B6%E6%8A%A4%E7%90%86%E8%B5%84%E6%A0%BC%E8%80%83%E8%AF%95&#x0026;searchType=0&#x0026;lm=0&#x0026;od=0&#x0026;fr=search&#x0026;ie=utf-8&#x0026;_wkts_=1711005119260&#x0026;bdQuery=%E7%99%BE%E5%BA%A6%E6%96%87%E5%BA%93&#x0026;wkQuery=%E5%9B%BD%E5%AE%B6%E6%8A%A4%E7%90%86%E8%B5%84%E6%A0%BC%E8%80%83%E8%AF%952022">https://wenku.baidu.com/search?word=%E5%9B%BD%E5%AE%B6%E6%8A%A4%E7%90%86%E8%B5%84%E6%A0%BC%E8%80%83%E8%AF%95&#x0026;searchType=0&#x0026;lm=0&#x0026;od=0&#x0026;fr=search&#x0026;ie=utf-8&#x0026;_wkts_=1711005119260&#x0026;bdQuery=%E7%99%BE%E5%BA%A6%E6%96%87%E5%BA%93&#x0026;wkQuery=%E5%9B%BD%E5%AE%B6%E6%8A%A4%E7%90%86%E8%B5%84%E6%A0%BC%E8%80%83%E8%AF%952022</ext-link></comment></nlm-citation></ref><ref 
id="ref33"><label>33</label><nlm-citation citation-type="web"><article-title>ChatGPT 4.0</article-title><source>OpenAI</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://chat.openai.com/?model=gpt-4">https://chat.openai.com/?model=gpt-4</ext-link></comment></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="web"><article-title>ChatGPT 3.5</article-title><source>OpenAI</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://chat.openai.com/?model=text-davinci-002-render-sha">https://chat.openai.com/?model=text-davinci-002-render-sha</ext-link></comment></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="web"><article-title>Google Bard</article-title><source>Google</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://bard.google.com">https://bard.google.com</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><article-title>Venny2.1</article-title><source>Bioinfogp</source><year>2024</year><access-date>2024-09-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://bioinfogp.cnb.csic.es/tools/venny/index.html">https://bioinfogp.cnb.csic.es/tools/venny/index.html</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Flowers</surname><given-names>M</given-names> </name><name name-style="western"><surname>Olenick</surname><given-names>M</given-names> </name><name name-style="western"><surname>Maltseva</surname><given-names>T</given-names> </name><name name-style="western"><surname>Simon</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Diez-Sampedro</surname><given-names>A</given-names> </name><name name-style="western"><surname>Allen</surname><given-names>LR</given-names> </name></person-group><article-title>Academic factors predicting NCLEX-RN success</article-title><source>Nurs Educ Perspect</source><year>2022</year><volume>43</volume><issue>2</issue><fpage>112</fpage><lpage>114</lpage><pub-id pub-id-type="doi">10.1097/01.NEP.0000000000000788</pub-id><pub-id pub-id-type="medline">35192289</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Levant</surname><given-names>B</given-names> </name><name name-style="western"><surname>Z&#x00FC;ckert</surname><given-names>W</given-names> </name><name name-style="western"><surname>Paolo</surname><given-names>A</given-names> </name></person-group><article-title>Post-exam feedback with question rationales improves re-test performance of medical students on a multiple-choice exam</article-title><source>Adv Health Sci Educ Theory Pract</source><year>2018</year><month>12</month><volume>23</volume><issue>5</issue><fpage>995</fpage><lpage>1003</lpage><pub-id pub-id-type="doi">10.1007/s10459-018-9844-z</pub-id><pub-id pub-id-type="medline">30043313</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghorashi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Ismail</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ghosh</surname><given-names>P</given-names> </name><name name-style="western"><surname>Sidawy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Javan</surname><given-names>R</given-names> </name></person-group><article-title>AI-powered chatbots in medical education: potential 
applications and implications</article-title><source>Cureus</source><year>2023</year><month>08</month><volume>15</volume><issue>8</issue><fpage>e43271</fpage><pub-id pub-id-type="doi">10.7759/cureus.43271</pub-id><pub-id pub-id-type="medline">37692629</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahmed</surname><given-names>SK</given-names> </name></person-group><article-title>The impact of ChatGPT on the nursing profession: revolutionizing patient care and education</article-title><source>Ann Biomed Eng</source><year>2023</year><month>11</month><volume>51</volume><issue>11</issue><fpage>2351</fpage><lpage>2352</lpage><pub-id pub-id-type="doi">10.1007/s10439-023-03262-6</pub-id><pub-id pub-id-type="medline">37266721</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benary</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>XD</given-names> </name><name name-style="western"><surname>Schmidt</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Leveraging large language models for decision support in personalized oncology</article-title><source>JAMA Netw Open</source><year>2023</year><month>11</month><day>1</day><volume>6</volume><issue>11</issue><fpage>e2343689</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.43689</pub-id><pub-id pub-id-type="medline">37976064</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sharma</surname><given-names>S</given-names> </name></person-group><article-title>A holistic 
approach to remote patient monitoring, fueled by ChatGPT and metaverse technology: the future of nursing education</article-title><source>Nurse Educ Today</source><year>2023</year><month>12</month><volume>131</volume><fpage>105972</fpage><pub-id pub-id-type="doi">10.1016/j.nedt.2023.105972</pub-id><pub-id pub-id-type="medline">37757713</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baker</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Dwyer</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kalidoss</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hynes</surname><given-names>K</given-names> </name><name name-style="western"><surname>Wolf</surname><given-names>J</given-names> </name><name name-style="western"><surname>Strelzow</surname><given-names>JA</given-names> </name></person-group><article-title>ChatGPT&#x2019;s ability to assist with clinical documentation: a randomized controlled trial</article-title><source>J Am Acad Orthop Surg</source><year>2024</year><month>02</month><day>1</day><volume>32</volume><issue>3</issue><fpage>123</fpage><lpage>129</lpage><pub-id pub-id-type="doi">10.5435/JAAOS-D-23-00474</pub-id><pub-id pub-id-type="medline">37976385</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fija&#x010D;ko</surname><given-names>N</given-names> </name><name name-style="western"><surname>Prosen</surname><given-names>G</given-names> </name><name name-style="western"><surname>Abella</surname><given-names>BS</given-names> </name><name name-style="western"><surname>Metli&#x010D;ar</surname><given-names>&#x0160;</given-names> </name><name 
name-style="western"><surname>&#x0160;tiglic</surname><given-names>G</given-names> </name></person-group><article-title>Can novel multimodal chatbots such as bing chat enterprise, ChatGPT-4 Pro, and Google Bard correctly interpret electrocardiogram images?</article-title><source>Resuscitation</source><year>2023</year><month>12</month><volume>193</volume><fpage>110009</fpage><pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.110009</pub-id><pub-id pub-id-type="medline">37884222</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tsoutsanis</surname><given-names>P</given-names> </name><name name-style="western"><surname>Tsoutsanis</surname><given-names>A</given-names> </name></person-group><article-title>Evaluation of large language model performance on the multi-specialty recruitment assessment (MSRA) exam</article-title><source>Comput Biol Med</source><year>2024</year><month>01</month><volume>168</volume><fpage>107794</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107794</pub-id><pub-id pub-id-type="medline">38043471</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Giannakopoulos</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kavadella</surname><given-names>A</given-names> </name><name name-style="western"><surname>Aaqel Salim</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stamatopoulos</surname><given-names>V</given-names> </name><name name-style="western"><surname>Kaklamanos</surname><given-names>EG</given-names> </name></person-group><article-title>Evaluation of the performance of generative AI large language models ChatGPT, Google Bard, and Microsoft Bing Chat in supporting evidence-based dentistry: comparative mixed methods 
study</article-title><source>J Med Internet Res</source><year>2023</year><month>12</month><day>28</day><volume>25</volume><fpage>e51580</fpage><pub-id pub-id-type="doi">10.2196/51580</pub-id><pub-id pub-id-type="medline">38009003</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amin</surname><given-names>KS</given-names> </name><name name-style="western"><surname>Davis</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Doshi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Haims</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Khosla</surname><given-names>P</given-names> </name><name name-style="western"><surname>Forman</surname><given-names>HP</given-names> </name></person-group><article-title>Accuracy of ChatGPT, Google Bard, and Microsoft Bing for simplifying radiology reports</article-title><source>Radiology</source><year>2023</year><month>11</month><volume>309</volume><issue>2</issue><fpage>e232561</fpage><pub-id pub-id-type="doi">10.1148/radiol.232561</pub-id><pub-id pub-id-type="medline">37987662</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rau</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rau</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zoeller</surname><given-names>D</given-names> </name><etal/></person-group><article-title>A context-based chatbot surpasses trained radiologists and generic ChatGPT in following the ACR appropriateness guidelines</article-title><source>Radiology</source><year>2023</year><month>07</month><volume>308</volume><issue>1</issue><fpage>e230970</fpage><pub-id pub-id-type="doi">10.1148/radiol.230970</pub-id><pub-id 
pub-id-type="medline">37489981</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sahin</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Sozer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kuzucu</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Beyond human in neurosurgical exams: ChatGPT&#x2019;s success in the Turkish neurosurgical society proficiency board exams</article-title><source>Comput Biol Med</source><year>2024</year><month>02</month><volume>169</volume><fpage>107807</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107807</pub-id><pub-id pub-id-type="medline">38091727</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ying</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>H</given-names> </name></person-group><article-title>ChatGPT&#x2019;s potential role in non-English-speaking outpatient clinic settings</article-title><source>Digit Health</source><year>2023</year><month>06</month><day>26</day><volume>9</volume><fpage>20552076231184091</fpage><pub-id pub-id-type="doi">10.1177/20552076231184091</pub-id><pub-id pub-id-type="medline">37434733</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mohammad</surname><given-names>B</given-names> </name><name name-style="western"><surname>Supti</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Alzubaidi</surname><given-names>M</given-names> </name><etal/></person-group><article-title>The pros and cons of using ChatGPT in medical education: a scoping review</article-title><source>Stud Health Technol Inform</source><year>2023</year><month>06</month><day>29</day><volume>305</volume><fpage>644</fpage><lpage>647</lpage><pub-id pub-id-type="doi">10.3233/SHTI230580</pub-id><pub-id pub-id-type="medline">37387114</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adhikari</surname><given-names>K</given-names> </name><name name-style="western"><surname>Naik</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hameed</surname><given-names>BZ</given-names> </name><name name-style="western"><surname>Raghunath</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Somani</surname><given-names>BK</given-names> </name></person-group><article-title>Exploring the ethical, legal, and social implications of ChatGPT in urology</article-title><source>Curr Urol Rep</source><year>2024</year><month>01</month><volume>25</volume><issue>1</issue><fpage>1</fpage><lpage>8</lpage><pub-id pub-id-type="doi">10.1007/s11934-023-01185-2</pub-id><pub-id pub-id-type="medline">37735339</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><article-title>Tools such as ChatGPT threaten transparent science; here are our ground rules for their use</article-title><source>Nature</source><year>2023</year><month>01</month><volume>613</volume><issue>7945</issue><fpage>612</fpage><pub-id pub-id-type="doi">10.1038/d41586-023-00191-1</pub-id><pub-id pub-id-type="medline">36694020</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>File for the original dataset.</p><media 
xlink:href="mededu_v10i1e52746_app1.xlsx" xlink:title="XLSX File, 55 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>STROBE checklist cross-sectional.</p><media xlink:href="mededu_v10i1e52746_app2.docx" xlink:title="DOCX File, 32 KB"/></supplementary-material></app-group></back></article>