<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="letter"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn></journal-meta><article-meta><article-id pub-id-type="publisher-id">54283</article-id><article-id pub-id-type="doi">10.2196/54283</article-id><title-group><article-title>The Performance of ChatGPT-4V in Interpreting Images and Tables in the Japanese Medical Licensing Exam</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Takagi</surname><given-names>Soshi</given-names></name><degrees>BA</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Koda</surname><given-names>Masahide</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Watari</surname><given-names>Takashi</given-names></name><degrees>MHQS, MD, PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Faculty of Medicine, Shimane University</institution>, <addr-line>Izumo</addr-line>, <country>Japan</country></aff><aff id="aff2"><institution>Co-learning Community Healthcare Re-innovation Office, Graduate School of Medicine, Dentistry and Pharmaceutical Sciences, Okayama University</institution>, <addr-line>Okayama</addr-line>, <country>Japan</country></aff><aff id="aff3"><institution>General Medicine Center, Shimane University Hospital</institution>, <addr-line>Izumo</addr-line>, <country>Japan</country></aff><aff id="aff4"><institution>Integrated Clinical Education Center, Kyoto University Hospital</institution>, <addr-line>Kyoto</addr-line>, <country>Japan</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Eysenbach</surname><given-names>Gunther</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Cardoso</surname><given-names>Taiane de Azevedo</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Liu</surname><given-names>Fuxiao</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Zhu</surname><given-names>Lingxuan</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Ma</surname><given-names>Tianyu</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Takashi Watari, MHQS, MD, PhD<email>wataritari@gmail.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date 
pub-type="collection"><year>2024</year></pub-date><pub-date pub-type="epub"><day>23</day><month>5</month><year>2024</year></pub-date><volume>10</volume><elocation-id>e54283</elocation-id><history><date date-type="received"><day>06</day><month>11</month><year>2023</year></date><date date-type="rev-recd"><day>09</day><month>04</month><year>2024</year></date><date date-type="accepted"><day>22</day><month>04</month><year>2024</year></date></history><copyright-statement>&#x00A9; Soshi Takagi, Masahide Koda, Takashi Watari. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 23.5.2024. </copyright-statement><copyright-year>2024</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2024/1/e54283"/><kwd-group><kwd>ChatGPT</kwd><kwd>medical licensing examination</kwd><kwd>generative artificial intelligence</kwd><kwd>medical education</kwd><kwd>large language model</kwd><kwd>images</kwd><kwd>tables</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>Japanese</kwd><kwd>reliability</kwd><kwd>medical application</kwd><kwd>medical applications</kwd><kwd>diagnostic</kwd><kwd>diagnostics</kwd><kwd>online data</kwd><kwd>web-based data</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>OpenAI&#x2019;s ChatGPT, a leading large language model (LLM), has shown promise for medical purposes. The program can pass the United States Medical Licensing Examination (USMLE) and the Japanese Medical Licensing Exam (JMLE) [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. However, previous studies regarding this software have focused on its text-based capabilities. ChatGPT-4 Vision (ChatGPT-4V), announced on September 25, 2023, includes image input features, potentially expanding the medical applications of the program [<xref ref-type="bibr" rid="ref4">4</xref>]. To assess the multimodal performance of ChatGPT-4V in medicine, its performance on JMLE questions involving clinical images and tables was tested.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>ChatGPT-4V was used to complete the 117th JMLE in the Japanese language (Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Its responses were compared to the passing criteria and mean human examinee score of the JMLE. 
This study, conducted from October 12 to 14, 2023, used the September 25, 2023, version of the LLM (ChatGPT-4V) with a knowledge cutoff date of January 2022 (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> [<xref ref-type="bibr" rid="ref5">5</xref>]). Human examinees&#x2019; correct response rates were obtained from statistics compiled by medu4, a preparatory school for the JMLE, based on reports from actual JMLE examinees [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>].</p></sec><sec id="s2-2"><title>Statistical Analysis</title><p>The mean correct response rates and their 95% CIs are reported. A one-sample proportion test was used to compare the correct response rate of the human examinees with that of ChatGPT-4V. Statistical significance was set at <italic>P</italic>&#x003C;.05 for all 2-tailed tests. All statistical analyses were conducted using Stata statistical software (version 17; StataCorp).</p></sec><sec id="s2-3"><title>Ethical Considerations</title><p>This study used publicly available web-based data and did not include human participants. Therefore, Shimane University&#x2019;s Institutional Review Board did not mandate ethics approval.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Evaluation Outcomes</title><p>Responses to 386 questions from the 117th JMLE were analyzed in this study. Using the Ministry of Health, Labour and Welfare criteria, ChatGPT-4V scored 85.1% on the essential knowledge section and 76.5% on the other sections of the JMLE, meeting the passing criteria [<xref ref-type="bibr" rid="ref6">6</xref>]. For text-only questions, ChatGPT-4V achieved a correct response rate of 84.5%, similar to the mean human examinee score (<xref ref-type="table" rid="table1">Table 1</xref>). The correct response rate for questions with images was 71.9% for ChatGPT-4V, 13.1 percentage points lower than the mean human examinee score (<italic>P</italic>&#x003C;.001).
The correct response rate for questions with tables (including figures) was 35.0% for ChatGPT-4V, which was significantly lower than the mean human examinee score (83.9%; <italic>P</italic>&#x003C;.001).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Correct response rates of ChatGPT-4 Vision (ChatGPT-4V) and human examinees on the Japanese Medical Licensing Examination (JMLE).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Characteristics</td><td align="left" valign="bottom" colspan="2">Total, n (%)</td><td align="left" valign="bottom">Examinees<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup>, mean</td><td align="left" valign="bottom">GPT-4V, mean</td><td align="left" valign="bottom">95% CI</td><td align="left" valign="bottom">Difference</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="bottom" colspan="2">All questions</td><td align="left" valign="top" colspan="2">386 (100)</td><td align="left" valign="top">84.9</td><td align="left" valign="top">78.2</td><td align="left" valign="top">74.1-82.4</td><td align="left" valign="top">&#x2212;6.7</td><td align="left" valign="top">.003</td></tr><tr><td align="left" valign="top" colspan="9"><bold>Question category</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Essential knowledge</td><td align="left" valign="top" colspan="2">96 (24.9)</td><td align="left" valign="top">89.6</td><td align="left" valign="top">83.3</td><td align="left" valign="top">75.9-90.8</td><td align="left" valign="top">&#x2212;6.3</td><td align="left" valign="top">.04</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">General clinical knowledge</td><td align="left" valign="top" colspan="2">144 (37.3)</td><td align="left" valign="top">83.1</td><td align="left" valign="top">70.8</td><td align="left" valign="top">63.4-78.3</td><td align="left" valign="top">&#x2212;12.3</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Specific diseases</td><td align="left" valign="top" colspan="2">146 (37.8)</td><td align="left" valign="top">83.5</td><td align="left" valign="top">82.2</td><td align="left" valign="top">76.0-88.4</td><td align="left" valign="top">&#x2212;1.3</td><td align="left" valign="top">.67</td></tr><tr><td align="left" valign="top" colspan="9"><bold>Type</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">General</td><td align="left" valign="top" colspan="2">190 (49.2)</td><td align="left" valign="top">84.6</td><td align="left" valign="top">78.9</td><td align="left" valign="top">73.2-84.7</td><td align="left" valign="top">&#x2212;5.7</td><td align="left" valign="top">.03</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Clinical</td><td align="left" valign="top" colspan="2">149 (38.6)</td><td align="left" valign="top">84.1</td><td align="left" valign="top">77.2</td><td align="left" valign="top">70.4-83.0</td><td align="left" valign="top">&#x2212;6.9</td><td align="left" valign="top">.02</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Clinical sentence</td><td align="left" valign="top" colspan="2">47 (12.2)</td><td align="left" valign="top">88.5</td><td align="left" valign="top">78.7</td><td align="left" valign="top">67.0-90.4</td><td align="left" valign="top">&#x2212;9.8</td><td align="left" 
valign="top">.04</td></tr><tr><td align="left" valign="top" colspan="4"><bold>Imaging and table questions</bold></td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Text only</td><td align="left" valign="top" colspan="2">252 (65.3)</td><td align="left" valign="top">84.9</td><td align="left" valign="top">84.5</td><td align="left" valign="top">80.1-89.0</td><td align="left" valign="top">&#x2212;0.4</td><td align="left" valign="top">.87</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">With images</td><td align="left" valign="top" colspan="2">114 (29.5)</td><td align="left" valign="top">85.0</td><td align="left" valign="top">71.9</td><td align="left" valign="top">63.7-80.2</td><td align="left" valign="top">&#x2212;13.1</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">With tables</td><td align="left" valign="top" colspan="2">20 (5.2)</td><td align="left" valign="top">83.9</td><td align="left" valign="top">35.0</td><td align="left" valign="top">14.1-55.9</td><td align="left" valign="top">&#x2212;48.9</td><td align="left" valign="top">&#x003C;.001</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>The correct response rates of human examinees are based on a survey of actual human examinees, reported by medu4, a preparatory school for the JMLE [<xref ref-type="bibr" rid="ref5">5</xref>].</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Results</title><p>Although ChatGPT-4V demonstrated proficiency in text-centric questions, the correct response rates were significantly lower for image and table-oriented questions. ChatGPT-4V may have poorer text comprehension skills compared to ChatGPT-4, even when image processing is not required [<xref ref-type="bibr" rid="ref7">7</xref>]. Additionally, a language bias may obscure the image context when interpreting images and texts simultaneously, potentially leading to an overreliance on prior text information, even when it contradicts the image context, a phenomenon called &#x201C;hallucination&#x201D; [<xref ref-type="bibr" rid="ref8">8</xref>]. These factors may have led to ChatGPT-4V&#x2019;s lower rate of correct responses to questions involving images.</p><p>Furthermore, responding to questions with tables requires interpreting the Japanese characters within the tables. OpenAI has verified that its GPT-4V model misrecognizes symbols, including image characters [<xref ref-type="bibr" rid="ref4">4</xref>]. Previous studies have noted that GPT-4V relies on text-based information rather than an analysis of tables when answering questions [<xref ref-type="bibr" rid="ref8">8</xref>]. In addition, the program&#x2019;s performance diminishes when interpreting characters in non-Latin languages [<xref ref-type="bibr" rid="ref9">9</xref>]. These factors may explain the observed decline in performance when interpreting tables containing Japanese characters.</p><p>The multimodal LLM GPT-4V is unreliable in interpreting information presented in image or tables, especially for medical purposes [<xref ref-type="bibr" rid="ref4">4</xref>]. 
Further development of the program is required for diagnostic applications.</p></sec><sec id="s4-2"><title>Limitations</title><p>This study has several limitations. First, different results may be obtained even with the same methods, owing to the inherent randomness of ChatGPT&#x2019;s outputs or to version changes. A report indicates that test results can vary with repeated responses from ChatGPT [<xref ref-type="bibr" rid="ref10">10</xref>]. Furthermore, blank spaces were not removed from the images provided to ChatGPT, so the quality of the submitted images could also have affected the outcomes. Second, the JMLE includes options that, if selected twice or more, will result in failure. However, these options are not publicly disclosed and therefore could not be accounted for in this study [<xref ref-type="bibr" rid="ref5">5</xref>]. Finally, although this study focused on ChatGPT, ongoing advancements in other multimodal LLMs should also be considered.</p></sec><sec id="s4-3"><title>Conclusions</title><p>ChatGPT-4V successfully passed the 117th JMLE, an examination that includes image- and table-based questions. However, further development is needed to improve its ability to interpret tables. Further research should assess the safety and efficacy of ChatGPT-4V as a multimodal LLM in supporting medical practice, facilitating learning in clinical environments, and advancing medical education.</p></sec></sec></body><back><ack><p>We would like to thank Dr Kota Sakaguchi, Shimane University Hospital, for his careful support throughout this study. We would also like to thank Dr Sanjay Saint, a professor at the University of Michigan, for his numerous contributions and support in this work.</p></ack><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">ChatGPT-4V</term><def><p>ChatGPT-4 Vision</p></def></def-item><def-item><term id="abb2">JMLE</term><def><p>Japanese Medical Licensing Examination</p></def></def-item><def-item><term id="abb3">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb4">USMLE</term><def><p>United States Medical Licensing Examination</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>Introducing ChatGPT</article-title><source>OpenAI</source><year>2022</year><access-date>2023-11-30</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://openai.com/blog/chatgpt/">https://openai.com/blog/chatgpt/</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gilson</surname><given-names>A</given-names></name><name name-style="western"><surname>Safranek</surname><given-names>CW</given-names></name><name name-style="western"><surname>Huang</surname><given-names>T</given-names></name><etal/></person-group><article-title>How does ChatGPT perform on the United States Medical Licensing Examination?
The implications of large language models for medical education and knowledge assessment</article-title><source>JMIR Med Educ</source><year>2023</year><month>02</month><day>8</day><volume>9</volume><fpage>e45312</fpage><pub-id pub-id-type="doi">10.2196/45312</pub-id><pub-id pub-id-type="medline">36753318</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Takagi</surname><given-names>S</given-names></name><name name-style="western"><surname>Watari</surname><given-names>T</given-names></name><name name-style="western"><surname>Erabi</surname><given-names>A</given-names></name><name name-style="western"><surname>Sakaguchi</surname><given-names>K</given-names></name></person-group><article-title>Performance of GPT-3.5 and GPT-4 on the Japanese Medical Licensing Examination: comparison study</article-title><source>JMIR Med Educ</source><year>2023</year><month>06</month><day>29</day><volume>9</volume><fpage>e48002</fpage><pub-id pub-id-type="doi">10.2196/48002</pub-id><pub-id pub-id-type="medline">37384388</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><article-title>GPT-4V(Ision) system card</article-title><source>OpenAI</source><year>2023</year><access-date>2023-10-26</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://cdn.openai.com/papers/GPTV_System_Card.pdf">https://cdn.openai.com/papers/GPTV_System_Card.pdf</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="web"><article-title>Announcement of successful passage of the 117th National Medical Examination (Japanese) [Article in Japanese]</article-title><source>Ministry of Health</source><year>2023</year><access-date>2023-10-26</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.mhlw.go.jp/general/sikaku/successlist/2023/siken01/about.html">https://www.mhlw.go.jp/general/sikaku/successlist/2023/siken01/about.html</ext-link></comment></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="web"><article-title>Searching questions [Article in Japanese]</article-title><source>Medu4</source><year>2023</year><access-date>2023-10-26</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://medu4.com/quizzes/search">https://medu4.com/quizzes/search</ext-link></comment></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>Y</given-names></name><name name-style="western"><surname>Wang</surname><given-names>S</given-names></name><name name-style="western"><surname>Yang</surname><given-names>H</given-names></name><etal/></person-group><article-title>An early evaluation of GPT-4V(ision)</article-title><source>arXiv</source><access-date>2024-05-14</access-date><comment>Preprint posted online on  Oct 25, 2023</comment><comment><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2310.16534">https://arxiv.org/abs/2310.16534</ext-link></comment></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>F</given-names></name><name name-style="western"><surname>Lin</surname><given-names>K</given-names></name><name 
name-style="western"><surname>Li</surname><given-names>L</given-names></name><name name-style="western"><surname>Wang</surname><given-names>J</given-names></name><name name-style="western"><surname>Yacoob</surname><given-names>Y</given-names></name><name name-style="western"><surname>Wang</surname><given-names>L</given-names></name></person-group><article-title>Mitigating Hallucination in Large Multi-Modal Models via Robust Instruction Tuning</article-title><source>arXiv</source><access-date>2024-05-14</access-date><comment>Preprint posted online on  Jun 26, 2023</comment><comment><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2306.14565">https://arxiv.org/abs/2306.14565</ext-link></comment></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>Y</given-names></name><name name-style="western"><surname>Peng</surname><given-names>D</given-names></name><name name-style="western"><surname>Liao</surname><given-names>W</given-names></name><name name-style="western"><surname>Lin</surname><given-names>Z</given-names></name><name name-style="western"><surname>Chen</surname><given-names>X</given-names></name><name name-style="western"><surname>Liu</surname><given-names>C</given-names></name><etal/></person-group><article-title>Exploring OCR capabilities of GPT-4V(ision): a quantitative and in-depth evaluation</article-title><source>arXiv</source><access-date>2024-05-14</access-date><comment>Preprint posted online on  Oct 25, 2023</comment><comment><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2310.16809">https://arxiv.org/abs/2310.16809</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>L</given-names></name><name name-style="western"><surname>Mou</surname><given-names>W</given-names></name><name name-style="western"><surname>Yang</surname><given-names>T</given-names></name><name name-style="western"><surname>Chen</surname><given-names>R</given-names></name></person-group><article-title>ChatGPT can pass the AHA exams: open-ended questions outperform multiple-choice format</article-title><source>Resuscitation</source><year>2023</year><month>07</month><volume>188</volume><fpage>109783</fpage><pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.109783</pub-id><pub-id pub-id-type="medline">37349064</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Additional statistics.</p><media xlink:href="mededu_v10i1e54283_app1.docx" xlink:title="DOCX File, 2285 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Detailed methodology.</p><media xlink:href="mededu_v10i1e54283_app2.docx" xlink:title="DOCX File, 17 KB"/></supplementary-material></app-group></back></article>