<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JME</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id>
      <journal-title>JMIR Medical Education</journal-title>
      <issn pub-type="epub">2369-3762</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e51199</article-id>
      <article-id pub-id-type="pmid">38153778</article-id>
      <article-id pub-id-type="doi">10.2196/51199</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Empathy and Equity: Key Considerations for Large Language Model Adoption in Health Care</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Venkatesh</surname>
            <given-names>Kaushik</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Tan</surname>
            <given-names>Si Ying</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bizzo</surname>
            <given-names>Bernardo</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Cheng</surname>
            <given-names>Yih-Dih</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhu</surname>
            <given-names>Lingxuan</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Koranteng</surname>
            <given-names>Erica</given-names>
          </name>
          <degrees>MBChB, MBE</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1332-0383</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Rao</surname>
            <given-names>Arya</given-names>
          </name>
          <degrees>BA</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3007-4812</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Flores</surname>
            <given-names>Efren</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1398-0426</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lev</surname>
            <given-names>Michael</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0236-7319</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Landman</surname>
            <given-names>Adam</given-names>
          </name>
          <degrees>MD, MIS, MHS, MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2166-0521</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Dreyer</surname>
            <given-names>Keith</given-names>
          </name>
          <degrees>PhD, DO</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1207-6443</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Succi</surname>
            <given-names>Marc</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Massachusetts General Hospital</institution>
            <addr-line>55 Fruit St</addr-line>
            <addr-line>Boston, 02114</addr-line>
            <country>United States</country>
            <phone>1 617 935 9144</phone>
            <email>msucci@mgh.harvard.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1518-3984</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Harvard Medical School</institution>
        <addr-line>Boston, MA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Massachusetts General Hospital</institution>
        <addr-line>Boston</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Marc Succi <email>msucci@mgh.harvard.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>28</day>
        <month>12</month>
        <year>2023</year>
      </pub-date>
      <volume>9</volume>
      <elocation-id>e51199</elocation-id>
      <history>
        <date date-type="received">
          <day>24</day>
          <month>7</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>28</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>1</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>14</day>
          <month>10</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Erica Koranteng, Arya Rao, Efren Flores, Michael Lev, Adam Landman, Keith Dreyer, Marc Succi. Originally published in JMIR Medical Education (https://mededu.jmir.org), 28.12.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on https://mededu.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mededu.jmir.org/2023/1/e51199" xlink:type="simple"/>
      <abstract>
        <p>The growing presence of large language models (LLMs) in health care applications holds significant promise for innovative advancements in patient care. However, concerns about ethical implications and potential biases have been raised by various stakeholders. Here, we evaluate the ethics of LLMs in medicine along 2 key axes: empathy and equity. We outline the importance of these factors in novel models of care and develop frameworks for addressing these alongside LLM deployment.</p>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT</kwd>
        <kwd>AI</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>large language models</kwd>
        <kwd>LLMs</kwd>
        <kwd>ethics</kwd>
        <kwd>empathy</kwd>
        <kwd>equity</kwd>
        <kwd>bias</kwd>
        <kwd>language model</kwd>
        <kwd>health care application</kwd>
        <kwd>patient care</kwd>
        <kwd>care</kwd>
        <kwd>development</kwd>
        <kwd>framework</kwd>
        <kwd>model</kwd>
        <kwd>ethical implication</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The rapid proliferation of applications that leverage the ability of large language models (LLMs) to use large amounts of complex information to find relevant patterns and apply them to novel use cases promises great innovation in health care and many other sectors. Many health care applications, such as clinical decision support, patient education, electronic health records (EHRs), and workflow optimization, have been proposed [<xref ref-type="bibr" rid="ref1">1</xref>]. Despite the immense potential advantages of this technology, various key stakeholders have raised concerns regarding its ethical implications and potential perpetuation of existing biases and structural barriers [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref6">6</xref>]. Furthermore, its growing usage in the health care setting also raises the concern of transparency or disclosure about its use and role in patient management. Ethically incorporating LLMs into health care delivery requires honest dialogue about the principles we aim to uphold in patient care and a comprehensive analysis of the various ways in which LLMs could bolster or impair these.</p>
      <p>Studies have demonstrated the utility of LLMs as a clinical decision support tool in various settings, including in triage, diagnostics, and treatment [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. While LLMs show great promise in improving the efficiency of clinical workflows, they lack one key facet of physician-patient encounters: empathy. Though LLMs can be trained to use empathetic language [<xref ref-type="bibr" rid="ref12">12</xref>] and have been able to use empathetic language in patient interactions [<xref ref-type="bibr" rid="ref13">13</xref>], this concept of artificial empathy is easily distinguishable from real empathy from a patient’s perspective, and real empathy matters to patients [<xref ref-type="bibr" rid="ref14">14</xref>]. The concept of artificial empathy, which aims to imbue artificial intelligence (AI) with human-like empathy, ought not to be considered interchangeable with human empathy. Efforts made to design artificial empathy, while commendable, should aim to be complementary to human empathy in order to avoid further isolating patients in their time of need by destroying the therapeutic alliance between patients and physicians [<xref ref-type="bibr" rid="ref15">15</xref>]. Loneliness is one of the key public health crises of our time, and conflating technology with human-to-human interaction will only exacerbate this [<xref ref-type="bibr" rid="ref16">16</xref>]. Empathic care for patients should be one of the core mandates of the health care sector, and true empathy requires human connection. Therefore, while LLMs show great promise in clinical workflows, they should augment, rather than replace, physician-led care (<xref ref-type="table" rid="table1">Table 1</xref>).</p>
      <p>In addition to empathy, equity is crucial in novel models of care. The current most popular LLMs, including ChatGPT, Bard, Med-PaLM, and others, are trained on vast sources of data, including wide swaths of the internet. These sources are rife with inherent biases and lack transparency regarding the contents of the training data sets. They also lack specific evaluation of model biases, which may be harbingers of ethical dilemmas via the rapid incorporation of LLMs into clinical spaces. While there is little consensus regarding the degree of bias in current LLMs, in most embedding models, which have similar underlying architecture, there is evidence of racial, gender, and age bias [<xref ref-type="bibr" rid="ref17">17</xref>]. LLMs have been demonstrated to associate negative terms with given names that are popular among the African American community, as well as with the masculine poles of most gender axes [<xref ref-type="bibr" rid="ref17">17</xref>]. Until systematic evaluation of LLMs is performed in clinical use cases to understand and mitigate biases against vulnerable demographics, careful risk-benefit calculations and a regulatory framework should be implemented by relevant governing bodies before LLMs are permitted in clinical care. This framework must ensure that these models are improving health care delivery and outcomes for all. Importantly, the US Food and Drug Administration lacks a robust authorization pathway for software as a medical device; this in itself is challenging, and given the rapid development of LLMs, would benefit from expeditious guidelines [<xref ref-type="bibr" rid="ref18">18</xref>] (see <xref ref-type="table" rid="table2">Table 2</xref> for proactive measures to ensure the equitable incorporation of LLMs into health care).
Following a previously published ethical framework for integrating innovative domains into medicine, we suggest an LLM framework guided by Blythe et al [<xref ref-type="bibr" rid="ref19">19</xref>] grounded in principled primary motivations as detailed in <xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref>.</p>
      <p>Despite these ethical risks, the potential benefits of incorporating LLMs into health care are numerous. LLMs are adept at quickly synthesizing large amounts of complex data, which can form the basis for numerous applications in the health care sector, including the management and interpretation of EHRs and clinical notes, adjuncts for patient visits (eg, encounter transcription and patient translation), billing for medical services, patient education, and more [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. Thus, the key ethical question at hand is as follows: do the benefits outweigh the risks?</p>
      <p>From a utilitarian perspective, we must consider this question to not only enhance decision-making but also take advantage of opportunities to mitigate potential harms. Proposals for the incorporation of a systematized, frequently reevaluated method of bias evaluation into clinical applications of LLMs [<xref ref-type="bibr" rid="ref3">3</xref>], the addition of human verification steps at both the input and output stages for LLM-guided generation of clinical texts [<xref ref-type="bibr" rid="ref22">22</xref>], and the implementation of self-questioning—a novel prompting strategy that encourages prompt iteration to improve accuracy in a medical context—are all steps in the correct direction. Comprehensive frameworks that include the use of diverse training data sources and continuous evaluation of bias, such as those proposed by the World Economic Forum and the Coalition for Health AI, can provide useful guardrails as new proposals for ethical validation and have been tested [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Furthermore, ensuring that physicians are actively involved in the development and evaluation of LLMs for health care is essential in keeping with a physician-led approach. Strategies such as these are key in navigating the ethics of empathy and equity in the development of novel clinical technologies.</p>
      <p>It is essential to approach the ethical conundrums of LLM adoption in clinical care with a balanced perspective. LLMs that were built on data with inherent systemic biases must be implemented strategically into health care through a justice-oriented innovation lens to advance health equity. To keep pace with the accelerated adoption of LLMs in the clinic, ethical evaluations should be conducted together with an evaluation of use case efficacy to ensure both efficient and ethical health care. A complete assessment of the risks and benefits associated with this technology—an admittedly challenging task—may remain elusive if not tested in real-world settings. Clinical use cases of LLMs are already being tested; delaying collaboration among all stakeholders, including health care professionals, ethicists, AI researchers, and (crucially) patients, will only delay the discovery of potential harms. Real-world pilots, therefore, should be deployed alongside regular monitoring, oversight, and feedback from all parties. As we collectively seek to make full use of this exciting new technology, we must keep empathy and equity at the forefront of our minds.</p>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>Approaches to the incorporation of large language models (LLMs) in clinical care.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="340"/>
          <col width="330"/>
          <col width="330"/>
          <thead>
            <tr valign="top">
              <td>Approach</td>
              <td>Primary motivation</td>
              <td>Impact on empathy and health equity</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>LLM-led clinical care or patient-facing LLMs</td>
              <td>Advancement-driven: incorporation of new and sophisticated technologies mainly aimed at improving efficiency</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Perpetuates and exacerbates inequities and biases on which it was built, making it detrimental to achieving health equity</p>
                  </list-item>
                  <list-item>
                    <p>Replaces human empathy with artificial empathy, which threatens patient dignity</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Physician-led LLM incorporation in clinical care</td>
              <td>Holistic, equitable, and empathetic health care delivery</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Early recognition of ways in which models perpetuate inequity and appropriate measures to prevent this</p>
                  </list-item>
                  <list-item>
                    <p>Opportunity to actively leverage LLMs to mitigate existing inequities</p>
                  </list-item>
                  <list-item>
                    <p>Use of LLMs as tools in a physician’s toolkit allows more time to engage in empathetic dialogue with patients</p>
                  </list-item>
                </list>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Potential proactive measures for promoting equitable incorporation of large language models (LLMs) into clinical care.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="300"/>
          <col width="700"/>
          <thead>
            <tr valign="top">
              <td>Stakeholder</td>
              <td>Examples of proactive measures</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Regulatory bodies</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Development of robust regulations for software as a medical device that ensure appropriate strategies for (1) continuous evaluation of evolving technology and (2) assessment of use cases that have significant impact in health care given the broad capabilities of LLMs</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Professional societies</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Development and continuous updates of guidelines for equitable use of LLMs in health care</p>
                  </list-item>
                  <list-item>
                    <p>Allocation of grant funding toward projects that aim to use LLMs to ameliorate inequities</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Journals</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Prioritizing publications that focus on (1) novel methods of leveraging LLMs for equitable care delivery and (2) comparisons of use cases of LLMs for equitable care delivery</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Software developers and industry</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Collaboration with health care workers on model improvement strategies that improve health equity</p>
                  </list-item>
                </list>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">EHR</term>
          <def>
            <p>electronic health record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This project was supported in part by an award from the National Institute of General Medical Sciences (T32GM144273). The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institute of General Medical Sciences or the National Institutes of Health.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>EF is co-chair of the Radiological Society of North America (RSNA) Health Equity Committee; associate editor and editorial board member of the Journal of the American College of Radiology (JACR); has received speaker honoraria for academic Grand Rounds, from WebMD and from GO2 for Lung Cancer foundation; GO2 Foundation Travel support; grant funding from NCI K08 1K08CA270430-01A1. ML is a consultant for GE Healthcare and for Takeda, Roche, and SeaGen Pharma. AL is a consultant for the Abbott Medical Device Cybersecurity Council.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dave</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Athaluri</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT in medicine: an overview of its applications, advantages, limitations, future prospects, and ethical considerations</article-title>
          <source>Front Artif Intell</source>
          <year>2023</year>
          <month>5</month>
          <day>4</day>
          <volume>6</volume>
          <fpage>1169595</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37215063"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frai.2023.1169595</pub-id>
          <pub-id pub-id-type="medline">37215063</pub-id>
          <pub-id pub-id-type="pmcid">PMC10192861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rozado</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Wide range screening of algorithmic bias in word embedding models using large sentiment lexicons reveals underreported bias types</article-title>
          <source>PLoS One</source>
          <year>2020</year>
          <month>4</month>
          <day>21</day>
          <volume>15</volume>
          <issue>4</issue>
          <fpage>e0231189</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0231189"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0231189</pub-id>
          <pub-id pub-id-type="medline">32315320</pub-id>
          <pub-id pub-id-type="pii">PONE-D-19-18498</pub-id>
          <pub-id pub-id-type="pmcid">PMC7173861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garrido-Muñoz </surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Montejo-Ráez </surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Martínez-Santiago </surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ureña-López </surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>A survey on bias in deep NLP</article-title>
          <source>Appl Sci</source>
          <year>2021</year>
          <month>04</month>
          <day>02</day>
          <volume>11</volume>
          <issue>7</issue>
          <fpage>3184</fpage>
          <pub-id pub-id-type="doi">10.3390/app11073184</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vosoughi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Quantifying and alleviating political bias in language models</article-title>
          <source>Artificial Intelligence</source>
          <year>2022</year>
          <month>03</month>
          <volume>304</volume>
          <fpage>103654</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artint.2021.103654</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Moon</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Purkayastha</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Trivedi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gichoya</surname>
              <given-names>JW</given-names>
            </name>
          </person-group>
          <article-title>Ethics of large language models in medicine and medical research</article-title>
          <source>Lancet Digital Health</source>
          <year>2023</year>
          <month>06</month>
          <volume>5</volume>
          <issue>6</issue>
          <fpage>e333</fpage>
          <lpage>e335</lpage>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(23)00083-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meskó</surname>
              <given-names>Bertalan</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>The imperative for regulatory oversight of large language models (or generative AI) in healthcare</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <month>07</month>
          <day>06</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>120</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00873-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00873-0</pub-id>
          <pub-id pub-id-type="medline">37414860</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00873-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10326069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kamineni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Dreyer</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Evaluating GPT as an adjunct for radiologic decision making: GPT-4 versus GPT-3.5 in a breast imaging pilot</article-title>
          <source>J Am Coll Radiol</source>
          <year>2023</year>
          <month>10</month>
          <volume>20</volume>
          <issue>10</issue>
          <fpage>990</fpage>
          <lpage>997</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jacr.2023.05.003</pub-id>
          <pub-id pub-id-type="medline">37356806</pub-id>
          <pub-id pub-id-type="pii">S1546-1440(23)00394-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kamineni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Evaluating ChatGPT as an adjunct for radiologic decision-making</article-title>
          <source>medRxiv</source>
          <comment>Preprint posted online February 7, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1101/2023.02.02.23285399"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="medline">36798292</pub-id>
          <pub-id pub-id-type="pii">2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="pmcid">PMC9934725</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kamineni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Prasad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Landman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dreyer</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Assessing the utility of ChatGPT throughout the entire clinical workflow</article-title>
          <source>medRxiv</source>
          <comment>Preprint posted online February 26, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1101/2023.02.21.23285886"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.02.21.23285886</pub-id>
          <pub-id pub-id-type="medline">36865204</pub-id>
          <pub-id pub-id-type="pii">2023.02.21.23285886</pub-id>
          <pub-id pub-id-type="pmcid">PMC9980239</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Varney</surname>
              <given-names>ET</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>CI</given-names>
            </name>
          </person-group>
          <article-title>The potential for using ChatGPT to improve imaging appropriateness</article-title>
          <source>J Am Coll Radiol</source>
          <year>2023</year>
          <month>10</month>
          <volume>20</volume>
          <issue>10</issue>
          <fpage>988</fpage>
          <lpage>989</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jacr.2023.06.005</pub-id>
          <pub-id pub-id-type="medline">37400048</pub-id>
          <pub-id pub-id-type="pii">S1546-1440(23)00474-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chonde</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Pourvaziri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McGowan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Moskos</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alvarez</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Narayan</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Daye</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>RadTranslate: an artificial intelligence-powered intervention for urgent imaging to enhance care equity for patients with limited English proficiency during the COVID-19 pandemic</article-title>
          <source>J Am Coll Radiol</source>
          <year>2021</year>
          <month>07</month>
          <volume>18</volume>
          <issue>7</issue>
          <fpage>1000</fpage>
          <lpage>1008</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33609456"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jacr.2021.01.013</pub-id>
          <pub-id pub-id-type="medline">33609456</pub-id>
          <pub-id pub-id-type="pii">S1546-1440(21)00032-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7847389</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>IW</given-names>
            </name>
            <name name-style="western">
              <surname>Miner</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Atkins</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Althoff</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Human–AI collaboration enables more empathic conversations in text-based peer-to-peer mental health support</article-title>
          <source>Nat Mach Intell</source>
          <year>2023</year>
          <month>01</month>
          <day>23</day>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>46</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-022-00593-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayers</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Poliak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leas</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Kelley</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Faix</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Longhurst</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Hogarth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title>
          <source>JAMA Intern Med</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>183</volume>
          <issue>6</issue>
          <fpage>589</fpage>
          <lpage>596</lpage>
          <pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id>
          <pub-id pub-id-type="medline">37115527</pub-id>
          <pub-id pub-id-type="pii">2804309</pub-id>
          <pub-id pub-id-type="pmcid">PMC10148230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guidi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Traversa</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Empathy in patient care: from 'Clinical Empathy' to 'Empathic Concern'</article-title>
          <source>Med Health Care Philos</source>
          <year>2021</year>
          <month>12</month>
          <day>01</day>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>573</fpage>
          <lpage>585</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34196934"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11019-021-10033-4</pub-id>
          <pub-id pub-id-type="medline">34196934</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11019-021-10033-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC8557158</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smoktunowicz</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Barak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Andersson</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Banos</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Berger</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Botella</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dear</surname>
              <given-names>BF</given-names>
            </name>
            <name name-style="western">
              <surname>Donker</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ebert</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjistavropoulos</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hodgins</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Kaldo</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Mohr</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Nordgreen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Powers</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Riper</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ritterband</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Rozental</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schueller</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Titov</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Weise</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Carlbring</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Consensus statement on the problem of terminology in psychological interventions using the internet or digital components</article-title>
          <source>Internet Interv</source>
          <year>2020</year>
          <month>09</month>
          <volume>21</volume>
          <fpage>100331</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris.unibe.ch/id/eprint/146055"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.invent.2020.100331</pub-id>
          <pub-id pub-id-type="medline">32577404</pub-id>
          <pub-id pub-id-type="pii">S2214-7829(20)30013-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC7305336</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaffe</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>US Surgeon General: loneliness is a public health crisis</article-title>
          <source>Lancet</source>
          <year>2023</year>
          <month>05</month>
          <volume>401</volume>
          <issue>10388</issue>
          <fpage>1560</fpage>
          <pub-id pub-id-type="doi">10.1016/s0140-6736(23)00957-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nadeem</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bethke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>StereoSet: Measuring stereotypical bias in pretrained language models</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)</conf-name>
          <conf-date>2021</conf-date>
          <conf-loc>Online</conf-loc>
          <fpage>5356</fpage>
          <lpage>5371</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2021.acl-long.416</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dortche</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>McCarthy</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Banbury</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yannatos</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Promoting health equity through improved regulation of artificial intelligence medical devices</article-title>
          <source>JSPG</source>
          <year>2023</year>
          <month>01</month>
          <day>23</day>
          <volume>21</volume>
          <issue>3</issue>
          <pub-id pub-id-type="doi">10.38126/JSPG210302</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blythe</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Justice and innovation in radiology</article-title>
          <source>J Am Coll Radiol</source>
          <year>2023</year>
          <month>07</month>
          <volume>20</volume>
          <issue>7</issue>
          <fpage>667</fpage>
          <lpage>670</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jacr.2023.05.005</pub-id>
          <pub-id pub-id-type="medline">37315912</pub-id>
          <pub-id pub-id-type="pii">S1546-1440(23)00400-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>LY</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>XC</given-names>
            </name>
            <name name-style="western">
              <surname>Nejatian</surname>
              <given-names>NP</given-names>
            </name>
            <name name-style="western">
              <surname>Nasir-Moin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Abidin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Eaton</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Riina</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Laufer</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Punjabi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Miceli</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>NC</given-names>
            </name>
            <name name-style="western">
              <surname>Orillac</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schnurman</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Livia</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Weiss</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kurland</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Neifert</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dastagirzada</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kondziolka</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>ATM</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Costa</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Aphinyanaphongs</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Oermann</surname>
              <given-names>EK</given-names>
            </name>
          </person-group>
          <article-title>Health system-scale language models are all-purpose prediction engines</article-title>
          <source>Nature</source>
          <year>2023</year>
          <month>07</month>
          <day>07</day>
          <volume>619</volume>
          <issue>7969</issue>
          <fpage>357</fpage>
          <lpage>362</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37286606"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41586-023-06160-y</pub-id>
          <pub-id pub-id-type="medline">37286606</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41586-023-06160-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC10338337</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Dobbs</surname>
              <given-names>TD</given-names>
            </name>
            <name name-style="western">
              <surname>Hutchings</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Whitaker</surname>
              <given-names>IS</given-names>
            </name>
          </person-group>
          <article-title>Using ChatGPT to write patient clinic letters</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <month>04</month>
          <volume>5</volume>
          <issue>4</issue>
          <fpage>e179</fpage>
          <lpage>e181</lpage>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(23)00048-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Djalilian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and ophthalmology: exploring its potential with discharge summaries and operative notes</article-title>
          <source>Semin Ophthalmol</source>
          <year>2023</year>
          <month>07</month>
          <day>03</day>
          <volume>38</volume>
          <issue>5</issue>
          <fpage>503</fpage>
          <lpage>507</lpage>
          <pub-id pub-id-type="doi">10.1080/08820538.2023.2209166</pub-id>
          <pub-id pub-id-type="medline">37133418</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <article-title>A Blueprint for Equity and Inclusion in Artificial Intelligence</article-title>
          <source>World Economic Forum</source>
          <year>2022</year>
          <access-date>2023-11-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.weforum.org/whitepapers/a-blueprint-for-equity-and-inclusion-in-artificial-intelligence/">https://www.weforum.org/whitepapers/a-blueprint-for-equity-and-inclusion-in-artificial-intelligence/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <article-title>Blueprint for Trustworthy AI Implementation Guidance and Assurance for Healthcare</article-title>
          <source>Coalition for Health AI</source>
          <year>2023</year>
          <access-date>2023-11-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.coalitionforhealthai.org/papers/blueprint-for-trustworthy-ai_V1.0.pdf">https://www.coalitionforhealthai.org/papers/blueprint-for-trustworthy-ai_V1.0.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
