<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JME</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id>
      <journal-title>JMIR Medical Education</journal-title>
      <issn pub-type="epub">2369-3762</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i1e50869</article-id>
      <article-id pub-id-type="pmid">38175695</article-id>
      <article-id pub-id-type="doi">10.2196/50869</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Patients, Doctors, and Chatbots</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Venkatesh</surname>
            <given-names>Kaushik</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mihalache</surname>
            <given-names>Andrew</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Patil</surname>
            <given-names>Nikhil</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mircheva</surname>
            <given-names>Iskra</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Erren</surname>
            <given-names>Thomas C</given-names>
          </name>
          <degrees>MPH, MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Institute and Policlinic for Occupational Medicine, Environmental Medicine and Prevention Research</institution>
            <institution>University Hospital of Cologne</institution>
            <institution>University of Cologne</institution>
            <addr-line>Berlin-Kölnische Allee 4</addr-line>
            <addr-line>Köln (Zollstock), 50937</addr-line>
            <country>Germany</country>
            <phone>49 022147876780</phone>
            <fax>49 022147876795</fax>
            <email>tim.erren@uni-koeln.de</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7110-1031</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Institute and Policlinic for Occupational Medicine, Environmental Medicine and Prevention Research</institution>
        <institution>University Hospital of Cologne</institution>
        <institution>University of Cologne</institution>
        <addr-line>Köln (Zollstock)</addr-line>
        <country>Germany</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Thomas C Erren <email>tim.erren@uni-koeln.de</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>4</day>
        <month>1</month>
        <year>2024</year>
      </pub-date>
      <volume>10</volume>
      <elocation-id>e50869</elocation-id>
      <history>
        <date date-type="received">
          <day>14</day>
          <month>7</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>14</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>19</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>8</day>
          <month>11</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Thomas C Erren. Originally published in JMIR Medical Education (https://mededu.jmir.org), 04.01.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on https://mededu.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mededu.jmir.org/2024/1/e50869" xlink:type="simple"/>
      <abstract>
        <p>Medical advice is key to the relationship between doctor and patient. The question I will address is “how may chatbots affect the interaction between patients and doctors with regard to medical advice?” I describe what lies ahead when using chatbots and identify questions galore for the daily work of doctors. I conclude with a gloomy outlook, expectations for the urgently needed ethical discourse, and a hope in relation to humans and machines.</p>
      </abstract>
      <kwd-group>
        <kwd>chatbot</kwd>
        <kwd>ChatGPT</kwd>
        <kwd>medical advice</kwd>
        <kwd>ethics</kwd>
        <kwd>patients</kwd>
        <kwd>doctors</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <disp-quote>
        <p>While I strive to provide accurate and helpful information, I am not a substitute for medical advice or professional judgment, and it’s always important for patients and healthcare providers to work together to develop a personalized treatment plan that takes into account a patient’s individual needs and circumstances.</p>
        <attrib>ChatGPT, 2023</attrib>
      </disp-quote>
      <p>Medical advice (MA) is key to the relationship between doctor and patient. The question I will address is “how may chatbots affect the interaction between patients and doctors with regard to medical advice?” To this end, I shall consider—and go beyond—what was recently outlined regarding MA in “A Conversation With ChatGPT” [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>Advances in artificial intelligence (AI) and chatbots are changing the world, including medicine [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. ChatGPT is a generative pretrained transformer model based on GPT-3 from OpenAI. Based on word correlations in its 175 billion–parameter database, ChatGPT floods us with meaningful but also nonsensical information.</p>
      <p>Concerning the interaction between patients, doctors, and chatbots, I describe what lies ahead when using chatbots and identify many questions for the daily work of doctors. I conclude with a gloomy outlook, expectations for urgently needed ethical discourse [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>], and a hope in relation to humans and machines [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p>
    </sec>
    <sec>
      <title>Weighing ChatGPT’s Quote</title>
      <p>How ChatGPT describes its role [<xref ref-type="bibr" rid="ref1">1</xref>]—“I am not a substitute for medical advice”—should be a fact. Doctors, as the only authoritative providers of professional MA, must always be in the driver’s seat. Chatbots have the potential to help with the task of contributing general information to an information chain. Importantly, doctors need to review and question all AI output and see if and how it contributes to a patient’s understanding and fits within MA. Depending on the expectations and hopes that ChatGPT raises in patients, this task could become an unprecedented challenge.</p>
      <p>With their up-to-date knowledge and medical experience and expertise, doctors need to integrate personal, specific, and general information into their comprehensive MA to the patients. Chatbots are limited to general information stored in databases. Concerningly, ChatGPT invents facts, called a hallucination in AI [<xref ref-type="bibr" rid="ref3">3</xref>]. Moreover, ChatGPT can produce nonsensical or “bullshit” [<xref ref-type="bibr" rid="ref8">8</xref>] information, nicely worded and seemingly justified but disregarding truth and facts—disconcertingly, we do not readily know how often and when ChatGPT offers “bullshit” or nonsense responses.</p>
    </sec>
    <sec>
      <title>The Daily Work of Doctors: Questions Galore</title>
      <p>Nevertheless, ChatGPT will be used by many simply because it is there and seemingly easy and, importantly, free to use.</p>
      <p>Is it, therefore, likely that we can do without chatbots? No, because society will not abandon ChatGPT or other advanced chatbot tools [<xref ref-type="bibr" rid="ref3">3</xref>]. The sooner we understand chatbot information for patients, the better. Realistically, ChatGPT is just the tip of an AI iceberg. The “Godfather of AI” [<xref ref-type="bibr" rid="ref9">9</xref>] Hinton and OpenAI’s chief executive officer Altman [<xref ref-type="bibr" rid="ref10">10</xref>] have warned forcefully about the speed, impact, and inevitability of AI developments.</p>
      <p>Doctors routinely deal with both informed and misinformed patients, fuelled by online health searches (eg, “Dr Google” [<xref ref-type="bibr" rid="ref11">11</xref>]). Indeed, the internet has become the starting point for many to ask questions about health, disrupting traditional doctor-patient relationships [<xref ref-type="bibr" rid="ref12">12</xref>] and leading to potential harm from online misinformation [<xref ref-type="bibr" rid="ref11">11</xref>]. Importantly, neither patients nor doctors should give away too much information when using AI. Even if MA could get better with more details, can we know if this information is being used beyond MA? Indeed, to what extent may creating MA be used as an AI Trojan horse to extract information for other purposes, including business benefits? Which biases go into AI-based medical information, for instance, through training data that neither represent the ethnicity nor the financial options of diverse patients? That medically advanced AI may become expensive raises questions of equity: who will have access to these technologies?</p>
      <p>What knowledge do doctors need to understand medical AI advice? How can AI-based medical information be used [<xref ref-type="bibr" rid="ref13">13</xref>], and how do you deal with medical information that AI cannot explain [<xref ref-type="bibr" rid="ref14">14</xref>]? Could doctors working with chatbot-provided diagnoses and AI-recommended treatments miss the true picture and become overreliant on AI? Who is liable when doctors use AI medical information, and to come full circle, who is liable when they do not [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]? Could there come a time when not considering AI such as ChatGPT constitutes less than adequate advice and nonstandard care [<xref ref-type="bibr" rid="ref15">15</xref>]? Doctors should ask their liability insurer how (ie, under what conditions) and to what extent the insurer covers the use, or nonuse, of AI in practice [<xref ref-type="bibr" rid="ref15">15</xref>].</p>
      <p>Key orientation for interactions between patients, doctors, and chatbots regarding MA can come from physicians’ professional organizations and the US Food and Drug Administration. Similar to practice guidelines [<xref ref-type="bibr" rid="ref15">15</xref>], recommendations and guardrails for practice-specific medical information via chatbots may have to be developed.</p>
    </sec>
    <sec>
      <title>A Gloomy Outlook, Expectations From Much-Needed Ethical Discourse, and a Hope in Relation to Humans and Machines</title>
      <p>That ChatGPT “strive(s) to provide accurate and helpful information” [<xref ref-type="bibr" rid="ref1">1</xref>] has a stale empirical aftertaste. In fact, according to OpenAI, advanced AI [<xref ref-type="bibr" rid="ref16">16</xref>] will make reviewing chatbot information even more difficult. GPT-4 (eg, in Microsoft Bing and ChatGPT Plus), with 571 times as many learned parameters as GPT-3, has “learned” to deliver incorrect work more convincingly than earlier models. Such mistakes will pose severe problems even if “[ChatGPT] admits these when challenged” [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>PubMed-listed comparisons between GPT-3 and GPT-4 suggest that the latter may provide more accurate patient information in nuclear medicine [<xref ref-type="bibr" rid="ref17">17</xref>]. Another study suggested that both free and paid versions of ChatGPT risk providing misleading responses when used without expert MA [<xref ref-type="bibr" rid="ref18">18</xref>]. Chatbot medical information written at a college reading level suggested that such AI devices may be used supplementarily but not as a primary source for medical information [<xref ref-type="bibr" rid="ref19">19</xref>], emphasizing the doctor’s key role in MA. More research is needed on MA in numerous medical fields and settings, for numerous applications, and for various populations.</p>
      <p>Overall, when AI experts at the University of California, Berkeley explored and discussed the implications of ChatGPT and AI and future challenges in the spring of 2023, there was an explicit call for more ethical considerations [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Priority safety measures include strict regulations for patient privacy and ethical practices [<xref ref-type="bibr" rid="ref21">21</xref>]. While the questions above are not exhaustive, it is time to systematically answer them regarding MA and the unavoidable interaction of patients, doctors, and chatbots.</p>
      <p>Ultimately, we can only hope that the boundaries between humans and machines [<xref ref-type="bibr" rid="ref3">3</xref>] will never become so blurred that patients cannot distinguish the MA of a human doctor from the general information provided by ChatGPT [<xref ref-type="bibr" rid="ref22">22</xref>] or other AI.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">MA</term>
          <def>
            <p>medical advice</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>TCE acknowledges stimulating working conditions as a visiting scholar at the University of California, Berkeley. Support is acknowledged for the article processing charge from the DFG (Deutsche Forschungsgemeinschaft / German Research Foundation, 491454339).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>The role of ChatGPT, generative language models, and artificial intelligence in medical education: a conversation with ChatGPT and a call for papers</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>03</month>
          <day>06</day>
          <volume>9</volume>
          <fpage>e46885</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e46885/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46885</pub-id>
          <pub-id pub-id-type="medline">36863937</pub-id>
          <pub-id pub-id-type="pii">v9i1e46885</pub-id>
          <pub-id pub-id-type="pmcid">PMC10028514</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haupt</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Marks</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>AI-generated medical advice-GPT and beyond</article-title>
          <source>JAMA</source>
          <year>2023</year>
          <month>04</month>
          <day>25</day>
          <volume>329</volume>
          <issue>16</issue>
          <fpage>1349</fpage>
          <lpage>1350</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2023.5321</pub-id>
          <pub-id pub-id-type="medline">36972070</pub-id>
          <pub-id pub-id-type="pii">2803077</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shaw</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Morfeld</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Erren</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The (mis)use of ChatGPT in science and education: Turing, Djerassi, "athletics" &#38; ethics</article-title>
          <source>EMBO Rep</source>
          <year>2023</year>
          <month>07</month>
          <day>05</day>
          <volume>24</volume>
          <issue>7</issue>
          <fpage>e57501</fpage>
          <pub-id pub-id-type="doi">10.15252/embr.202357501</pub-id>
          <pub-id pub-id-type="medline">37259767</pub-id>
          <pub-id pub-id-type="pmcid">PMC10328063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coghlan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Leins</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sheldrick</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cheong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gooding</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>D'Alfonso</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>To chat or bot to chat: ethical issues with using chatbots in mental health</article-title>
          <source>Digit Health</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>20552076231183542</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076231183542?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076231183542</pub-id>
          <pub-id pub-id-type="medline">37377565</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076231183542</pub-id>
          <pub-id pub-id-type="pmcid">PMC10291862</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akerson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Andazola</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>DeCamp</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>More than just a pretty face? Nudging and bias in chatbots</article-title>
          <source>Ann Intern Med</source>
          <year>2023</year>
          <month>07</month>
          <volume>176</volume>
          <issue>7</issue>
          <fpage>997</fpage>
          <lpage>998</lpage>
          <pub-id pub-id-type="doi">10.7326/M23-0877</pub-id>
          <pub-id pub-id-type="medline">37276595</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Erren</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Shaw</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Brave (in a) new world: an ethical perspective on chatbots for medical advice</article-title>
          <source>Front Public Health</source>
          <year>2023</year>
          <volume>11</volume>
          <fpage>1254334</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37663854"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpubh.2023.1254334</pub-id>
          <pub-id pub-id-type="medline">37663854</pub-id>
          <pub-id pub-id-type="pmcid">PMC10470018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turing</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>I.—Computing machinery and intelligence</article-title>
          <source>Mind</source>
          <year>1950</year>
          <month>10</month>
          <volume>LIX</volume>
          <issue>236</issue>
          <fpage>433</fpage>
          <lpage>460</lpage>
          <pub-id pub-id-type="doi">10.1093/mind/LIX.236.433</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frankfurt</surname>
              <given-names>HG</given-names>
            </name>
          </person-group>
          <source>On Bullshit</source>
          <year>2005</year>
          <publisher-loc>Princeton, NJ</publisher-loc>
          <publisher-name>Princeton University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Metz</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>‘The Godfather of A.I.’ leaves Google and warns of danger ahead</article-title>
          <source>The New York Times</source>
          <year>2023</year>
          <month>05</month>
          <day>02</day>
          <access-date>2023-10-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.nytimes.com/2023/05/01/technology/ai-google-chatbot-engineer-quits-hinton.html">https://www.nytimes.com/2023/05/01/technology/ai-google-chatbot-engineer-quits-hinton.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fung</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Mr. ChatGPT goes to Washington: OpenAI CEO Sam Altman testifies before Congress on AI risks</article-title>
          <source>CNN</source>
          <year>2023</year>
          <month>05</month>
          <day>16</day>
          <access-date>2023-10-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://edition.cnn.com/2023/05/16/tech/sam-altman-openai-congress/index.html">https://edition.cnn.com/2023/05/16/tech/sam-altman-openai-congress/index.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hyman</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>The risks of consulting Dr. Google</article-title>
          <source>Psychology Today</source>
          <year>2020</year>
          <month>04</month>
          <day>29</day>
          <access-date>2023-10-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.psychologytoday.com/us/blog/mental-mishaps/202004/the-risks-consulting-dr-google">https://www.psychologytoday.com/us/blog/mental-mishaps/202004/the-risks-consulting-dr-google</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Freckelton</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Internet disruptions in the doctor-patient relationship</article-title>
          <source>Med Law Rev</source>
          <year>2020</year>
          <month>08</month>
          <day>01</day>
          <volume>28</volume>
          <issue>3</issue>
          <fpage>502</fpage>
          <lpage>525</lpage>
          <pub-id pub-id-type="doi">10.1093/medlaw/fwaa008</pub-id>
          <pub-id pub-id-type="medline">32417891</pub-id>
          <pub-id pub-id-type="pii">5838319</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pu</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>"I am chatbot, your virtual mental health adviser." What drives citizens' satisfaction and continuance intention toward mental health chatbots during the COVID-19 pandemic? An empirical study in China</article-title>
          <source>Digit Health</source>
          <year>2022</year>
          <volume>8</volume>
          <fpage>20552076221090031</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076221090031?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076221090031</pub-id>
          <pub-id pub-id-type="medline">35381977</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076221090031</pub-id>
          <pub-id pub-id-type="pmcid">PMC8971968</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kaushal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Khullar</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Should health care demand interpretable artificial intelligence or accept "Black Box" medicine?</article-title>
          <source>Ann Intern Med</source>
          <year>2020</year>
          <month>01</month>
          <day>07</day>
          <volume>172</volume>
          <issue>1</issue>
          <fpage>59</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.7326/M19-2548</pub-id>
          <pub-id pub-id-type="medline">31842204</pub-id>
          <pub-id pub-id-type="pii">2757636</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Price</surname>
              <given-names>WN</given-names>
            </name>
            <name name-style="western">
              <surname>Gerke</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>IG</given-names>
            </name>
          </person-group>
          <article-title>Potential liability for physicians using artificial intelligence</article-title>
          <source>JAMA</source>
          <year>2019</year>
          <month>11</month>
          <day>12</day>
          <volume>322</volume>
          <issue>18</issue>
          <fpage>1765</fpage>
          <lpage>1766</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2019.15064</pub-id>
          <pub-id pub-id-type="medline">31584609</pub-id>
          <pub-id pub-id-type="pii">2752750</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <article-title>GPT-4 technical report</article-title>
          <source>OpenAI</source>
          <year>2023</year>
          <month>03</month>
          <day>27</day>
          <access-date>2023-10-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cdn.openai.com/papers/gpt-4.pdf">https://cdn.openai.com/papers/gpt-4.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Currie</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Robbie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tually</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and patient information in nuclear medicine: GPT-3.5 versus GPT-4</article-title>
          <source>J Nucl Med Technol</source>
          <year>2023</year>
          <month>12</month>
          <day>05</day>
          <volume>51</volume>
          <issue>4</issue>
          <fpage>307</fpage>
          <lpage>313</lpage>
          <pub-id pub-id-type="doi">10.2967/jnmt.123.266151</pub-id>
          <pub-id pub-id-type="medline">37699647</pub-id>
          <pub-id pub-id-type="pii">jnmt.123.266151</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deiana</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Dettori</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Arghittu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Azara</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gabutti</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Castiglia</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and public health: evaluating ChatGPT responses to vaccination myths and misconceptions</article-title>
          <source>Vaccines (Basel)</source>
          <year>2023</year>
          <month>07</month>
          <day>07</day>
          <volume>11</volume>
          <issue>7</issue>
          <fpage>1217</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris.unibe.ch/id/eprint/185126"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/vaccines11071217</pub-id>
          <pub-id pub-id-type="medline">37515033</pub-id>
          <pub-id pub-id-type="pii">vaccines11071217</pub-id>
          <pub-id pub-id-type="pmcid">PMC10386180</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Musheyev</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bockelman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Loeb</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kabarriti</surname>
              <given-names>AE</given-names>
            </name>
          </person-group>
          <article-title>Assessment of artificial intelligence chatbot responses to top searched queries about cancer</article-title>
          <source>JAMA Oncol</source>
          <year>2023</year>
          <month>10</month>
          <day>01</day>
          <volume>9</volume>
          <issue>10</issue>
          <fpage>1437</fpage>
          <lpage>1440</lpage>
          <pub-id pub-id-type="doi">10.1001/jamaoncol.2023.2947</pub-id>
          <pub-id pub-id-type="medline">37615960</pub-id>
          <pub-id pub-id-type="pii">2808733</pub-id>
          <pub-id pub-id-type="pmcid">PMC10450581</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Manke</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>AI lectures at Berkeley to explore possibilities, implications of ChatGPT</article-title>
          <source>Berkeley News</source>
          <year>2023</year>
          <month>03</month>
          <day>10</day>
          <access-date>2023-10-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://news.berkeley.edu/2023/03/10/ai-lectures-at-berkeley-to-explore-possibilities-implications-of-chatgpt/">https://news.berkeley.edu/2023/03/10/ai-lectures-at-berkeley-to-explore-possibilities-implications-of-chatgpt/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>The Lancet</collab>
          </person-group>
          <article-title>AI in medicine: creating a safe and equitable future</article-title>
          <source>Lancet</source>
          <year>2023</year>
          <month>08</month>
          <day>12</day>
          <volume>402</volume>
          <issue>10401</issue>
          <fpage>503</fpage>
          <pub-id pub-id-type="doi">10.1016/S0140-6736(23)01668-9</pub-id>
          <pub-id pub-id-type="medline">37573071</pub-id>
          <pub-id pub-id-type="pii">S0140-6736(23)01668-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nov</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Putting ChatGPT's medical advice to the (Turing) test: survey study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>07</month>
          <day>10</day>
          <volume>9</volume>
          <fpage>e46939</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e46939/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46939</pub-id>
          <pub-id pub-id-type="medline">37428540</pub-id>
          <pub-id pub-id-type="pii">v9i1e46939</pub-id>
          <pub-id pub-id-type="pmcid">PMC10366957</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
