<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Educ</journal-id><journal-id journal-id-type="publisher-id">mededu</journal-id><journal-id journal-id-type="index">20</journal-id><journal-title>JMIR Medical Education</journal-title><abbrev-journal-title>JMIR Med Educ</abbrev-journal-title><issn pub-type="epub">2369-3762</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e85228</article-id><article-id pub-id-type="doi">10.2196/85228</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Understanding Clinicians&#x2019; Informational Needs for AI-Driven Clinical Decision Support Systems: Qualitative Interview Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Mingels</surname><given-names>Simone</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Piehl</surname><given-names>Hannah</given-names></name><degrees>MA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Therrien</surname><given-names>Madeline</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Akhmad</surname><given-names>Ekaterina</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>van Hienen</surname><given-names>Anniek</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>van Soest</surname><given-names>Johan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Hochstenbach</surname><given-names>Laura</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Dekker</surname><given-names>Andre</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Damman</surname><given-names>Olga</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Fijten</surname><given-names>Rianne</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Radiation Oncology (Maastro), GROW Research Institute for Oncology and Reproduction, Maastricht University Medical Centre+, Maastricht University</institution><addr-line>Paul Henri Spaaklaan 1</addr-line><addr-line>Maastricht</addr-line><country>The Netherlands</country></aff><aff id="aff2"><institution>Brightlands Institute for Smart Society (BISS), Faculty of Science and Engineering, Maastricht University</institution><addr-line>Heerlen</addr-line><country>The Netherlands</country></aff><aff id="aff3"><institution>Department of Health Services Research, 
Care and Public Health Research Institute (CAPHRI), Maastricht University</institution><addr-line>Maastricht</addr-line><country>The Netherlands</country></aff><aff id="aff4"><institution>Department of Public and Occupational Health and Amsterdam Public Health Research Institute, Quality of Care, Amsterdam UMC location VUmc</institution><addr-line>Amsterdam</addr-line><country>The Netherlands</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Stone</surname><given-names>Alicia</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Leung</surname><given-names>Tiffany</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Chuang</surname><given-names>Elizabeth</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Bai</surname><given-names>Lu</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Simone Mingels, MSc, Department of Radiation Oncology (Maastro), GROW Research Institute for Oncology and Reproduction, Maastricht University Medical Centre+, Maastricht University, Paul Henri Spaaklaan 1, Maastricht, 6229 EN, The Netherlands, +31 (0)43 38 81863; <email>simone.mingels@maastrichtuniversity.nl</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>12</day><month>3</month><year>2026</year></pub-date><volume>12</volume><elocation-id>e85228</elocation-id><history><date date-type="received"><day>03</day><month>10</month><year>2025</year></date><date date-type="rev-recd"><day>18</day><month>12</month><year>2025</year></date><date date-type="accepted"><day>07</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Simone Mingels, Hannah Piehl, Madeline Therrien, Ekaterina Akhmad, Anniek van Hienen, Johan van Soest, Laura Hochstenbach, 
Andre Dekker, Olga Damman, Rianne Fijten. Originally published in JMIR Medical Education (<ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org">https://mededu.jmir.org</ext-link>), 12.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Education, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mededu.jmir.org/">https://mededu.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mededu.jmir.org/2026/1/e85228"/><abstract><sec><title>Background</title><p>Advancements in artificial intelligence (AI) are transforming health care, particularly through AI-driven clinical decision support systems (AI-CDSS) that aid in predicting disease progression and personalizing treatment. Despite their potential, adoption remains limited due to clinician concerns about algorithm misuse, misinterpretation, and lack of transparency.</p></sec><sec><title>Objective</title><p>This qualitative study explores the informational needs and preferences of clinicians to better understand and appropriately use AI-CDSS in decision-making. 
In parallel, this study explores AI experts&#x2019; perspectives on what information should be communicated to enable safe and appropriate use of AI-CDSS.</p></sec><sec sec-type="methods"><title>Methods</title><p>A qualitative description design study was conducted using semistructured interviews with 16 participants (8 clinicians and 8 AI experts). Discussions focused on experiences with AI, informational needs, and feedback on existing reporting standards, including Model Cards, Model Facts, and the Transparent Reporting of a multivariable prediction model for Individual Prognosis Or Diagnosis&#x2013;Artificial Intelligence (TRIPOD-AI) checklist. The transcripts were analyzed through codebook thematic analysis.</p></sec><sec sec-type="results"><title>Results</title><p>Four key themes were identified: (1) clinicians need clear information on training data, its origin, size, and inclusion and exclusion criteria, to judge model applicability; (2) performance metrics must go beyond the area under the curve (AUC) and be clinically relevant to support informed decisions; (3) limitations and warnings about inappropriate use should be specific and clearly communicated to prevent misuse; and (4) information should be presented in layered, customizable formats within existing clinical software, avoiding unnecessary jargon, and allowing optional deeper explanations. While each of the reviewed reporting standards offered strengths, none were considered sufficient alone. Participants recommended a combined and clinician-centered approach to information delivery. Alignment of reporting standards with clinical workflows and decision thresholds was thought to be crucial to bridge the usability gap.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>To improve AI-CDSS adoption in clinical practice, reporting standards must be designed for better clinician comprehension and usability. 
Enhancing transparency, particularly regarding training data and performance, can likely help clinicians assess AI-CDSS more effectively. Information should be delivered in an accessible, layered format, fitting clinical workflows. Co-creation with clinicians throughout AI-CDSS development was a cross-cutting theme, highlighting its importance in ensuring tools are not only technically sound but also practically usable. Future research should explore how to structurally report on performance and validation metrics for clinician understanding and assess the impact of information provision on AI-CDSS adoption.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>AI implementation</kwd><kwd>delivery of health care</kwd><kwd>informational needs</kwd><kwd>reporting standard</kwd><kwd>transparency</kwd><kwd>co-creation</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Advancements in artificial intelligence (AI) are rapidly transforming the health care sector [<xref ref-type="bibr" rid="ref1">1</xref>]. AI has the potential to aid clinicians and patients in comparing various treatment options by predicting future events based on individual patient characteristics to determine which treatment would benefit the patient the most [<xref ref-type="bibr" rid="ref2">2</xref>]. Implementing these AI-driven clinical decision support systems (AI-CDSS) offers the potential for highly personalized medicine by predicting disease survival probabilities and potential treatment side effects [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. 
Despite its potential, achieving sustained adoption of AI-CDSS in routine clinical care frequently encounters challenges [<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>One of these challenges is clinicians&#x2019; resistance to adopting AI-CDSS, which is exacerbated by their concerns about the high potential risks to patient safety and quality of care [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. Among clinicians, there can be a lack of in-depth knowledge of how algorithms are constructed and how they generate predictions and recommendations [<xref ref-type="bibr" rid="ref8">8</xref>]. This can make clinicians unprepared to assess if the algorithm is usable within their specific clinical setting, which can cause (unintended) misuse of algorithms [<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. An example of such misuse can be found in the study by Zhao et al [<xref ref-type="bibr" rid="ref12">12</xref>], where the INFLUENCE tool (IKNL) was applied to guide decisions about primary therapy. However, this tool was originally designed to estimate individual, time-dependent risks of recurrence or metastasis in patients with breast cancer who have already completed curative treatment [<xref ref-type="bibr" rid="ref13">13</xref>]. Using the model outside its intended context led to inadequate treatment recommendations [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>The lack of knowledge that can exist amongst clinicians is not surprising, since there is an overall inadequate adherence to reporting standards within the AI development field [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref17">17</xref>]. 
Even when developers adhere to these reporting standards, the reports are often too technical for clinical end users to adequately understand the potential risks or the intended use of the models [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. Descriptions of AI-CDSS, such as intended use and target population, can help clinicians better understand the meaning of the outcome produced by a model before acting on its recommendation, which can prevent mistakes made due to misinterpretation of the outcome [<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>To stimulate reporting of prediction models in published literature, several initiatives have been introduced [<xref ref-type="bibr" rid="ref21">21</xref>]. The Transparent Reporting of a multivariable prediction model for Individual Prognosis Or Diagnosis&#x2013;Artificial Intelligence (TRIPOD-AI) statement is a set of recommendations for the reporting of studies developing, validating, or updating a prediction model and is commonly used and/or required by journal publications [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref24">24</xref>]. In addition, other guidelines such as the CONSORT-AI (Consolidated Standards of Reporting Trials extension for Artificial Intelligence) [<xref ref-type="bibr" rid="ref25">25</xref>], Developmental and Exploratory Clinical Investigations of Decision Support Systems Driven by Artificial Intelligence (DECIDE-AI) [<xref ref-type="bibr" rid="ref26">26</xref>], and SPIRIT-AI (Standard Protocol Items: Recommendations for Interventional Trials involving Artificial Intelligence) [<xref ref-type="bibr" rid="ref27">27</xref>] were created. However, the focus of these reporting standards is on transparent reporting within clinical trial reports or protocols, not on improving understanding of AI for clinicians [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. 
Furthermore, these reporting standards are developed to report on an AI model and not the environment or software system in which it is operating [<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>In an attempt to further standardize reporting of AI model information, Google established the concept of &#x201C;Model Cards&#x201D;, which is comparable to providing a recipe and nutritional facts to a meal [<xref ref-type="bibr" rid="ref31">31</xref>]. Through Model Cards, developers provide a one- or 2-page record highlighting characteristics of training data, intended use cases, and performance [<xref ref-type="bibr" rid="ref32">32</xref>]. This Model Card can boost conformance to reporting standards but does not contain actionable information and guidance for applying AI in clinical practice. Sendak et al [<xref ref-type="bibr" rid="ref33">33</xref>] reconstructed the idea of Model Cards using the concept of drug fact boxes. Drug fact boxes are used to understandably communicate benefits and risks associated with medications to patients [<xref ref-type="bibr" rid="ref34">34</xref>]. These concepts combined resulted in &#x201C;Model Facts&#x201D; [<xref ref-type="bibr" rid="ref33">33</xref>], an overview to &#x201C;collate relevant, actionable information in a compact overview to ensure that clinicians know how, when, how not, and when not to use model output in their clinical decisions&#x201D; [<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>Although the Model Facts concept is not thoroughly tested yet, the initiative has sparked a discussion on how to effectively communicate the potential risks of AI models to health care professionals [<xref ref-type="bibr" rid="ref33">33</xref>]. This highlights a broader need to understand what information clinicians require in order to evaluate and appropriately use AI-driven recommendations in their clinical decision-making. 
Reflecting these developments, the study investigates the following questions:</p><list list-type="bullet"><list-item><p>What are the informational needs and preferences of clinicians for understanding and appropriately using AI-driven recommendations on medical decision-making?</p></list-item><list-item><p>What information, according to AI experts, should be communicated to clinicians to enable their safe and appropriate use of AI-CDSS?</p></list-item></list></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>This qualitative description design study [<xref ref-type="bibr" rid="ref35">35</xref>] was conducted using semistructured interviews to explore how to effectively communicate information about AI-CDSS in medical decision-making. Interviews were conducted among two groups: (1) clinicians, to understand their preferences and informational needs, and (2) AI experts, to gain insights into what experts consider necessary for clinicians to safely and appropriately use AI tools. For this study, AI expertise was established based on practical professional experience rather than a specific formal education in AI. The expert group consisted of individuals with direct experience in developing or implementing AI models for or in the health care sector. The inclusion of both groups was based on the assumption that informational needs go beyond the clinicians&#x2019; expressed preferences, but also expert-identified information is necessary for informed and safe use of AI in clinical settings. This article was written in accordance with the COREQ (Consolidated Criteria for Reporting Qualitative Research) checklist (<xref ref-type="supplementary-material" rid="app6">Checklist 1</xref>) [<xref ref-type="bibr" rid="ref36">36</xref>].</p></sec><sec id="s2-2"><title>Recruitment of Participants</title><p>Study participants were recruited by convenience and snowball sampling between July and November 2024. 
A target sample size of 8 participants per group was predetermined. This fixed quota was established to accommodate the limited availability of clinicians while ensuring balanced representation between the 2 groups. Consequently, recruitment was concluded upon reaching this target rather than based on theoretical saturation. Potential participants were identified through professional networks associated with the research team and targeted searches on LinkedIn (Microsoft). The potential participants were contacted by email, accompanied by study information detailing the study&#x2019;s objectives, procedures, and confidentiality measures for the recording of the interviews. The interviewees interested in participating filled in a consent form and sent back an email to schedule a meeting, either online or face-to-face. Inclusion criteria required participants to be either (1) practicing clinicians in the Netherlands or (2) AI experts, entailing those with experience in developing and/or implementing diagnostic or treatment recommendation AI models within the health care setting. Participants who had less than 2 years of work experience were excluded.</p></sec><sec id="s2-3"><title>Data Collection</title><p>The semistructured interviews were conducted by a researcher (SM), who is proficient in both Dutch and English. The interviews followed an interview guide (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref>), which contained different questions for clinicians and AI experts, each consisting of 4 parts (<xref ref-type="table" rid="table1">Table 1</xref>). 
The guides were translated to English for those with no sufficient command of the Dutch language by researchers SM and EA.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Outline of the interview guide topics for clinicians and artificial intelligence (AI) experts regarding AI-driven clinical decision support systems (AI-CDSS) use.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Clinicians</td><td align="left" valign="bottom">AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> experts</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Demographic information</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Experience with AI in care settings</td><td align="left" valign="top">Experience developing AI for care settings</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Current received information about AI models</td><td align="left" valign="top">Provided information concerning AI models</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Information they would prefer to receive</td><td align="left" valign="top">Information they think they should provide</td></tr><tr><td align="left" valign="top" colspan="2">Examples of the Model Card, TRIPOD-AI,<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> and Model Facts were shown, and participants were requested to provide feedback concerning amount, structure, and comprehensiveness of the information<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table1fn2"><p><sup>b</sup>TRIPOD: Transparent Reporting of a multivariable prediction model 
for Individual Prognosis Or Diagnosis&#x2013;Artificial Intelligence.</p></fn><fn id="table1fn3"><p><sup>c</sup>This part was equal for clinicians and AI experts. Demographic information was also available for both the clinicians as well as for the AI experts.</p></fn></table-wrap-foot></table-wrap><p>In part one, the participants were asked demographic questions concerning their age, medical or technical specialization, and work experience history. In the second part, clinicians were asked about their experiences with AI in the health care setting and about instructions they received before using AI. AI experts received questions about their experience in developing AI for clinical use and which information they provided to the end users. Part three contained questions about which information they would ideally either receive as clinical end users or provide as AI developers.</p><p>In the last part, all participants received an example of a Model Card [<xref ref-type="bibr" rid="ref32">32</xref>], Model Facts [<xref ref-type="bibr" rid="ref33">33</xref>], and the TRIPOD-AI checklist [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>] (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). The formats were discussed sequentially: participants reviewed one example and provided immediate feedback on its information before moving to the next. This stepwise, fixed sequence ensured that each format was evaluated individually. These three formats were selected because they originate from distinct contexts and serve different purposes. Model Cards are intended for communication among developers and are therefore more technical in nature. In contrast, Model Facts are specifically designed for clinicians. The TRIPOD-AI checklist is a reporting standard on AI models in academic publications. 
To create these examples, a Bayesian network was selected from a public repository, as it provided a balanced level of complexity, being more complex than a linear regression but still more transparent than a deep learning model. One researcher (EA) completed each format based on the available model information. To preview a variation in communication styles, the same type of information was presented in bullet points in one format, while in another format, it was written in narrative form, and yet another displayed this information in a table. After examining the templates, participants were asked to provide feedback on the amount, structure, and comprehensiveness of the information listed in these examples.</p></sec><sec id="s2-4"><title>Research Group</title><p>SM is a PhD candidate in Clinical Data Science (CDS) at Maastricht University, working in a department that is generally tech-positive, which may have shaped her perspectives on AI in health care. Over the course of this research project, SM engaged with different paradigms, initially approaching research from a postpositivist stance but increasingly shifting toward a constructivist orientation. This perspective informs the study&#x2019;s recognition that a single &#x201C;ground truth&#x201D; of information provision is not attainable and that knowledge is context-dependent and co-constructed.</p><p>HP is a PhD student researching trust in AI within CDS. MT is a PhD student at Maastro Clinic, researching risk communication and shared decision-making in breast cancer radiation therapy, with a focus on patient decision aids. EA is a PhD student within CDS investigating AI model descriptions based on the model representation standards and requirements. AvH is a PhD student in shared decision-making within radiation oncology (CDS and Maastro), with advanced training in qualitative methodology, and advised SM on qualitative methods. 
RF is an assistant professor in AI and clinical decision-making within CDS and Maastro and supervises the PhD candidates.</p></sec><sec id="s2-5"><title>Data Analysis</title><p>After data collection, recordings were transcribed verbatim and analyzed according to codebook thematic analysis [<xref ref-type="bibr" rid="ref37">37</xref>] using the Atlas.ti software (version 25.0.1; ATLAS.ti Scientific Software Development GmbH). This method helped identify common themes and topics that were mentioned repeatedly in interviews. The analysis process started with open, inductive coding by a researcher (SM). As no predefined coding framework was used, codes were created to descriptively summarize the underlying meaning of the selected text segments. This initial phase resulted in the construction of a preliminary codebook. Two other researchers (MT and HP) independently applied codes from the codebook constructed by the first researcher (SM) to a subset of the interviews (n=4). Disagreements in codes were resolved by revisiting the transcripts to verify context and debating code definitions until consensus was reached. This iterative process changed the codebook by renaming codes, refining code definitions, ensuring consistent application of the codes, and merging overlapping codes. These codes were later grouped into general themes by identifying clusters within assigned codes for clinicians (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>) and for AI experts (<xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>). A member check of the analysis was not conducted to avoid influencing participants&#x2019; perspectives in potential follow-up research.</p></sec><sec id="s2-6"><title>Ethical Considerations</title><p>Before the start of the study, the Research Ethics form of the Faculty of Health, Medicine, and Life Sciences (FHML-REC, FHML/HDT/2024.022.) was evaluated, and ethical approval was granted. 
Prior to the interviews, all participants received an information letter detailing the study&#x2019;s objectives and data handling procedures. Written informed consent was obtained from all participants for both participation and audio recording. To ensure confidentiality, all transcripts were pseudonymized, and any identifying information was removed. Participants were provided access to both their audio recording and the corresponding transcript, which they could review at any time. Participants received no compensation for their participation.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Participant Characteristics</title><p>In total, 26 clinicians and 11 AI experts were invited to participate. Among invited clinicians, 8 declined due to lack of time, while 10 did not provide a reason. Three invited AI experts also did not provide a reason for nonparticipation. Between August and November 2024, 16 interviews were conducted with clinicians (n=8) and AI experts (n=8). Half of the clinician interviews were held through Microsoft Teams, and the other half were face-to-face, whereas most AI expert interviews were face-to-face (n=6). Two AI expert interviews were held in English, while all remaining interviews, including all clinician interviews, were conducted in Dutch. Interviews lasted between 30 minutes and an hour. All participating clinicians practiced within tertiary care settings, employed either at academic medical centers or a specialized institute. Concerning prior experience with AI among clinicians, all reported having encountered AI tools in their professional practice. The majority (n=5) had experience with prediction models; some (n=2) had worked exclusively with AI-driven imaging applications or generative AI in the form of chatbots (n=1). 
Participant characteristics are presented in <xref ref-type="table" rid="table2">Table 2</xref>.</p><p>Through conducting thematic analysis, 4 key themes were identified that clinicians and AI experts expressed as essential for effective communication and use of AI-CDSS. These themes include (1) understanding the target population, which refers to the need to understand the data on which the model was trained; (2) clinically meaningful outcomes, focusing on how model reliability and clinical relevance are communicated; (3) warnings, limits, and safe use, highlighting the importance of clearly defining boundaries, limitations, and misuse risks; and (4) accessible design, which addresses the structure and presentation of content. These themes and subthemes can be found in <xref ref-type="table" rid="table3">Table 3</xref> and are discussed in detail below.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Demographic and professional characteristics of the 16 participants interviewed between August and November 2024 in the Netherlands.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristics</td><td align="left" valign="bottom">Clinicians (n=8)</td><td align="left" valign="bottom">AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> experts (n=8)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Gender</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Men</td><td align="left" valign="top">6</td><td align="left" valign="top">5</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Women</td><td align="left" valign="top">2</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Age (years), median (range)</td><td align="left" valign="top">44 (29&#x2013;59)</td><td align="left" 
valign="top">37 (28&#x2013;48)</td></tr><tr><td align="left" valign="top">Work experience (years), median (range)</td><td align="left" valign="top">14 (2&#x2013;22)</td><td align="left" valign="top">11 (3&#x2013;21)</td></tr><tr><td align="left" valign="top">Specialties or roles (n)</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Internal medicine (2)</p></list-item><list-item><p>Oncology (2)</p></list-item><list-item><p>Gastroenterology (2)</p></list-item><list-item><p>Cardiology (1)</p></list-item><list-item><p>Radiology (1)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Medical imaging AI (3)</p></list-item><list-item><p>Innovation and implementation management (3)</p></list-item><list-item><p>Data infrastructure and FAIR<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> principles (2)</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>FAIR: Findable, Accessible, Interoperable, and Reusable.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Overview of themes and subthemes derived from interviews with clinicians and artificial intelligence (AI) experts regarding informational needs and preferences for using AI-driven clinical decision support systems (AI-CDSS).</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Theme</td><td align="left" valign="bottom">Subthemes</td></tr></thead><tbody><tr><td align="left" valign="top">Understanding the target population</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Origin of the data</p></list-item><list-item><p>Class imbalance</p></list-item><list-item><p>Inclusion and exclusion</p></list-item><list-item><p>Training data size</p></list-item></list></td></tr><tr><td align="left" 
valign="top">Clinically meaningful outcomes</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Difficult AUC<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Uncertainty bands</p></list-item><list-item><p>Performance graphs and comparative visualizations</p></list-item></list></td></tr><tr><td align="left" valign="top">Warnings, limits, and safe use</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Underused, poorly highlighted, and filled in too vaguely</p></list-item><list-item><p>Active warnings</p></list-item></list></td></tr><tr><td align="left" valign="top">Accessible design</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Available at all times</p></list-item><list-item><p>Language barrier</p></list-item><list-item><p>Customizable or layered information provision</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AUC: area under the curve.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Understanding the Target Population</title><p>A central theme expressed by most clinicians and experts was the ability to compare an individual patient during consultation with the training population for the AI model. This was fundamental for clinicians to evaluate whether the recommendations from the model would be reliable in a specific clinical context or not.</p><disp-quote><p>What I think they should know is on which population it (AI-CDSS) is based. So for which patient are they allowed to use it and for which patient are they not. If they have a new patient in front of them, when does it (the patient) belong within the population of the model.</p><attrib>AI Expert 2</attrib></disp-quote><p>Initially, participants described this need as knowledge about the origin of the data, more specifically why, where, and how the data were collected. 
Clinicians stressed that this way, they could extrapolate whether this reflected local populations, which was especially important when the model was not locally validated.</p><disp-quote><p>Where is the population from? This is clearly a Dutch population, I can extrapolate this to our own thing, but I would want to see the patient characteristics.</p><attrib>Clinician 4</attrib></disp-quote><p>Although the concern about the representativeness of the data and its relevance to local populations was present from the start, it became more concrete when participants reviewed the TRIPOD-AI example. Clinicians remarked that the section &#x201C;class imbalance&#x201D; was of great importance to make the comparison. However, the way it was written in the example felt overwhelming and too technical. Solutions for this were given, such as a table or graph to show the class imbalance, but also a digital and interactive solution for training data comparisons. AI experts already stressed the importance of providing information about the class imbalance before being shown the TRIPOD-AI example.</p><disp-quote><p>(<italic>Showed class balance in TRIPOD-AI example</italic>) Yes, basically yes, yes. Look, and if you see that they&#x2019;re all N1 patients, then is it also still suitable for N2? If your patient happens to be N2, so all those kind of things. Yeah I think that&#x2019;s really important to take into account.</p><attrib>Clinician 5</attrib></disp-quote><p>Later in the TRIPOD-AI document, the inclusion and exclusion graph for the training data, which is often provided in papers, was shown. Clinicians stressed that without understanding who was systematically left out of the training data, whether by design or by oversight, they felt the context needed to interpret the applicability was lacking.</p><disp-quote><p>For example, it doesn&#x2019;t apply to pregnancy or people who are overweight with a BMI over 30, I&#x2019;m just saying something. 
Because otherwise, I think you lose sight of that when you&#x2019;re working in practice.</p><attrib>Clinician 7</attrib></disp-quote><p>Finally, both clinicians and AI experts noted that information about the training data size was missing in the Model Facts example. This was, however, crucial information according to clinicians, since a great size of the training data is associated with better model performance.</p><disp-quote><p>And actually, I&#x2019;d want to know how many patients there were, like I&#x2019;d want to know for reliability, how many patients were in the model so I can estimate how good it is.</p><attrib>Clinician 4</attrib></disp-quote></sec><sec id="s3-3"><title>Clinically Meaningful Outcomes</title><p>Another key theme was the need for clear evidence of the performance and reliability of the model. Both groups stressed that for an AI-CDSS to be integrated into clinical practice, its performance must be demonstrably robust and easily interpretable. However, the manner in which the performance should be presented, as well as when it was acceptable, was not agreed upon by participants.</p><p>Clinicians, in particular, emphasized the importance of understanding what the reported performance metrics mean in practice. They noted that values like area under the curve (AUC), while technically informative, do not always translate into clinical insight without additional context. Some participants mentioned a desire for performance measures to align more closely with clinical outcomes, like sensitivity and specificity and their relevant thresholds. Clinicians and AI experts suggested that these measures can also easily be translated into statistical language such as false positives or negatives, which was thought to be more familiar to clinicians.</p><disp-quote><p>The AUC just gives an overall picture of whether it is a good predictive model. 
But if it&#x2019;s 0.88 and the balance between sensitivity and specificity for your specific question isn&#x2019;t optimal, then it still isn&#x2019;t a good model.</p><attrib>Clinician 3</attrib></disp-quote><p>Experts mentioned that besides the performance measures, there should also be a focus on providing an uncertainty band to clinicians. This should give insights into when the model is handling an edge case. AI experts noted that in the AI development at the time of this writing, developers chase a high-performance score such as the AUC but might overlook clinical utility or model robustness by doing so.</p><disp-quote><p>We often try to get that AUC as high as possible, but that applies to the entire population, so maybe we should start saying, maybe we need to develop models that actually filter out the worst-off patients or the ones who do the best.</p><attrib>AI expert 2</attrib></disp-quote><p>Ultimately, while clinicians overall claimed to be familiar with performance metrics, they expressed that information on model validation often remains buried in dense technical language or tables. However, to be actionable in a clinical setting, they emphasized that information must be presented in a more structured and visually digestible manner, such as performance graphs and comparative visualizations. Besides presenting the performance metrics, clinicians expressed a desire to know when they could use an outcome to make a treatment decision, in other words, when it was &#x201C;good enough&#x201D; to rely on.</p><disp-quote><p>Yes, these are always somewhat abstract concepts if you don&#x2019;t work with them daily. 
And yes, each time I can reason it out &#x2013; like, what it means &#x2013; but I can&#x2019;t immediately say whether it&#x2019;s good or not, right?</p><attrib>Clinician 5</attrib></disp-quote></sec><sec id="s3-4"><title>Warnings, Limits, and Safe Use</title><p>An important theme for both clinicians and experts was the need for clear warnings and boundaries for AI-CDSS use. Explicit indicators or limitations were desired, especially when the system may be applied in situations that fall outside its intended use or scope.</p><p>Participants praised that limitations and inappropriate uses were included in the presented reporting standards. However, many AI experts emphasized that these sections were often underused, poorly highlighted, and filled in too vaguely. AI experts also mentioned that biases and limitations of the model are difficult to formulate and fill in with only their background as developers. They mentioned that defining these biases and formulating necessary warnings for misuse of the model should be done in interdisciplinary settings. They also emphasized that misuses of the model can be identified during prototype testing or practice sessions.</p><disp-quote><p>So I would say that, yes, this requires critical evaluation. The problem, I think, is that if someone developing the model doesn&#x2019;t have much clinical background, it becomes very difficult for them to judge what is critical and what should be reported.</p><attrib>AI expert 3</attrib></disp-quote><p>Besides explicitly mentioning these limitations, participants also stressed that more active warnings should be included. AI experts mentioned that it would be safest to lock down the scope of the AI-CDSS within the software, such as limiting input variables or blocking predictions outside the scope. However, clinicians had varying opinions about this, with some arguing that they can decide on the model scope themselves. 
Another suggestion made by AI experts to warn clinicians about the correct use of an AI-CDSS was to provide pop-ups when using the model out of scope. However, opinions amongst clinicians were divided, with concerns regarding pop-up fatigue and the pop-up not being read or respected. Similarly, they thought that disclaimers were likely not to be read, or to be forgotten, during the clinical use of the model.</p><disp-quote><p>There are people in their 60s who function at the level of someone over 70, or even over 80, so to speak. And there are people who are 72 where you think, well, you actually function better than your calendar age would suggest. So it&#x2019;s all a matter of interpretation. And I think that this is something that should be left to a clinician.</p><attrib>Clinician 8</attrib></disp-quote></sec><sec id="s3-5"><title>Accessible Design</title><p>Participants emphasized that how information is presented is just as critical as the content itself. Even when essential data about the system is available, it may be ignored or misunderstood in clinical settings if it is not accessible, clearly structured, or presented in a usable format.</p><p>First, the information needs to be available at all times while using the AI-CDSS, according to participants. They stressed that information should preferably be integrated within the software. However, it was also thought to be important to distribute information in different ways, such as presentations, workshops, and emails. 
According to participants, clinicians should also familiarize themselves with the model, for example through e-learning and practice sessions; this is important for retention of their understanding of the model.</p><disp-quote><p>But if you, for example, give a presentation and a doctor can actually use your model that same day, then that might also have a big impact on retention.</p><attrib>Clinician 1</attrib></disp-quote><p>Second, participants stressed the importance of the information being conveyed in an understandable manner. AI experts often spoke about the &#x201C;language barrier&#x201D; they experience when communicating about AI with clinicians, and clinicians confirmed this gap when they remarked on developer jargon. Participants suggested communicating in statistical language, such as <italic>P</italic> values or false positives, which is generally more in line with clinicians&#x2019; education and academic training. Others suggested shrinking this &#x201C;language barrier&#x201D; by providing general AI training for clinicians, for example, from the hospital or during medical training. Another familiar way of providing information would be in the form of papers, which clinicians are accustomed to reading. The TRIPOD-AI example was well received by participants, as its resemblance to the layout of traditional academic papers appeared to create a sense of familiarity. However, some clinicians stressed that they then might as well read the entire paper, while AI experts noted that significant information was still missing due to word count constraints.</p><disp-quote><p>I think that, in any case, education about AI will simply be necessary for everyone in the near future, because it&#x2019;s gradually being used more and more in medicine. 
And it&#x2019;s important to understand how it works or how something like that even comes about.</p><attrib>Clinician 3</attrib></disp-quote><p>Third, clinicians often labeled themselves as being intrigued by AI and intrinsically motivated to gather information about new technologies. However, many clinicians also stressed that they themselves or their coworkers would not have enough time to read up about a model. This shows a difference in needs between clinicians, which calls for customizable or layered information provision. An example of how this works out in practice would be through providing information blocks or pop-ups on demand or linking to (validation) papers and other useful resources.</p><disp-quote><p>And references &#x2013; well, of course people aren&#x2019;t going to look at them very much either, so they quickly become less important. And people don&#x2019;t have endless time.</p><attrib>Clinician 6</attrib></disp-quote><p>Finally, of all the examples, the Model Card example was perceived as the least desired method by the participants, while the first page of the Model Facts was the most appreciated. However, according to participants, Model Facts still missed essential information blocks that were provided in the TRIPOD-AI, such as class imbalance, inclusion and exclusion criteria, and population size. Participants recommended that this information be integrated as much as possible within the software to use the AI-CDSS.</p><disp-quote><p>Well, if you want to know some of the background, then the TRIPOD would be my preference. 
But if it&#x2019;s purely about how to apply it in the clinic, then the Model Facts is fine &#x2013; it&#x2019;s a bit shorter and more focused on clinical applicability.</p><attrib>Clinician 2</attrib></disp-quote></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>This qualitative study explored the informational needs and preferences of clinicians for understanding and appropriately using AI-CDSS through semistructured interviews. Besides, this study explored AI experts&#x2019; perspectives on what information should be communicated to clinicians to enable safe and appropriate use of AI-CDSS. From the thematic analysis, four key domains were identified that the participants considered essential: (1) understanding the population the model was trained on, (2) interpreting performance metrics in a clinically meaningful way, (3) being clearly informed about limitations and boundaries of use, and (4) having the information presented in an accessible and integrated format. Elements of the presented Model Facts, Model Card, and TRIPOD-AI examples were seen as valuable in addressing these needs, though none were considered fully sufficient on their own.</p><p>The first aspect concerns the ability to assess whether the patient in front of the clinician matches the training population of the AI-CDSS. Information on the data&#x2019;s origin, distribution, and size was thus seen as crucial to avoid inappropriate extrapolation and potential harm. This aligns with guidelines made for AI in clinical research in which proper use of predictive models increases when end users understand the AI models better [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. At the time of this writing, many technical solutions aim to address dataset limitations, such as undersampling in the presence of class imbalances [<xref ref-type="bibr" rid="ref38">38</xref>]. 
However, little research exists on how to equip end users to recognize and act upon known data limitations in real-time use. The TRIPOD-AI example was praised by participants for including elements like class imbalance and inclusion and exclusion criteria. However, participants suggested more use of visual tools or interactive comparison modules, which reflects a desire for more transparency delivered in comprehensive formats.</p><p>Additionally, there was a need to understand model performance in ways that directly inform clinical decision-making. Although clinicians appreciated seeing performance metrics, like the one shown in the Model Facts table, they found that metrics such as the AUC lacked context. This was also the case in the study performed by Frey et al [<xref ref-type="bibr" rid="ref39">39</xref>], in which the Model Facts and the table containing information on performance and validation were not understood. These concerns are in line with other existing literature that argues for optimizing performance reporting to the clinical context and translating technical metrics into practical terms [<xref ref-type="bibr" rid="ref40">40</xref>]. Instead, participants preferred metrics linked to treatment-relevant thresholds. Several AI experts also pointed out that communicating uncertainty is vital for identifying edge cases where model performance might be unreliable. Performance should therefore not only be reported on a global level but also in ways that match the decision points clinicians face.</p><p>Another key finding concerned the need for transparent warnings and clear boundaries for appropriate model use. While participants acknowledged that standards such as TRIPOD-AI and Model Facts do include these elements, these sections are often too vaguely phrased, making them less actionable for clinicians. 
The importance of clear, structured warnings is well established in other areas of health care, for example, in drug labels, which provide standardized information on indications, contraindications, and potential risks [<xref ref-type="bibr" rid="ref41">41</xref>]. Such an approach would move beyond abstract disclaimers, offering clinicians tangible guidance to support responsible decision-making. Besides, some participants proposed integrating restrictions directly into the software, like locking out-of-scope inputs or showing warnings when the model is applied beyond its intended population. However, these suggestions were met with mixed reactions due to concerns about alert fatigue, which is an already frequently experienced mental strain in clinical decision support tools [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>Finally, participants focused on how the information is structured and delivered to the end user. Participants advocated for layered information design, in which summary-level information is accessible at first glance, with additional detail available on demand [<xref ref-type="bibr" rid="ref44">44</xref>]. This design principle is widely used in both health care settings, for example, in patient decision aids [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>], and in other industries or webpages, like Wikipedia (Wikimedia Foundation). Interactive data visualizations were also suggested for understanding how individual patients compare to the broader training dataset. This preference mirrors trends in other industries, such as finance and data analytics, where interactive visual tools enhance sense-making and decision quality [<xref ref-type="bibr" rid="ref47">47</xref>]. Clinicians also emphasized the importance of integrating information in the AI-CDSS interface and supporting information dissemination through workshops or e-learning modules. 
While the Model Facts format was seen as more accessible and clinically relevant, it lacked essential components, including training data information, which were better covered in TRIPOD-AI.</p><p>In general, participants throughout this study consistently emphasized that AI-CDSS can only be made safe, effective, and clinically relevant through co-creation with clinicians at every stage of development and deployment. This would start with jointly defining the model&#x2019;s purpose, identifying which clinical decisions it should support, and determining what constitutes a &#x201C;good enough&#x201D; model in clinical practice. Such early alignment likely helps to determine whether the model&#x2019;s goals are both clinically valuable and technically feasible. Beyond initial development, it seems that clinicians should also be involved in designing the model to fit existing workflows, identifying potential limitations, and establishing appropriate safeguards. AI experts noted they cannot define these elements alone because these require clinical insight. While the value of co-creation in AI development is well recognized in theory, it remains inconsistently practiced [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. One approach formalizing co-creation is design thinking, which is a user-centered, iterative process that emphasizes empathy, problem definition, rapid prototyping, and testing in real-world contexts [<xref ref-type="bibr" rid="ref50">50</xref>]. Embedding such methods more systematically into AI development is essential to close the gap between technical promise and clinical utility [<xref ref-type="bibr" rid="ref51">51</xref>].</p><p>Although this study was rooted in the Dutch tertiary care context, the findings likely extend to other modernized health care systems preparing for AI implementation. 
Particularly, these findings are transferable within Europe, given the European AI Act&#x2019;s mandate for ensuring AI literacy among professional users. This regulation requires end users across all member states to be competent in using high-risk AI systems [<xref ref-type="bibr" rid="ref52">52</xref>]. The educational and informational needs identified here are directly relevant to compliance efforts across the continent. Beyond the regulatory context, the core themes reflect universal medical concerns, extending the study&#x2019;s relevance to other modernized health care systems globally.</p><p>The results of this study should be interpreted in light of several limitations. First, the use of convenience sampling may have introduced selection bias, as individuals who voluntarily participate in research are often more engaged with and positive about the topic. Second, while the study aimed to explore clinicians&#x2019; informational needs regarding AI-CDSS, many clinicians seemed to lack in-depth understanding of AI, which may have limited their ability to fully articulate their needs. To address this, perspectives from AI experts were included to enrich the findings. Third, the interdisciplinary approach introduced heterogeneity, due to participants coming from a range of professional backgrounds or clinical specialties. While this breadth is a strength, allowing for a more comprehensive understanding of cross-disciplinary needs, it may also obscure discipline-specific differences. For example, certain formats or tools may be more relevant or familiar to specific specialties. Fourth, the examples of AI model documentation were presented to all participants in a fixed order. The lack of randomization in the sequence may have introduced order effects, such as primacy bias, which could have influenced comparative feedback. 
Future research should explore how disciplinary context shapes informational needs and preferences, ideally through larger-scale studies using quantitative methods that allow for broader demographic and professional representation and more robust comparisons across subgroups.</p><p>In conclusion, when using AI in clinical practice, it is important that clinicians use these technologies in a safe and informed way. However, this information provision likely falls short in meeting their informational needs, contributing to the lack of large-scale adoption. AI-CDSS developers should clearly communicate both who the model is for, including characteristics of its training data, and how it performs in clinically meaningful terms. This must include explicit limitations and appropriate warnings, presented in a way that is understandable, visual, and ideally interactive. While existing formats such as Model Cards, Model Facts, and TRIPOD-AI offer valuable starting points, none fully meet these needs on their own. Future efforts should consider combining and adapting elements from these formats to better support clinical use. Effective integration of AI-CDSS requires collaboration with clinicians during development and targeted education during implementation. Addressing informational needs of clinicians is critical to ensure appropriate use and supports the safe and effective integration of AI-CDSS into clinical practice.</p></sec></body><back><ack><p>The authors would like to thank the clinicians and AI experts who participated in this study. Their time and insights resulted in thorough discussions, which were essential to advancing the understanding of informational needs related to AI-driven clinical decision support systems.</p><p>The authors declare the use of generative AI in the research and writing process. 
According to the GAIDeT taxonomy [<xref ref-type="bibr" rid="ref53">53</xref>], the following tasks were delegated to generative AI tools under full human supervision: (1) idea generation and (2) proofreading and editing. The generative AI tool used was ChatGPT 4. Responsibility for the final manuscript lies entirely with the authors. Generative AI tools are not listed as authors and do not bear responsibility for the final outcomes.</p></ack><notes><sec><title>Funding</title><p>This research was funded by the Dutch Research Council (NWO).</p></sec><sec><title>Data Availability</title><p>The datasets gathered and analyzed during the study are not publicly available due to the presence of potentially identifiable information within the interview transcripts. However, deidentified excerpts or relevant portions of the data may be made available from the corresponding author upon reasonable request and subject to institutional and ethical approvals.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: SM, EA, JvS, LH, OD, RF</p><p>Data curation: SM</p><p>Formal Analysis: SM, HP, MT</p><p>Investigation: SM, HP, MT</p><p>Methodology: SM, EA, AvH, JvS, LH, OD, RF</p><p>Project administration: SM, LH, RF</p><p>Resources: SM, EA, RF</p><p>Supervision: LH, OD, RF</p><p>Writing &#x2013; original draft: SM</p><p>Writing &#x2013; review &#x0026; editing: SM, HP, MT, EA, AvH, JvS, LH, AD, OD, RF</p></fn><fn fn-type="conflict"><p>The primary author declares that they have no known competing financial interests. The primary author is affiliated with a department where AI-based systems are developed. Some of the authors receive research grants related to the development and implementation of AI. In addition, some of the respondents interviewed were known to the primary author prior to the study. JvS and AD are shareholders in and receive a salary from Medical Data Works BV, although company activities are unrelated to this manuscript. 
Outside of the submitted work, RF&#x2019;s research group receives funding from public-private research consortia, including contributions from Janssen-Cilag and Takeda. These companies had no role in the present study.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AI-CDSS</term><def><p>artificial intelligence-driven clinical decision support systems</p></def></def-item><def-item><term id="abb3">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb4">CDS</term><def><p>Clinical Data Science</p></def></def-item><def-item><term id="abb5">CONSORT-AI</term><def><p> Consolidated Standards Of Reporting Trials&#x2013;Artificial Intelligence</p></def></def-item><def-item><term id="abb6">COREQ</term><def><p> Consolidated Criteria for Reporting Qualitative Research</p></def></def-item><def-item><term id="abb7">DECIDE-AI</term><def><p> Developmental and Exploratory Clinical Investigation of Decision support systems driven by Artificial Intelligence</p></def></def-item><def-item><term id="abb8">FHML-REC</term><def><p> Faculty of Health, Medicine, and Life Sciences&#x2013;Research Ethics Committee</p></def></def-item><def-item><term id="abb9">SPIRIT-AI</term><def><p> Standard Protocol Items: Recommendations for Interventional Trials&#x2013;Artificial Intelligence</p></def></def-item><def-item><term id="abb10">TRIPOD-AI</term><def><p> Transparent Reporting of a multivariable prediction model for Individual Prognosis Or Diagnosis &#x2013; Artificial Intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aung</surname><given-names>YYM</given-names> </name><name name-style="western"><surname>Wong</surname><given-names>DCS</given-names> 
</name><name name-style="western"><surname>Ting</surname><given-names>DSW</given-names> </name></person-group><article-title>The promise of artificial intelligence: a review of the opportunities and challenges of artificial intelligence in healthcare</article-title><source>Br Med Bull</source><year>2021</year><month>09</month><day>10</day><volume>139</volume><issue>1</issue><fpage>4</fpage><lpage>15</lpage><pub-id pub-id-type="doi">10.1093/bmb/ldab016</pub-id><pub-id pub-id-type="medline">34405854</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ankolekar</surname><given-names>A</given-names> </name><name name-style="western"><surname>van der Heijden</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dekker</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Clinician perspectives on clinical decision support systems in lung cancer: implications for shared decision-making</article-title><source>Health Expect</source><year>2022</year><month>08</month><volume>25</volume><issue>4</issue><fpage>1342</fpage><lpage>1351</lpage><pub-id pub-id-type="doi">10.1111/hex.13457</pub-id><pub-id pub-id-type="medline">35535474</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goirand</surname><given-names>M</given-names> </name><name name-style="western"><surname>Austin</surname><given-names>E</given-names> </name><name name-style="western"><surname>Clay-Williams</surname><given-names>R</given-names> </name></person-group><article-title>Implementing ethics in healthcare AI-based applications: a scoping review</article-title><source>Sci Eng Ethics</source><year>2021</year><month>09</month><day>3</day><volume>27</volume><issue>5</issue><fpage>61</fpage><pub-id 
pub-id-type="doi">10.1007/s11948-021-00336-3</pub-id><pub-id pub-id-type="medline">34480239</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shaw</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rudzicz</surname><given-names>F</given-names> </name><name name-style="western"><surname>Jamieson</surname><given-names>T</given-names> </name><name name-style="western"><surname>Goldfarb</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence and the implementation challenge</article-title><source>J Med Internet Res</source><year>2019</year><month>07</month><day>10</day><volume>21</volume><issue>7</issue><fpage>e13659</fpage><pub-id pub-id-type="doi">10.2196/13659</pub-id><pub-id pub-id-type="medline">31293245</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gama</surname><given-names>F</given-names> </name><name name-style="western"><surname>Tyskbo</surname><given-names>D</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>J</given-names> </name><name name-style="western"><surname>Barlow</surname><given-names>J</given-names> </name><name name-style="western"><surname>Reed</surname><given-names>J</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name></person-group><article-title>Implementation frameworks for artificial intelligence translation into health care practice: scoping review</article-title><source>J Med Internet Res</source><year>2022</year><month>01</month><day>27</day><volume>24</volume><issue>1</issue><fpage>e32215</fpage><pub-id pub-id-type="doi">10.2196/32215</pub-id><pub-id pub-id-type="medline">35084349</pub-id></nlm-citation></ref><ref 
id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Petersson</surname><given-names>L</given-names> </name><name name-style="western"><surname>Larsson</surname><given-names>I</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>JM</given-names> </name><etal/></person-group><article-title>Challenges to implementing artificial intelligence in healthcare: a qualitative interview study with healthcare leaders in Sweden</article-title><source>BMC Health Serv Res</source><year>2022</year><month>07</month><day>1</day><volume>22</volume><issue>1</issue><fpage>850</fpage><pub-id pub-id-type="doi">10.1186/s12913-022-08215-8</pub-id><pub-id pub-id-type="medline">35778736</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davenport</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kalakota</surname><given-names>R</given-names> </name></person-group><article-title>The potential for artificial intelligence in healthcare</article-title><source>Future Healthc J</source><year>2019</year><month>06</month><volume>6</volume><issue>2</issue><fpage>94</fpage><lpage>98</lpage><pub-id pub-id-type="doi">10.7861/futurehosp.6-2-94</pub-id><pub-id pub-id-type="medline">31363513</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kelly</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Karthikesalingam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Suleyman</surname><given-names>M</given-names> </name><name name-style="western"><surname>Corrado</surname><given-names>G</given-names> </name><name 
name-style="western"><surname>King</surname><given-names>D</given-names> </name></person-group><article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title><source>BMC Med</source><year>2019</year><month>10</month><day>29</day><volume>17</volume><issue>1</issue><fpage>195</fpage><pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id><pub-id pub-id-type="medline">31665002</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esmaeilzadeh</surname><given-names>P</given-names> </name></person-group><article-title>Challenges and strategies for wide-scale artificial intelligence (AI) deployment in healthcare practices: a perspective for healthcare organizations</article-title><source>Artif Intell Med</source><year>2024</year><month>05</month><volume>151</volume><fpage>102861</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2024.102861</pub-id><pub-id pub-id-type="medline">38555850</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>H</given-names> </name><name name-style="western"><surname>Downer</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ives</surname><given-names>J</given-names> </name></person-group><article-title>Clinicians and AI use: where is the professional guidance?</article-title><source>J Med Ethics</source><year>2024</year><month>06</month><day>21</day><volume>50</volume><issue>7</issue><fpage>437</fpage><lpage>441</lpage><pub-id pub-id-type="doi">10.1136/jme-2022-108831</pub-id><pub-id pub-id-type="medline">37607805</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Jabbour</surname><given-names>S</given-names> </name><name name-style="western"><surname>Fouhey</surname><given-names>D</given-names> </name><name name-style="western"><surname>Shepard</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Measuring the impact of AI in the diagnosis of hospitalized patients: a randomized clinical vignette survey study</article-title><source>JAMA</source><year>2023</year><month>12</month><day>19</day><volume>330</volume><issue>23</issue><fpage>2275</fpage><lpage>2284</lpage><pub-id pub-id-type="doi">10.1001/jama.2023.22295</pub-id><pub-id pub-id-type="medline">38112814</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>A</given-names> </name><name name-style="western"><surname>Larbi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>K</given-names> </name><name name-style="western"><surname>O&#x2019;Neill</surname><given-names>S</given-names> </name><name name-style="western"><surname>Jayasekera</surname><given-names>J</given-names> </name></person-group><article-title>A scoping review of interactive and personalized web-based clinical tools to support treatment decision making in breast cancer</article-title><source>Breast</source><year>2022</year><month>02</month><volume>61</volume><fpage>43</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1016/j.breast.2021.12.003</pub-id><pub-id pub-id-type="medline">34896693</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>V&#x00F6;lkel</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hueting</surname><given-names>TA</given-names> </name><name 
name-style="western"><surname>Draeger</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Improved risk estimation of locoregional recurrence, secondary contralateral tumors and distant metastases in early breast cancer: the INFLUENCE 2.0 model</article-title><source>Breast Cancer Res Treat</source><year>2021</year><month>10</month><volume>189</volume><issue>3</issue><fpage>817</fpage><lpage>826</lpage><pub-id pub-id-type="doi">10.1007/s10549-021-06335-z</pub-id><pub-id pub-id-type="medline">34338943</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Maaren</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Hueting</surname><given-names>TA</given-names> </name><name name-style="western"><surname>V&#x00F6;lkel</surname><given-names>V</given-names> </name><name name-style="western"><surname>van Hezewijk</surname><given-names>M</given-names> </name><name name-style="western"><surname>Strobbe</surname><given-names>LJ</given-names> </name><name name-style="western"><surname>Siesling</surname><given-names>S</given-names> </name></person-group><article-title>The use and misuse of risk prediction tools for clinical decision-making</article-title><source>Breast</source><year>2023</year><month>06</month><volume>69</volume><fpage>428</fpage><lpage>430</lpage><pub-id pub-id-type="doi">10.1016/j.breast.2023.01.006</pub-id><pub-id pub-id-type="medline">36709092</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collins</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name></person-group><article-title>Reporting of artificial intelligence prediction models</article-title><source>The 
Lancet</source><year>2019</year><month>04</month><volume>393</volume><issue>10181</issue><fpage>1577</fpage><lpage>1579</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(19)30037-6</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fehr</surname><given-names>J</given-names> </name><name name-style="western"><surname>Citro</surname><given-names>B</given-names> </name><name name-style="western"><surname>Malpani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lippert</surname><given-names>C</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name></person-group><article-title>A trustworthy AI reality-check: the lack of transparency of artificial intelligence products in healthcare</article-title><source>Front Digit Health</source><year>2024</year><volume>6</volume><fpage>1267290</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2024.1267290</pub-id><pub-id pub-id-type="medline">38455991</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Van Calster</surname><given-names>B</given-names> </name><name name-style="western"><surname>Wynants</surname><given-names>L</given-names> </name><name name-style="western"><surname>Timmerman</surname><given-names>D</given-names> </name><name name-style="western"><surname>Steyerberg</surname><given-names>EW</given-names> </name><name name-style="western"><surname>Collins</surname><given-names>GS</given-names> </name></person-group><article-title>Predictive analytics in health care: how can we know it works?</article-title><source>J Am Med Inform Assoc</source><year>2019</year><month>12</month><day>1</day><volume>26</volume><issue>12</issue><fpage>1651</fpage><lpage>1654</lpage><pub-id 
pub-id-type="doi">10.1093/jamia/ocz130</pub-id><pub-id pub-id-type="medline">31373357</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Campbell</surname><given-names>DJ</given-names> </name></person-group><article-title>The clinical utility curve: a proposal to improve the translation of information provided by prediction models to clinicians</article-title><source>BMC Res Notes</source><year>2016</year><month>04</month><day>14</day><volume>9</volume><issue>1</issue><fpage>219</fpage><pub-id pub-id-type="doi">10.1186/s13104-016-2028-0</pub-id><pub-id pub-id-type="medline">27080381</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meshaka</surname><given-names>R</given-names> </name><name name-style="western"><surname>Pinto Dos Santos</surname><given-names>D</given-names> </name><name name-style="western"><surname>Arthurs</surname><given-names>OJ</given-names> </name><name name-style="western"><surname>Sebire</surname><given-names>NJ</given-names> </name><name name-style="western"><surname>Shelmerdine</surname><given-names>SC</given-names> </name></person-group><article-title>Artificial intelligence reporting guidelines: what the pediatric radiologist needs to know</article-title><source>Pediatr Radiol</source><year>2022</year><month>10</month><volume>52</volume><issue>11</issue><fpage>2101</fpage><lpage>2110</lpage><pub-id pub-id-type="doi">10.1007/s00247-021-05129-1</pub-id><pub-id pub-id-type="medline">34196729</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>P</given-names> 
</name><name name-style="western"><surname>Dai</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Human understandable thyroid ultrasound imaging AI report system - a bridge between AI and clinicians</article-title><source>iScience</source><year>2023</year><month>04</month><day>21</day><volume>26</volume><issue>4</issue><fpage>106530</fpage><pub-id pub-id-type="doi">10.1016/j.isci.2023.106530</pub-id><pub-id pub-id-type="medline">37123225</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boag</surname><given-names>W</given-names> </name><name name-style="western"><surname>Hasan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>JY</given-names> </name><etal/></person-group><article-title>The algorithm journey map: a tangible approach to implementing AI solutions in healthcare</article-title><source>NPJ Digit Med</source><year>2024</year><month>04</month><day>9</day><volume>7</volume><issue>1</issue><fpage>87</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01061-4</pub-id><pub-id pub-id-type="medline">38594344</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collins</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Reitsma</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name></person-group><article-title>Transparent reporting of a multivariable prediction model for individual prognosis or diagnosis (TRIPOD): the TRIPOD Statement</article-title><source>BMC 
Med</source><year>2015</year><month>01</month><day>6</day><volume>13</volume><issue>1</issue><fpage>1</fpage><pub-id pub-id-type="doi">10.1186/s12916-014-0241-z</pub-id><pub-id pub-id-type="medline">25563062</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collins</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Dhiman</surname><given-names>P</given-names> </name><name name-style="western"><surname>Andaur Navarro</surname><given-names>CL</given-names> </name><etal/></person-group><article-title>Protocol for development of a reporting guideline (TRIPOD-AI) and risk of bias tool (PROBAST-AI) for diagnostic and prognostic prediction model studies based on artificial intelligence</article-title><source>BMJ Open</source><year>2021</year><month>07</month><day>9</day><volume>11</volume><issue>7</issue><fpage>e048008</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2020-048008</pub-id><pub-id pub-id-type="medline">34244270</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collins</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name><name name-style="western"><surname>Dhiman</surname><given-names>P</given-names> </name><etal/></person-group><article-title>TRIPOD+AI statement: updated guidance for reporting clinical prediction models that use regression or machine learning methods</article-title><source>BMJ</source><year>2024</year><month>04</month><day>16</day><volume>385</volume><fpage>e078378</fpage><pub-id pub-id-type="doi">10.1136/bmj-2023-078378</pub-id><pub-id pub-id-type="medline">38626948</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Cruz Rivera</surname><given-names>S</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Calvert</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Denniston</surname><given-names>AK</given-names> </name><collab>SPIRIT-AI and CONSORT-AI Working Group</collab></person-group><article-title>Reporting guidelines for clinical trial reports for interventions involving artificial intelligence: the CONSORT-AI extension</article-title><source>Nat Med</source><year>2020</year><month>09</month><volume>26</volume><issue>9</issue><fpage>1364</fpage><lpage>1374</lpage><pub-id pub-id-type="doi">10.1038/s41591-020-1034-x</pub-id><pub-id pub-id-type="medline">32908283</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vasey</surname><given-names>B</given-names> </name><name name-style="western"><surname>Nagendran</surname><given-names>M</given-names> </name><name name-style="western"><surname>Campbell</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Reporting guideline for the early-stage clinical evaluation of decision support systems driven by artificial intelligence: DECIDE-AI</article-title><source>Nat Med</source><year>2022</year><month>05</month><volume>28</volume><issue>5</issue><fpage>924</fpage><lpage>933</lpage><pub-id pub-id-type="doi">10.1038/s41591-022-01772-9</pub-id><pub-id pub-id-type="medline">35585198</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cruz Rivera</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>AW</given-names> </name><etal/></person-group><article-title>Guidelines for clinical trial protocols for interventions involving artificial intelligence: the SPIRIT-AI extension</article-title><source>Nat Med</source><year>2020</year><month>09</month><volume>26</volume><issue>9</issue><fpage>1351</fpage><lpage>1363</lpage><pub-id pub-id-type="doi">10.1038/s41591-020-1037-7</pub-id><pub-id pub-id-type="medline">32908284</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>Welcoming new guidelines for AI clinical research</article-title><source>Nat Med</source><year>2020</year><month>09</month><volume>26</volume><issue>9</issue><fpage>1318</fpage><lpage>1320</lpage><pub-id pub-id-type="doi">10.1038/s41591-020-1042-x</pub-id><pub-id pub-id-type="medline">32908274</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Volovici</surname><given-names>V</given-names> </name><name name-style="western"><surname>Syn</surname><given-names>NL</given-names> </name><name name-style="western"><surname>Ercole</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>N</given-names> </name></person-group><article-title>Steps to avoid overuse and misuse of machine learning in clinical research</article-title><source>Nat Med</source><year>2022</year><month>10</month><volume>28</volume><issue>10</issue><fpage>1996</fpage><lpage>1999</lpage><pub-id 
pub-id-type="doi">10.1038/s41591-022-01961-6</pub-id><pub-id pub-id-type="medline">36097217</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gilligan-Lee</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Visnjic</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Technology readiness levels for machine learning systems</article-title><source>Nat Commun</source><year>2022</year><month>10</month><day>20</day><volume>13</volume><issue>1</issue><fpage>6039</fpage><pub-id pub-id-type="doi">10.1038/s41467-022-33128-9</pub-id><pub-id pub-id-type="medline">36266298</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Fang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Miao</surname><given-names>H</given-names> </name></person-group><article-title>Introducing the model card toolkit for easier model transparency reporting</article-title><year>2020</year><month>07</month><day>29</day><access-date>2026-02-25</access-date><publisher-name>Google Research</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://research.google/blog/introducing-the-model-card-toolkit-for-easier-model-transparency-reporting/">https://research.google/blog/introducing-the-model-card-toolkit-for-easier-model-transparency-reporting/</ext-link></comment></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mitchell</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Zaldivar</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Model cards for model reporting</article-title><source>FAT* &#x2019;19: Proceedings of the Conference on Fairness, Accountability, and Transparency</source><year>2019</year><month>01</month><day>29</day><fpage>220</fpage><lpage>229</lpage><pub-id pub-id-type="doi">10.1145/3287560.3287596</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sendak</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>M</given-names> </name><name name-style="western"><surname>Brajer</surname><given-names>N</given-names> </name><name name-style="western"><surname>Balu</surname><given-names>S</given-names> </name></person-group><article-title>Presenting machine learning model information to clinical end users with model facts labels</article-title><source>NPJ Digit Med</source><year>2020</year><volume>3</volume><issue>1</issue><fpage>41</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-0253-3</pub-id><pub-id pub-id-type="medline">32219182</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schwartz</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Woloshin</surname><given-names>S</given-names> </name><name name-style="western"><surname>Welch</surname><given-names>HG</given-names> </name></person-group><article-title>Using a drug facts box to communicate drug benefits and harms: two randomized trials</article-title><source>Ann Intern Med</source><year>2009</year><month>04</month><day>21</day><volume>150</volume><issue>8</issue><fpage>516</fpage><lpage>527</lpage><pub-id 
pub-id-type="doi">10.7326/0003-4819-150-8-200904210-00106</pub-id><pub-id pub-id-type="medline">19221371</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bradshaw</surname><given-names>C</given-names> </name><name name-style="western"><surname>Atkinson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Doody</surname><given-names>O</given-names> </name></person-group><article-title>Employing a qualitative description approach in health care research</article-title><source>Glob Qual Nurs Res</source><year>2017</year><volume>4</volume><fpage>2333393617742282</fpage><pub-id pub-id-type="doi">10.1177/2333393617742282</pub-id><pub-id pub-id-type="medline">29204457</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tong</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sainsbury</surname><given-names>P</given-names> </name><name name-style="western"><surname>Craig</surname><given-names>J</given-names> </name></person-group><article-title>Consolidated criteria for reporting qualitative research (COREQ): a 32-item checklist for interviews and focus groups</article-title><source>Int J Qual Health Care</source><year>2007</year><month>12</month><volume>19</volume><issue>6</issue><fpage>349</fpage><lpage>357</lpage><pub-id pub-id-type="doi">10.1093/intqhc/mzm042</pub-id><pub-id pub-id-type="medline">17872937</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>One size fits all? 
What counts as quality practice in (reflexive) thematic analysis?</article-title><source>Qual Res Psychol</source><year>2021</year><month>07</month><day>3</day><volume>18</volume><issue>3</issue><fpage>328</fpage><lpage>352</lpage><pub-id pub-id-type="doi">10.1080/14780887.2020.1769238</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Salmi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Atif</surname><given-names>D</given-names> </name><name name-style="western"><surname>Oliva</surname><given-names>D</given-names> </name><name name-style="western"><surname>Abraham</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ventura</surname><given-names>S</given-names> </name></person-group><article-title>Handling imbalanced medical datasets: review of a decade of research</article-title><source>Artif Intell Rev</source><year>2024</year><volume>57</volume><issue>10</issue><pub-id pub-id-type="doi">10.1007/s10462-024-10884-2</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frey</surname><given-names>N</given-names> </name><name name-style="western"><surname>Agha-Mir-Salim</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hinz</surname><given-names>E</given-names> </name><name name-style="western"><surname>Poncette</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Balzer</surname><given-names>F</given-names> </name></person-group><article-title>Assessing healthcare stakeholder understanding of machine learning documentation</article-title><source>Stud Health Technol Inform</source><year>2025</year><month>05</month><day>15</day><volume>327</volume><fpage>318</fpage><lpage>322</lpage><pub-id 
pub-id-type="doi">10.3233/SHTI250336</pub-id><pub-id pub-id-type="medline">40380447</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reyna</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Nsoesie</surname><given-names>EO</given-names> </name><name name-style="western"><surname>Clifford</surname><given-names>GD</given-names> </name></person-group><article-title>Rethinking algorithm performance metrics for artificial intelligence in diagnostic medicine</article-title><source>JAMA</source><year>2022</year><month>07</month><day>26</day><volume>328</volume><issue>4</issue><fpage>329</fpage><lpage>330</lpage><pub-id pub-id-type="doi">10.1001/jama.2022.10561</pub-id><pub-id pub-id-type="medline">35802382</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sullivan</surname><given-names>HW</given-names> </name><name name-style="western"><surname>O&#x2019;Donoghue</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Aikin</surname><given-names>KJ</given-names> </name></person-group><article-title>Primary care physicians&#x2019; use of FDA-approved prescription drug labels</article-title><source>J Am Board Fam Med</source><year>2014</year><volume>27</volume><issue>5</issue><fpage>694</fpage><lpage>698</lpage><pub-id pub-id-type="doi">10.3122/jabfm.2014.05.140039</pub-id><pub-id pub-id-type="medline">25201939</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kesselheim</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Cresswell</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Phansalkar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bates</surname><given-names>DW</given-names> </name><name name-style="western"><surname>Sheikh</surname><given-names>A</given-names> </name></person-group><article-title>Clinical decision support systems could be modified to reduce &#x201C;alert fatigue&#x201D; while still minimizing the risk of litigation</article-title><source>Health Aff (Millwood)</source><year>2011</year><month>12</month><volume>30</volume><issue>12</issue><fpage>2310</fpage><lpage>2317</lpage><pub-id pub-id-type="doi">10.1377/hlthaff.2010.1111</pub-id><pub-id pub-id-type="medline">22147858</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McGreevey</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Mallozzi</surname><given-names>CP</given-names> </name><name name-style="western"><surname>Perkins</surname><given-names>RM</given-names> </name><name name-style="western"><surname>Shelov</surname><given-names>E</given-names> </name><name name-style="western"><surname>Schreiber</surname><given-names>R</given-names> </name></person-group><article-title>Reducing alert burden in electronic health records: state of the art recommendations from four health systems</article-title><source>Appl Clin Inform</source><year>2020</year><month>01</month><volume>11</volume><issue>1</issue><fpage>1</fpage><lpage>12</lpage><pub-id pub-id-type="doi">10.1055/s-0039-3402715</pub-id><pub-id pub-id-type="medline">31893559</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boxwala</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Rocha</surname><given-names>BH</given-names> </name><name 
name-style="western"><surname>Maviglia</surname><given-names>S</given-names> </name><etal/></person-group><article-title>A multi-layered framework for disseminating knowledge for computer-based decision support</article-title><source>J Am Med Inform Assoc</source><year>2011</year><month>12</month><volume>18 Suppl 1</volume><issue>Suppl 1</issue><fpage>i132</fpage><lpage>i139</lpage><pub-id pub-id-type="doi">10.1136/amiajnl-2011-000334</pub-id><pub-id pub-id-type="medline">22052898</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Strien-Knippenberg</surname><given-names>IS</given-names> </name><name name-style="western"><surname>Boshuizen</surname><given-names>MCS</given-names> </name><name name-style="western"><surname>Determann</surname><given-names>D</given-names> </name><name name-style="western"><surname>de Boer</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Damman</surname><given-names>OC</given-names> </name></person-group><article-title>Cocreation with Dutch patients of decision-relevant information to support shared decision-making about adjuvant treatment in breast cancer care</article-title><source>Health Expect</source><year>2022</year><month>08</month><volume>25</volume><issue>4</issue><fpage>1664</fpage><lpage>1677</lpage><pub-id pub-id-type="doi">10.1111/hex.13510</pub-id><pub-id pub-id-type="medline">35579109</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roumen</surname><given-names>C</given-names> </name><name name-style="western"><surname>Hasannejadasl</surname><given-names>H</given-names> </name><name name-style="western"><surname>Swart</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Breast cancer patients&#x2019; most 
important quality of life themes for a radiotherapy decision aid</article-title><source>Breast</source><year>2022</year><month>10</month><volume>65</volume><fpage>8</fpage><lpage>14</lpage><pub-id pub-id-type="doi">10.1016/j.breast.2022.06.002</pub-id><pub-id pub-id-type="medline">35728438</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perdana</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rob</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rohde</surname><given-names>F</given-names> </name></person-group><article-title>Does visualization matter? The role of interactive data visualization to make sense of information</article-title><source>AJIS</source><year>2018</year><volume>22</volume><pub-id pub-id-type="doi">10.3127/ajis.v22i0.1681</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Laka</surname><given-names>M</given-names> </name><name name-style="western"><surname>Carter</surname><given-names>D</given-names> </name><name name-style="western"><surname>Milazzo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Merlin</surname><given-names>T</given-names> </name></person-group><article-title>Challenges and opportunities in implementing clinical decision support systems (CDSS) at scale: interviews with Australian policymakers</article-title><source>Health Policy Technol</source><year>2022</year><month>09</month><volume>11</volume><issue>3</issue><fpage>100652</fpage><pub-id pub-id-type="doi">10.1016/j.hlpt.2022.100652</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Swan</surname><given-names>EL</given-names> </name><name name-style="western"><surname>Peltier</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Dahl</surname><given-names>AJ</given-names> </name></person-group><article-title>Artificial intelligence in healthcare: the value co-creation process and influence of other digital health transformations</article-title><source>JRIM</source><year>2024</year><month>01</month><day>30</day><volume>18</volume><issue>1</issue><fpage>109</fpage><lpage>126</lpage><pub-id pub-id-type="doi">10.1108/JRIM-09-2022-0293</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>J</given-names> </name><name name-style="western"><surname>Docherty</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Dowling</surname><given-names>K</given-names> </name></person-group><article-title>Design thinking and innovation: synthesising concepts of knowledge co-creation in spaces of professional development</article-title><source>The Design Journal</source><year>2016</year><month>01</month><day>2</day><volume>19</volume><issue>1</issue><fpage>117</fpage><lpage>139</lpage><pub-id pub-id-type="doi">10.1080/14606925.2016.1109205</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sreenivasan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Suresh</surname><given-names>M</given-names> </name></person-group><article-title>Design thinking and artificial intelligence: a systematic literature review exploring synergies</article-title><source>Int J Innov 
Stud</source><year>2024</year><month>09</month><volume>8</volume><issue>3</issue><fpage>297</fpage><lpage>312</lpage><pub-id pub-id-type="doi">10.1016/j.ijis.2024.05.001</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="web"><article-title>Article 4: AI literacy</article-title><source>EU Artificial Intelligence Act</source><access-date>2025-12-18</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://artificialintelligenceact.eu/article/4/">https://artificialintelligenceact.eu/article/4/</ext-link></comment></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suchikova</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Tsybuliak</surname><given-names>N</given-names> </name><name name-style="western"><surname>Teixeira da Silva</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Nazarovets</surname><given-names>S</given-names> </name></person-group><article-title>GAIDeT (Generative AI Delegation Taxonomy): a taxonomy for humans to delegate tasks to generative artificial intelligence in scientific research and publishing</article-title><source>Account Res</source><year>2025</year><month>08</month><day>8</day><fpage>1</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1080/08989621.2025.2544331</pub-id><pub-id pub-id-type="medline">40781729</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Interview guide clinicians.</p><media xlink:href="mededu_v12i1e85228_app1.docx" xlink:title="DOCX File, 19 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Interview guide artificial intelligence (AI) experts.</p><media xlink:href="mededu_v12i1e85228_app2.docx" xlink:title="DOCX File, 20 
KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Examples of Model Card, Transparent Reporting of a multivariable prediction model for Individual Prognosis Or Diagnosis&#x2013;Artificial Intelligence (TRIPOD-AI), and Model Facts.</p><media xlink:href="mededu_v12i1e85228_app3.docx" xlink:title="DOCX File, 38 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Clusters with themes clinicians.</p><media xlink:href="mededu_v12i1e85228_app4.docx" xlink:title="DOCX File, 433 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Clusters with themes artificial intelligence (AI) experts.</p><media xlink:href="mededu_v12i1e85228_app5.docx" xlink:title="DOCX File, 395 KB"/></supplementary-material><supplementary-material id="app6"><label>Checklist 1</label><p>Consolidated Criteria for Reporting Qualitative Research (COREQ) checklist.</p><media xlink:href="mededu_v12i1e85228_app6.docx" xlink:title="DOCX File, 27 KB"/></supplementary-material></app-group></back></article>