<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR XR Spatial Comput</journal-id>
      <journal-title>JMIR XR and Spatial Computing</journal-title>
      <issn pub-type="epub">2818-3045</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v1i1e52904</article-id>
      <article-id pub-id-type="pmid"/>
      <article-id pub-id-type="doi">10.2196/52904</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>What is Diminished Virtuality? A Directional and Layer-Based Taxonomy for the Reality-Virtuality Continuum</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Leung</surname>
            <given-names>Tiffany</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hirota</surname>
            <given-names>Masakazu</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Sharrad</surname>
            <given-names>Kelsey</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Egger</surname>
            <given-names>Jan</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Institute of Computer Graphics and Vision</institution>
            <institution>Graz University of Technology</institution>
            <addr-line>Inffeldgasse 16c/2</addr-line>
            <addr-line>Graz, 8010</addr-line>
            <country>Austria</country>
            <phone>43 316 873 5076</phone>
            <email>egger@icg.tugraz.at</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5225-1982</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Gsaxner</surname>
            <given-names>Christina</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2227-3523</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Kleesiek</surname>
            <given-names>Jens</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff6" ref-type="aff">6</xref>
          <xref rid="aff7" ref-type="aff">7</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8686-0682</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Puladi</surname>
            <given-names>Behrus</given-names>
          </name>
          <degrees>MD, DMD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5909-6105</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Institute of Computer Graphics and Vision</institution>
        <institution>Graz University of Technology</institution>
        <addr-line>Graz</addr-line>
        <country>Austria</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Center for Virtual and Extended Reality in Medicine</institution>
        <institution>Essen University Hospital</institution>
        <addr-line>Essen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Institute for Artificial Intelligence in Medicine</institution>
        <institution>Essen University Hospital</institution>
        <addr-line>Essen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Oral and Maxillofacial Surgery</institution>
        <institution>University Hospital RWTH Aachen</institution>
        <addr-line>Aachen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Institute of Medical Informatics</institution>
        <institution>University Hospital RWTH Aachen</institution>
        <addr-line>Aachen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Partner Site Essen</institution>
        <institution>German Cancer Consortium</institution>
        <addr-line>Essen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff7">
        <label>7</label>
        <institution>Department of Physics</institution>
        <institution>TU Dortmund University</institution>
        <addr-line>Dortmund</addr-line>
        <country>Germany</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Jan Egger <email>egger@icg.tugraz.at</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>31</day>
        <month>1</month>
        <year>2024</year>
      </pub-date>
      <volume>1</volume>
      <elocation-id>e52904</elocation-id>
      <history>
        <date date-type="received">
          <day>19</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>24</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>14</day>
          <month>11</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>26</day>
          <month>12</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Jan Egger, Christina Gsaxner, Jens Kleesiek, Behrus Puladi. Originally published in JMIR XR and Spatial Computing (https://xr.jmir.org), 31.01.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR XR and Spatial Computing, is properly cited. The complete bibliographic information, a link to the original publication on https://xr.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://xr.jmir.org/2024/1/e52904" xlink:type="simple"/>
      <abstract>
        <p>The concept of reality-virtuality (RV) continuum was introduced by Paul Milgram and Fumio Kishino in 1994. It describes a spectrum that ranges from a purely physical reality (the real world) to a purely virtual reality (a completely computer-generated environment), with various degrees of mixed reality in between. This continuum is “realized” by different types of displays to encompass different levels of immersion and interaction, allowing for the classification of different types of environments and experiences. What is often overlooked in this concept is the act of diminishing real objects (or persons, animals, etc) from the reality, that is, a diminution, rather than augmenting it, that is, an augmentation. Hence, we want to propose in this contribution an update or modification of the RV continuum where the diminished reality aspect is more prominent. We hope this will help users, especially those who are new to the field, to get a better understanding of the entire extended reality (XR) topic, as well as assist in the decision-making for hardware (devices) and software or algorithms that are needed for new diminished reality applications. However, we also propose another, more sophisticated directional and layer-based taxonomy for the RV continuum that we believe goes beyond the mediated and multimediated realities. Furthermore, we initiate the question of whether the RV continuum truly ends on one side with physical reality.</p>
      </abstract>
      <kwd-group>
        <kwd>reality-virtuality continuum</kwd>
        <kwd>diminished virtuality</kwd>
        <kwd>Apple Vision Pro</kwd>
        <kwd>VR</kwd>
        <kwd>virtual reality</kwd>
        <kwd>reality-virtuality</kwd>
        <kwd>mixed reality</kwd>
        <kwd>augmented reality</kwd>
        <kwd>XR</kwd>
        <kwd>extended reality</kwd>
        <kwd>taxonomy</kwd>
        <kwd>classification</kwd>
        <kwd>classifications</kwd>
        <kwd>concept</kwd>
        <kwd>concepts</kwd>
        <kwd>conceptual</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The reality-virtuality (RV) continuum is a concept introduced by Paul Milgram and Fumio Kishino [<xref ref-type="bibr" rid="ref1">1</xref>] in 1994. It describes a spectrum that ranges from a purely physical reality (the real world) to a purely virtual reality (VR; a completely computer-generated environment), with various degrees of mixed reality (MR) in between. This continuum is “realized” by different types of displays [<xref ref-type="bibr" rid="ref2">2</xref>] to encompass different levels of immersion and interaction, allowing for the classification of different types of environments and experiences. The RV continuum helps us understand the varying levels of immersion and interactivity that technology can provide. As technology advances, the boundaries between these immersion levels can become more fluid, and new hybrid experiences can emerge. The continuum is particularly relevant in fields such as VR, augmented reality (AR), and MR, where researchers and developers aim to create more compelling and natural experiences that bridge the gap between the physical and virtual worlds. We used ChatGPT (OpenAI) [<xref ref-type="bibr" rid="ref3">3</xref>] to gauge the current state of the RV continuum. According to ChatGPT, the continuum is often divided into several main categories (note, we adapted the ChatGPT results and enhanced them with concrete examples, where necessary; <xref ref-type="boxed-text" rid="box1">Textbox 1</xref> [<xref ref-type="bibr" rid="ref4">4</xref>]). The original ChatGPT transcript is shown in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
      <boxed-text id="box1" position="float">
        <title>Main categories of the reality-virtuality continuum, modified from ChatGPT.</title>
        <p>
          <bold>ChatGPT prompt:</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>What is the reality-virtuality continuum?</p>
          </list-item>
        </list>
        <p>
          <bold>Main categories (modified ChatGPT output):</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>Physical reality (real environment): This is the state of the unmediated physical world, where all sensory perceptions are naturally experienced without any technological augmentation.</p>
          </list-item>
          <list-item>
            <p>Augmented reality (AR): In this category, virtual elements are overlaid onto the real world. AR enhances the user’s perception of the physical world by adding computer-generated visual, auditory, or haptic information. Examples include smartphone apps that display digital information on top of real-world views, such as Pokémon GO [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
          </list-item>
          <list-item>
            <p>Mixed reality (MR): MR environments combine virtual and physical elements in a way that allows them to interact in real time. Users can manipulate both virtual and real objects, and the distinction between the 2 can be blurred. Microsoft’s HoloLens is an example of an MR device that enables users to interact with holographic objects in their real-world environment [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
          </list-item>
          <list-item>
            <p>Virtual reality (VR): In VR, users are completely immersed in a computer-generated environment that can simulate various sensory experiences. VR typically involves the use of head-mounted displays and other input devices to provide a sense of presence within the virtual environment [<xref ref-type="bibr" rid="ref7">7</xref>]. Prominent examples are the HTC Vive and the Meta Quest.</p>
          </list-item>
          <list-item>
            <p>Augmented virtuality (AV): This term is less commonly used than the others. It refers to scenarios where real-world elements are brought into a virtual environment. For example, capturing real objects or people and placing them into a virtual space. The Varjo XR-3 is capable of providing such a function and is able to make a video stream into the virtual world (VR). A concrete example could be showing a video stream of the (real) smartphone in VR, so the user can answer a text message without actually leaving VR (removing the headset and thus breaking the illusion being in “another world,” the simulated virtual environment).</p>
          </list-item>
        </list>
      </boxed-text>
    </sec>
    <sec>
      <title>Diminished Reality</title>
      <p>What is often overlooked in this concept is the act of <italic>diminishing</italic> real objects (or persons, animals, etc) from reality, rather than <italic>augmenting</italic> the reality with virtual things [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. An introduction to the topic can be found in Cheng et al [<xref ref-type="bibr" rid="ref10">10</xref>]. A reason for this is that diminishing something from reality needs, in general, a sophisticated understanding of the real scene or environment to make the <italic>diminishing</italic> aspect convincing. In AR, the real world is <italic>just</italic> overwritten with a virtual object. In diminished reality (DR), however, the real-world part that is <italic>augmented</italic> or <italic>diminished</italic> needs to seemingly <italic>fit</italic> to the reality around it. In addition, this should all be performed in real time when a user is walking around the real world, and an algorithm has to do the following (note that the first 3 items are part of the Extent of World Knowledge axis of the taxonomy by Milgram and Kishino [<xref ref-type="bibr" rid="ref1">1</xref>]):</p>
      <list list-type="bullet">
        <list-item>
          <p>Detect and track the real object that has to be removed or diminished;</p>
        </list-item>
        <list-item>
          <p>Perform geometric modeling of the scene and objects to be added or subtracted (preexisting or captured once or in real time);</p>
        </list-item>
        <list-item>
          <p>Apply the lighting model of the scene to objects added or to part of the revealed scene when something is removed (preexisting or captured once or in real time); and then</p>
        </list-item>
        <list-item>
          <p>Combine all the previous points together as the scene description for the rendering algorithm.</p>
        </list-item>
      </list>
      <p>All of this has to be done not only in real time but also with very high precision. The inserted virtual object has to fit seamlessly into and make sense with the reality; minor discrepancies will appear to be a glitch and will be noticed immediately by the user, as we recently observed in a DR user study [<xref ref-type="bibr" rid="ref11">11</xref>]. In fact, we think that diminution and augmentation require fundamentally different technologies. In our opinion, an augmentation may be needed to alter reality at a certain position with regard to other (real) objects (eg, displaying a patient’s tumor as an AR hologram on the patient in front of you, at the real position, such as for needle guidance [<xref ref-type="bibr" rid="ref12">12</xref>]), but no seamless and semantic fitting is necessary. As soon as a virtual object needs to fit into the scene semantically, we consider this to require diminution. Hence, for augmentation, you <italic>only</italic> need a volume rendering process with some basic options, such as position, size, and transparency. For diminution, however, additional fundamentally different technologies are needed. The scene has to be analyzed and <italic>understood</italic>, and a meaningful replacement has to be generated and <italic>inserted</italic> as an AR hologram. An example could be glasses that are <italic>removed</italic> from a person in front of you.</p>
      <p>In summary, the user has to get the impression that the real, diminished object does not exist at all in reality [<xref ref-type="bibr" rid="ref13">13</xref>]. Besides sophisticated algorithms, this course of action needs a considerable amount of computing power. Fortunately, there has been tremendous progress in both areas during the last years, with deep learning–based approaches and GPUs that can run these kinds of algorithms, even in real time. As a result, DR has already found its way into some applications [<xref ref-type="bibr" rid="ref5">5</xref>], such as virtual furniture removal for redecorating purposes (eg, IKEA Kreativ [<xref ref-type="bibr" rid="ref14">14</xref>]). Other possible applications for DR include the following:</p>
      <list list-type="bullet">
        <list-item>
          <p>Privacy enhancing: In a live video feed, certain objects or information can be blurred or removed in real time to protect sensitive or private data.</p>
        </list-item>
        <list-item>
          <p>Training and education: DR can be used to remove distractions in a learning environment or highlight specific items to focus on.</p>
        </list-item>
        <list-item>
          <p>Therapeutic applications: For someone with a phobia of spiders, a DR system could recognize spiders in the person’s field of view and diminish or replace them with less threatening images to reduce anxiety. Additionally, sensory overload, a feature of autism, could be diminished with a DR system, to reduce overstimulation.</p>
        </list-item>
      </list>
    </sec>
    <sec>
      <title>Directional and Layer-Based Taxonomy</title>
      <p>Nevertheless, for all these aforementioned reasons, we think that DR needs to be more prominent on the RV continuum, as shown in <xref rid="figure1" ref-type="fig">Figure 1</xref> [<xref ref-type="bibr" rid="ref15">15</xref>], without delving deeper into the broad topics of mediated reality [<xref ref-type="bibr" rid="ref9">9</xref>] or even multimediated reality [<xref ref-type="bibr" rid="ref16">16</xref>]. This will not only assist in the decision-making for hardware (devices) and software that are needed for new DR applications but also help unfamiliar users to get a better understanding of the entire extended reality (XR) topic (note that we are addressing this revision to the continuum purely from an application or user point of view [POV], not from the POV of an MR researcher or engineer). An example application for DR could be the real-time anonymization of a face via XR. There is a huge difference if a device detects the eye area and <italic>simply</italic> inpaints a black bar over the eyes (without considering the surrounding facial area) or inpaints the eyes with <italic>different</italic> or <italic>meaningful</italic> ones that fit perfectly to the surrounding facial area. The black bar approach can probably be performed on a current smartphone, whereas the second approach needs much more sophisticated hardware and computing power, with an integrated GPU that can run a trained, deep inpainting neural network in real time (note that a user with an XR headset would move around in general, which also changes the POV on the face to be anonymized, so the inpainting algorithm also has to be executed continuously in real time). In this context, we also think that the upcoming Apple Vision Pro will push the limits in DR, because it is a video-see-through device that can enable DR to reach its full potential [<xref ref-type="bibr" rid="ref17">17</xref>]. 
In fact, the <italic>Digital Crown</italic> hardware of the Apple Vision Pro, which also exists for the Apple Watch, should enable us to seamlessly <italic>walk</italic> along the whole RV continuum (back and forth) and bring medical DR applications to reality, which are still almost nonexistent currently [<xref ref-type="bibr" rid="ref18">18</xref>]. A potential example of the photo-editing capabilities of newer cell phones as a diminution operation is shown in <xref rid="figure2" ref-type="fig">Figure 2</xref> [<xref ref-type="bibr" rid="ref15">15</xref>]. In this medical example, DR enables the <italic>removal</italic> of a skin tumor virtually from a patient’s face before surgery.</p>
      <p>Diminished virtuality (DV), however, remains a curiosity, and not even ChatGPT—which has been fed with 300 billion words from books, web texts, Wikipedia, articles, etc [<xref ref-type="bibr" rid="ref4">4</xref>]—can handle this term as of January 2024 (<xref ref-type="boxed-text" rid="box2">Textbox 2</xref>; the original ChatGPT transcript is shown in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> [<xref ref-type="bibr" rid="ref3">3</xref>]).</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>An updated reality-virtuality continuum where “diminishing” is more prominent to ensure a better understanding. The figure shows a real operation room (OR), which is “real” in the two left columns and computer-generated in the two right columns. In “reality,” a real physician is standing inside an OR without any computer-generated objects. In “augmented reality,” the real physician wears extended reality (XR) glasses in the OR and looks at a computer-generated skull of the patient to be treated. In “augmented-diminished reality,” the real OR table has been removed. In “diminished reality,” the OR table has been removed (but also note that the computer-generated skull is not visualized). On the right side is “virtuality,” that is, virtual reality (VR), where a computer-generated OR with a table and a skull are shown in VR (to a user wearing VR glasses). In “augmented virtuality,” the real physician is shown inside the VR OR. In “augmented-diminished virtuality” (ie, mediated virtuality), the computer-generated OR table is removed, but note that the real physician is still shown. In “diminished virtuality,” the OR table has been removed, but the real physician is also not shown. The “augmenting-diminishing” continuum shows the degree of augmentation and diminution (note that this does not apply for “reality” and “virtuality”). Scenarios where a diminution happens belong to “disrupted reality.” The images within the figure were generated by DALL·E 3 (OpenAI) [<xref ref-type="bibr" rid="ref15">15</xref>] and then further modified by the authors.</p>
        </caption>
        <graphic xlink:href="xr_v1i1e52904_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>An example of the photo-editing capabilities of newer cell phones as a diminution operation, enabling the “removal” of a skin tumor virtually from a patient’s face before surgery. This figure was generated by DALL·E 3 (OpenAI) [<xref ref-type="bibr" rid="ref15">15</xref>] and then further modified by the authors.</p>
        </caption>
        <graphic xlink:href="xr_v1i1e52904_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <boxed-text id="box2" position="float">
        <title>Asking ChatGPT about “Diminished Virtuality” (as of January 2024).</title>
        <p>
          <bold>ChatGPT prompt:</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>What is “Diminished Virtuality”?</p>
          </list-item>
        </list>
        <p>
          <bold>ChatGPT output:</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>“Diminished Virtuality” is not a widely recognized term or concept as of my last knowledge update in January 2022. It’s possible that this term has been coined or introduced in a specific context or field after that date, or it may be a term used in a niche or specialized area that is not widely known.</p>
          </list-item>
          <list-item>
            <p>If you have more context or information about where you came across this term or its usage, I would be happy to try to provide more information or insight based on that context.</p>
          </list-item>
        </list>
      </boxed-text>
      <p>DV can perhaps be best imagined as starting on the extreme right side on the RV continuum, which contains only virtual objects (virtual environment and VR), and going along the continuum toward the left to reach diminished virtual elements (see the figure of the RV continuum by Iriqat and Vatansever [<xref ref-type="bibr" rid="ref19">19</xref>], and note that this is especially of interest for computer-generated structures that are very complex and where only some parts need to be diminished). This should be possible with the <italic>Digital Crown</italic> hardware of the Apple Vision Pro: when fully immersed, the virtuality is reduced to make the real world partly visible. Apple describes scenarios where people might want to increase the pass-through when they intend to interact with a nearby physical object or read text on another device. Another real-world scenario could be a so-called tumor board meeting that is held in VR, where several experts from around the world gather to discuss a rare case and certain virtual elements (images, texts, etc) are diminished because of privacy issues (note that the mixture of reality here is also modifiable): an externally tuned-in expert (who is traveling) can see certain patient information because he or she belongs to the clinic where the patient is located, whereas another externally tuned-in expert who does not belong to the clinic of the patient should not see or hear certain patient information. However, the full potential of DV would unfold when the virtuality is diminished in a way that also fits with the upcoming reality. An example would be a real-world person showing up in VR between virtual objects. Imagine layers of virtual and real context stacked up but still seamlessly interplaying with each other for the final output. 
In fact, this layer-based concept could also extend to mediated and even multimediated realities, where, for example, different sounds (real and virtual) interplay with each other, depending on their location and <italic>depth</italic>. “Enaudio” (the “hearing” equivalent to envision) describes hearing real rain outside in the real world that is mixed with virtual sounds while inside a virtual room. Real and virtual input from different senses could also be mixed; for example, there is a (virtual) hole in the VR room and the (real) rain falls inside this virtual room, generating simulated sounds as they hit virtual objects in the VR room. The same also works for outputs and their combinations; for example, the real voice from someone wearing the headset is mixed with virtual sounds, or real smells are mixed into VR by generating the corresponding virtual objects there. Finally, some could argue whether the RV continuum really ends on <italic>one side</italic> with physical reality [<xref ref-type="bibr" rid="ref20">20</xref>], because the brain creates reality—comparable to a head-mounted display creating VR—but does not detect it. This could be discussed and explored by the community in the future, and in this regard, we want to propose a diagram of XR that loops back (as opposed to a continuum on a straight line with 2 ends) as an inspiration (<xref rid="figure3" ref-type="fig">Figure 3</xref>). In summary, XR is a niche yet growing topic, because more and more consumer devices with better hardware have become available during the last few years. In parallel, artificial intelligence (AI)–based algorithms have made 1 breakthrough after another, which can, for example, be explored to develop novel applications for an intelligent health care metaverse [<xref ref-type="bibr" rid="ref21">21</xref>].</p>
      <fig id="figure3" position="float">
        <label>Figure 3</label>
        <caption>
          <p>The extended reality (XR) continuum with its subsections (virtual reality [VR], mixed reality [MR], augmented reality [AR], and augmented virtuality [AV]) folded in an outside-in fashion with a circular representation.</p>
        </caption>
        <graphic xlink:href="xr_v1i1e52904_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Asking ChatGPT about "the reality-virtuality continuum."</p>
        <media xlink:href="xr_v1i1e52904_app1.png" xlink:title="PNG File , 303 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Asking ChatGPT about “diminished virtuality.”</p>
        <media xlink:href="xr_v1i1e52904_app2.png" xlink:title="PNG File , 212 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AR</term>
          <def>
            <p>augmented reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">AV</term>
          <def>
            <p>augmented virtuality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DR</term>
          <def>
            <p>diminished reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DV</term>
          <def>
            <p>diminished virtuality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MR</term>
          <def>
            <p>mixed reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">POV</term>
          <def>
            <p>point of view</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">RV</term>
          <def>
            <p>reality-virtuality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">XR</term>
          <def>
            <p>extended reality</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was supported by TU Graz Open Access Publishing Fund. BP was funded by the Medical Faculty of the RWTH Aachen University in Germany as part of the Clinician Scientist Program. CG was funded by the Advanced Research Opportunities Program (AROP) from the RWTH Aachen University. Furthermore, we acknowledge the Center for Virtual and Extended Reality in Medicine (<italic>Zentrum für virtuelle und erweiterte Realität in der Medizin</italic>; ZvRM [<xref ref-type="bibr" rid="ref22">22</xref>]) of the University Hospital in Essen, Germany. We used the generative AI tool ChatGPT by OpenAI to identify the main categories of the reality-virtuality continuum and the status of diminished virtuality, which were further reviewed and revised by the study group. The original ChatGPT transcripts are made available as <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref>. Parts of <xref rid="figure1" ref-type="fig">Figures 1</xref> and <xref rid="figure2" ref-type="fig">2</xref> were created using the generative AI tool DALL·E 3 by OpenAI [<xref ref-type="bibr" rid="ref15">15</xref>].</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>BP is an associate editor of the <italic>Journal of Medical Internet Research</italic>. The other authors have no conflicts of interest to declare.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Milgram</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kishino</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>A taxonomy of mixed reality visual displays</article-title>
          <source>IEICE Transactions on Information and Systems</source>
          <year>1994</year>
          <month>12</month>
          <volume>E77-D</volume>
          <issue>12</issue>
          <fpage>1321</fpage>
          <lpage>1329</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cs.gmu.edu/~zduric/cs499/Readings/r76JBo-Milgram_IEICE_1994.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Milgram</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Takemura</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Utsumi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kishino</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Augmented reality: a class of displays on the reality-virtuality continuum</article-title>
          <source>SPIE Proceedings, Volume 2351, Telemanipulator and Telepresence Technologies</source>
          <year>1995</year>
          <month>12</month>
          <day>21</day>
          <conf-name>Photonics for Industrial Applications 1994</conf-name>
          <conf-date>October 31 to November 4, 1994</conf-date>
          <conf-loc>Boston, MA</conf-loc>
          <fpage>282</fpage>
          <lpage>292</lpage>
          <pub-id pub-id-type="doi">10.1117/12.197321</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="web">
          <article-title>ChatGPT</article-title>
          <source>OpenAI</source>
          <access-date>2024-01-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://chat.openai.com/">https://chat.openai.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Chua</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rickard</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lorenzo</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and large language model (LLM) chatbots: the current state of acceptability and a proposal for guidelines on utilization in academic medicine</article-title>
          <source>J Pediatr Urol</source>
          <year>2023</year>
          <month>10</month>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>598</fpage>
          <lpage>604</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jpurol.2023.05.018</pub-id>
          <pub-id pub-id-type="medline">37328321</pub-id>
          <pub-id pub-id-type="pii">S1477-5131(23)00224-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lawler-Sagarin</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Sagarin</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Pederson</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Enhanced community through augmented reality: social benefits of Pokémon Go</article-title>
          <source>Psychol Rep</source>
          <year>2023</year>
          <month>08</month>
          <day>22</day>
          <fpage>332941231197155</fpage>
          <pub-id pub-id-type="doi">10.1177/00332941231197155</pub-id>
          <pub-id pub-id-type="medline">37605801</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gsaxner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pepe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kleesiek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schmalstieg</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The HoloLens in medicine: a systematic review and taxonomy</article-title>
          <source>Med Image Anal</source>
          <year>2023</year>
          <month>04</month>
          <volume>85</volume>
          <fpage>102757</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1361-8415(23)00018-X"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2023.102757</pub-id>
          <pub-id pub-id-type="medline">36706637</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(23)00018-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gruber</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bönsch</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kraeima</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ulbrich</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>van den Bosch</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Motmaen</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Wilpert</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ooms</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Isfort</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hölzle</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Puladi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Accuracy and precision of mandible segmentation and its clinical implications: virtual reality, desktop screen and artificial intelligence</article-title>
          <source>Expert Syst Appl</source>
          <year>2024</year>
          <month>04</month>
          <volume>239</volume>
          <fpage>122275</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2023.122275</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ikeda</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Saito</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A survey of diminished reality: techniques for visually concealing, eliminating, and seeing through real objects</article-title>
          <source>IPSJ Transactions on Computer Vision and Applications</source>
          <year>2017</year>
          <month>06</month>
          <day>28</day>
          <volume>9</volume>
          <fpage>17</fpage>
          <pub-id pub-id-type="doi">10.1186/s41074-017-0028-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Mediated reality with implementations for everyday life</article-title>
          <source>Presence Connect</source>
          <year>2002</year>
          <month>8</month>
          <day>6</day>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://wearcam.org/presence_connect/"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>YF</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gugenheimer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lindlbauer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Towards understanding diminished reality</article-title>
          <year>2022</year>
          <month>4</month>
          <conf-name>CHI '22: CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 29 to May 5, 2022</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <fpage>1</fpage>
          <lpage>16</lpage>
          <pub-id pub-id-type="doi">10.1145/3491102.3517452</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gsaxner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmalstieg</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Paar</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Bailer</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kalkofen</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>DeepDR: deep structure-aware RGB-D inpainting for diminished reality</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on December 1, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2312.00532</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gsaxner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pepe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schmalstieg</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Inside-out instrument tracking for surgical navigation in augmented reality</article-title>
          <year>2021</year>
          <month>12</month>
          <conf-name>VRST '21: 27th ACM Symposium on Virtual Reality Software and Technology</conf-name>
          <conf-date>December 8-10, 2021</conf-date>
          <conf-loc>Osaka, Japan</conf-loc>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1145/3489849.3489863</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Blended spaces: perception and interaction in projection-based spatial augmented reality environments [dissertation]</article-title>
          <source>University of Hamburg</source>
          <year>2020</year>
          <access-date>2024-01-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ediss.sub.uni-hamburg.de/bitstream/ediss/8644/1/dissertation.pdf">https://ediss.sub.uni-hamburg.de/bitstream/ediss/8644/1/dissertation.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>YW</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>YH</given-names>
            </name>
          </person-group>
          <article-title>Exploration of user experience in mixed reality for product virtual interaction and display</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE 6th International Conference on Knowledge Innovation and Invention (ICKII)</conf-name>
          <conf-date>August 11-13, 2023</conf-date>
          <conf-loc>Sapporo, Japan</conf-loc>
          <fpage>404</fpage>
          <lpage>409</lpage>
          <pub-id pub-id-type="doi">10.1109/ICKII58656.2023.10332563</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <article-title>DALL·E 3</article-title>
          <source>OpenAI</source>
          <access-date>2024-01-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/dall-e-3">https://openai.com/dall-e-3</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Furness</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Iorio</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>All reality: virtual, augmented, mixed (X), mediated (X,Y), and multimediated reality</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on April 20, 2018</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1804.08386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gsaxner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Bian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kleesiek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Puladi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Apple Vision Pro for healthcare: "the ultimate display"? - entering the wonderland of precision medicine</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on August 8, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2308.04313</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ienaga</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bork</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Meerits</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fallavollita</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Saito</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>First deployment of diminished reality for anatomy education</article-title>
          <year>2016</year>
          <conf-name>2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)</conf-name>
          <conf-date>September 19-23, 2016</conf-date>
          <conf-loc>Merida, Mexico</conf-loc>
          <fpage>294</fpage>
          <lpage>296</lpage>
          <pub-id pub-id-type="doi">10.1109/ismar-adjunct.2016.0099</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iriqat</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vatansever</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Comparison of reality types</article-title>
          <source>Uludağ University Journal of The Faculty of Engineering</source>
          <year>2020</year>
          <month>12</month>
          <day>31</day>
          <volume>25</volume>
          <issue>3</issue>
          <fpage>1155</fpage>
          <lpage>1168</lpage>
          <pub-id pub-id-type="doi">10.17482/uumfd.789985</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hutzler</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Reverse inference is not a fallacy per se: cognitive processes can be inferred from functional imaging data</article-title>
          <source>Neuroimage</source>
          <year>2014</year>
          <month>1</month>
          <day>1</day>
          <volume>84</volume>
          <fpage>1061</fpage>
          <lpage>1069</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1053-8119(13)00014-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.12.075</pub-id>
          <pub-id pub-id-type="medline">23313571</pub-id>
          <pub-id pub-id-type="pii">S1053-8119(13)00014-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Badal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Maltz</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Mueller</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Myers</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Niu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vannier</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Development of metaverse for intelligent healthcare</article-title>
          <source>Nat Mach Intell</source>
          <year>2022</year>
          <month>11</month>
          <volume>4</volume>
          <issue>11</issue>
          <fpage>922</fpage>
          <lpage>929</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-022-00549-6</pub-id>
          <pub-id pub-id-type="medline">36935774</pub-id>
          <pub-id pub-id-type="pmcid">PMC10015955</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="web">
          <article-title>Zentrum für virtuelle und erweiterte Realität in der Medizin</article-title>
          <source>Universitätsklinikum Essen</source>
          <access-date>2023-01-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://zvrm.ume.de/">https://zvrm.ume.de/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
