<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR XR Spatial Comput</journal-id><journal-id journal-id-type="publisher-id">xr</journal-id><journal-id journal-id-type="index">46</journal-id><journal-title>JMIR XR and Spatial Computing (JMXR)</journal-title><abbrev-journal-title>JMIR XR Spatial Comput</abbrev-journal-title><issn pub-type="epub">2818-3045</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v3i1e81236</article-id><article-id pub-id-type="doi">10.2196/81236</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Predictive Factors of Augmented Reality&#x2013;Based Clinical Task Performance Among Novice Users: Cross-Sectional Quantitative Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Vellore</surname><given-names>Amogh J</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bhatia</surname><given-names>Shovan</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Kann</surname><given-names>Michael R</given-names></name><degrees>BE</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Kass</surname><given-names>Nicol&#x00E1;s M</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shanahan</surname><given-names>Regan M</given-names></name><degrees>BA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Jardini</surname><given-names>Jacquelyn</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Miner</surname><given-names>Jayne</given-names></name><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Daulat</surname><given-names>Sohail R</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Hurt</surname><given-names>Griffin</given-names></name><degrees>BPhil</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Basdeo</surname><given-names>Rishi</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Don</surname><given-names>Nicole</given-names></name><degrees>MA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Biehl</surname><given-names>Jacob 
T</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Andrews</surname><given-names>Edward G</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Neurological Surgery, University of Pittsburgh Medical Center</institution><addr-line>200 Lothrop St, STE B-400</addr-line><addr-line>Pittsburgh</addr-line><addr-line>PA</addr-line><country>United States</country></aff><aff id="aff2"><institution>Department of Orthopaedic Surgery, University of Pittsburgh Medical Center</institution><addr-line>Pittsburgh</addr-line><addr-line>PA</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Plastic Surgery, University of Pittsburgh Medical Center</institution><addr-line>Pittsburgh</addr-line><addr-line>PA</addr-line><country>United States</country></aff><aff id="aff4"><institution>Department of Computer Science, School of Computing and Information, University of Pittsburgh</institution><addr-line>Pittsburgh</addr-line><addr-line>PA</addr-line><country>United States</country></aff><aff id="aff5"><institution>Department of Mechanical Engineering, Carnegie Mellon University</institution><addr-line>Pittsburgh</addr-line><addr-line>PA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Brini</surname><given-names>Stefano</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Bullock</surname><given-names>Tom</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Yang</surname><given-names>Yue</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Edward G Andrews, MD, Department of Neurological Surgery, University of Pittsburgh Medical Center, 200 Lothrop St, STE B-400, Pittsburgh, PA, 15213, United States, 1 412-647-3685; <email>andrewse2@upmc.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>10</day><month>4</month><year>2026</year></pub-date><volume>3</volume><elocation-id>e81236</elocation-id><history><date date-type="received"><day>24</day><month>07</month><year>2025</year></date><date date-type="rev-recd"><day>12</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>12</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Amogh J Vellore, Shovan Bhatia, Michael R Kann, Nicol&#x00E1;s M Kass, Regan M Shanahan, Jacquelyn Jardini, Jayne Miner, Sohail R Daulat, Griffin Hurt, Rishi Basdeo, Nicole Don, Jacob T Biehl, Edward G Andrews. Originally published in JMIR XR and Spatial Computing (<ext-link ext-link-type="uri" xlink:href="https://xr.jmir.org">https://xr.jmir.org</ext-link>), 10.4.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR XR and Spatial Computing, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://xr.jmir.org/">https://xr.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://xr.jmir.org/2026/1/e81236"/><abstract><sec><title>Background</title><p>Augmented reality (AR) can provide risk-free training for medical trainees, yet little is known about which learner characteristics facilitate adoption or inform training design.</p></sec><sec><title>Objective</title><p>We aimed to identify which learner characteristics predict AR performance in novices. We hypothesized that higher visuospatial ability and greater video game experience would be associated with faster completion times and fewer errors.</p></sec><sec sec-type="methods"><title>Methods</title><p>In this cross-sectional study, 21 undergraduate, graduate, and medical students (median age 22, IQR 21-24 years) without previous AR experience were recruited between June and December 2024. Participants completed a technology experience survey, the mental rotation task (MRT) for visuospatial ability, a standardized 7-task AR protocol mimicking clinical use on the Microsoft HoloLens 2 (hologram manipulation, orbit tracing, anatomical plane visualization, and hologram-to-object registration), and the National Aeronautics and Space Administration Task Load Index for cognitive load assessment. Outcome measures included completion time, slips (unintentional errors), and tracing quality.</p></sec><sec sec-type="results"><title>Results</title><p>All analyses used a significance level of &#x03B1;=.05. MRT scores did not predict baseline performance time (Pearson <italic>r</italic>=0.15, 95% CI &#x2212;0.32 to 0.55; <italic>P</italic>=.54) or error rates (<italic>r</italic>=0.18, 95% CI &#x2212;0.27 to 0.57; <italic>P</italic>=.43). Participants with extensive video game experience (&#x003E;5 hours/week) made fewer slips (unpaired <italic>t</italic> test; mean difference &#x2212;2.62 slips, 95% CI &#x2212;5.19 to &#x2212;0.04; <italic>P</italic>=.047), without faster completion times (Mann-Whitney test; median difference &#x2212;22 seconds, 95% CI &#x2212;7.00 to 57.00; <italic>P</italic>=.24). Video game experience did not predict baseline performance time (Pearson <italic>r</italic>=&#x2212;0.35, 95% CI &#x2212;0.69 to 0.13; <italic>P</italic>=.14).
Significant learning effects emerged in unadjusted analyses: completion times decreased on attempts 2 and 3 compared with attempt 1 (mixed-effects analysis: mean difference 28.75 seconds, 95% CI 12.98-44.52; <italic>P</italic>&#x003C;.001; 28.00 seconds, 95% CI 10.75-45.25; <italic>P</italic>=.002, respectively) with fewer slips (Friedman test: <italic>&#x03C7;</italic><sup>2</sup><sub>2</sub>=17.8; <italic>P</italic>&#x003C;.001; Dunn post hoc: <italic>P</italic>=.008 and <italic>P</italic>&#x003C;.001, respectively). Orbit tracing (Wilcoxon test: median difference &#x2212;5 seconds; <italic>P</italic>=.004) and virtual landmark placement times improved (Friedman test: <italic>&#x03C7;</italic><sup>2</sup><sub>3</sub>=14.6; <italic>P</italic>=.002; Dunn post hoc: <italic>P</italic>=.009 and <italic>P</italic>=.02), but physical landmark placement did not. Covariate-adjusted models revealed no significant trial-by-covariate interactions.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Visuospatial ability does not predict clinically relevant AR performance, while extensive video game experience was associated with fewer errors. Despite previous studies emphasizing inherent learner characteristics in laparoscopy and endoscopy, covariate-adjusted models showed that AR learning curves were not significantly modified by MRT or video game experience. These findings suggest that early AR performance improvements among novice users are primarily driven by learning rather than visuospatial ability, supporting training approaches that emphasize structured practice, although the modest sample size limits detection of smaller effects.</p></sec></abstract><kwd-group><kwd>mixed reality</kwd><kwd>augmented reality</kwd><kwd>virtual reality</kwd><kwd>mental rotation task</kwd><kwd>visuospatial ability</kwd><kwd>medical education</kwd><kwd>video games</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The rise in augmented reality (AR) and virtual reality (VR) technology has greatly impacted a range of industries, including education, entertainment, and medicine [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. AR supplements the user&#x2019;s view of the real world with digital information, which can be shown through projections on head-mounted displays (HMD), such as headsets and smart glasses, or on tablet-based displays. Within medicine, AR and VR applications continue to grow. While outcomes research remains preliminary given AR&#x2019;s relative infancy, studies have found that AR subjectively increases surgeon confidence in delineating tumor margins [<xref ref-type="bibr" rid="ref3">3</xref>]. This observation was validated by a multicenter randomized controlled trial (n=113), which found that AR-guided robotic prostatectomies were associated with a significant decrease in subsequent positive surgical margins, a key prognostic indicator for patient survival [<xref ref-type="bibr" rid="ref4">4</xref>]. Other measured improvements have included decreased fluoroscopy time needed to navigate difficult tissue structures [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>].
This technology has also expanded patient education [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>] by helping patients gain a deeper understanding of their bodies and diseases while also demonstrably decreasing procedural anxiety [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>] and improving satisfaction [<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>As applications of AR and VR continue to expand across specialties, these technologies hold tremendous potential as risk-free training modalities, allowing medical students and resident physicians to practice procedures without jeopardizing patient safety [<xref ref-type="bibr" rid="ref10">10</xref>]. Recent literature has shown that AR can help resident physicians learn to identify aneurysms in surgical videos [<xref ref-type="bibr" rid="ref11">11</xref>], support medical student and resident education as a reliable and predictive simulation-based medical education modality [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>], and minimize mental workload while simultaneously improving learning capacity [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>Despite this promise, there are still some important factors to consider. Although previous studies indicate that AR can increase mental resource availability [<xref ref-type="bibr" rid="ref15">15</xref>], enhance working memory capacity [<xref ref-type="bibr" rid="ref16">16</xref>], and facilitate long-term information storage [<xref ref-type="bibr" rid="ref16">16</xref>], it may also serve as a distraction for some learners [<xref ref-type="bibr" rid="ref17">17</xref>]. Research has shown broad educational benefits, from early childhood learning in preschool [<xref ref-type="bibr" rid="ref18">18</xref>] to secondary education [<xref ref-type="bibr" rid="ref19">19</xref>] and postgraduate medical education [<xref ref-type="bibr" rid="ref13">13</xref>]. However, the extent of AR integration in medical education remains varied [<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>Within medical education specifically, previous studies have indicated mixed learning outcomes. For example, AR can be beneficial for anatomy learning compared to virtual dissection tables, but not when compared to the conventional atlas method [<xref ref-type="bibr" rid="ref21">21</xref>]. Similarly, other studies have found no difference in learning among stereoscopic 3D AR models, monoscopic 3D desktop models, or conventional atlas learning [<xref ref-type="bibr" rid="ref17">17</xref>]. Further complicating its role, evidence suggests that individuals who have lower spatial ability, as measured by mental rotation tasks (MRTs), may benefit more from AR than their peers with higher MRT scores [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. 
These findings indicate that the mixed effects of AR within medical education may be explained by individual differences in spatial ability.</p><p>Despite the importance of spatial ability across industries, including STEM (science, technology, engineering, and math) [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref25">25</xref>] and medicine [<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref29">29</xref>], and the growing adoption of AR within medicine [<xref ref-type="bibr" rid="ref30">30</xref>], there is still a critical gap in our understanding of how novice AR users learn to use the technology. Previous experience, such as with video games, has been shown to play a role in spatial ability [<xref ref-type="bibr" rid="ref31">31</xref>] as well as in medically relevant tasks [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. More recently, studies have demonstrated that video game experience is a strong predictor of baseline skills in gastrointestinal endoscopy learners [<xref ref-type="bibr" rid="ref34">34</xref>] and of baseline performance in nonmedical VR tasks [<xref ref-type="bibr" rid="ref35">35</xref>].</p><p>However, it remains unclear which learner characteristics (eg, visuospatial ability and previous video game experience) support the efficient adoption of AR in clinical applications and whether short, targeted exposure is sufficient for novice users to reach proficiency. This study addresses this gap by quantifying novice performance and short-term learning on a neurosurgical AR navigation task and examining how these outcomes relate to individual differences in mental rotation ability and video game experience. We hypothesize that individuals with higher visuospatial ability and more video game experience will complete AR-based neurosurgical navigation tasks more quickly and with fewer errors. These results may indicate whether specific learner characteristics confer an advantage in AR or whether novice performance in AR is primarily influenced by learning.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Research Design</title><p>This study used a cross-sectional framework in which participants were recruited using convenience sampling to complete a pretest demographics survey and an assessment of visuospatial ability, followed by a series of standardized AR tasks and a posttest National Aeronautics and Space Administration Task Load Index (NASA-TLX) survey to assess subjective mental load. This paper was prepared in accordance with the Journal Article Reporting Standards [<xref ref-type="bibr" rid="ref36">36</xref>].</p></sec><sec id="s2-2"><title>Inclusion and Exclusion Criteria</title><p>Participants comprised undergraduate, graduate, and medical students recruited at the University of Pittsburgh between June 2024 and December 2024. Participants who had previous experience using AR were excluded.</p></sec><sec id="s2-3"><title>Ethical Considerations</title><p>Participants gave their informed consent for participation in the study, for their performance to be recorded for analysis, and for any secondary analyses without additional consent. Participants were not compensated. The authors confirm that there are no images or identifiable features within this manuscript. All participant information was deidentified, and study data were stored in an encrypted location.
This study received institutional review board approval from the University of Pittsburgh (STUDY22040182). The study workflow is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Study design. In total, 23 participants were recruited for this study between June 2024 and December 2024. Two participants did not successfully complete all tasks and were excluded from the analysis, resulting in a final cohort of 21 participants. There were 11 female (pink) and 10 male (blue) participants with no previous experience with augmented reality (AR). The demographics survey collected information such as experience with video games, comfort with new technology, and educational background. All participants then performed a series of mental rotation tasks before completing 7 standardized AR tasks. Following completion of the tasks, participants were given a posttest National Aeronautics and Space Administration Task Load Index (NASA-TLX) survey to assess workload.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xr_v3i1e81236_fig01.png"/></fig></sec><sec id="s2-4"><title>Surveys</title><p>Two pretest tasks were administered. The first task was a survey that collected demographic and experience information such as age, sex, level of education, experience with video games, comfort with new technology, and experience with surgical devices. The second pretest task was the MRT, a standardized paper-and-pencil measure of 3D spatial visualization derived from the mental rotation paradigm by Shepard and Metzler [<xref ref-type="bibr" rid="ref37">37</xref>]. The MRT requires participants to decide whether comparison figures are rotated versions or mirror images of a target 3D object, providing a robust index of individual differences in mental rotation ability. Classic psychometric work has shown that the MRT has high internal consistency, as indicated by the Kuder-Richardson Formula 20 (KR-20=0.88), a statistic that estimates how consistently dichotomously scored items measure the same underlying construct, and high test-retest reliability (<italic>r</italic>=0.83) [<xref ref-type="bibr" rid="ref38">38</xref>]; subsequent reviews describe it as one of the most commonly used and well-validated measures of spatial ability [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Moreover, mental rotation tests such as the MRT are routinely incorporated into spatial ability batteries and reliably predict performance in applied visuospatial tasks (eg, engineering design, navigation, and surgical endoscopy) [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. Because our experimental tasks required participants to infer 3D relationships from 2D displays and mentally transform object orientations, we selected the MRT as the primary measure of visuospatial ability.</p><p>This task was composed of 2 sets of 12 problems. Participants were allotted 3 minutes to complete each set of questions. During this task, participants were given a warning when their remaining time reached 2 minutes, 1 minute, 30 seconds, and 10 seconds.
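</p><p>For reference, the KR-20 statistic cited above can be computed directly from a binary item-response matrix as KR-20=(k/(k&#x2212;1))(1&#x2212;&#x03A3;p<sub>j</sub>q<sub>j</sub>/&#x03C3;&#x00B2;), where k is the number of items, p<sub>j</sub> is the proportion of participants answering item j correctly, q<sub>j</sub>=1&#x2212;p<sub>j</sub>, and &#x03C3;&#x00B2; is the variance of total scores. The following minimal Python sketch illustrates this computation; the response matrix shown is hypothetical and is not the study data.</p><preformat>
import numpy as np

def kr20(items):
    # items: participants x items matrix of 0/1 scores (1 = correct)
    k = items.shape[1]
    p = items.mean(axis=0)   # proportion correct per item
    q = 1.0 - p
    # Sample variance of total scores; classical formulations sometimes
    # use the population variance (ddof=0) instead.
    total_var = items.sum(axis=1).var(ddof=1)
    return (k / (k - 1)) * (1.0 - (p * q).sum() / total_var)

# Hypothetical 0/1 responses for 5 participants on 6 items (illustration only).
demo = np.array([[1, 1, 0, 1, 1, 0],
                 [1, 0, 0, 1, 0, 0],
                 [1, 1, 1, 1, 1, 1],
                 [0, 1, 0, 0, 1, 0],
                 [1, 1, 1, 1, 0, 1]])
print(round(kr20(demo), 2))
</preformat><p>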
Following AR testing, participants were given the NASA-TLX survey, a validated instrument for measuring subjective mental workload [<xref ref-type="bibr" rid="ref43">43</xref>].</p></sec><sec id="s2-5"><title>Experimental Procedure</title><p>This study conducted AR-based tasks using SurgicalAR (version 1.6.1; Medivis Inc) software on the Microsoft HoloLens 2. SurgicalAR is a surgical guidance system that volumetrically renders Digital Imaging and Communications in Medicine data and projects it onto an HMD, allowing for direct registration to patients. Participants were shown a generic, deidentified computerized tomography angiogram of the head. For tasks that required a stylus or pointer, a stylus tracked by the SurgicalAR system was used.</p><p>Participants were given 7 different AR-based tasks that were deliberately selected to resemble the clinical workflow steps that a neurosurgeon would perform in the operating room. Specifically, tasks 1 to 3 mimicked basic hologram interactions that may be performed while visualizing key structures or planning an operative approach. Tasks 4 to 7 were designed to follow a standard hologram-to-object registration in which 4 corresponding points were placed on the hologram and the physical object. Then, a 3D transformation was computed using the method by Horn [<xref ref-type="bibr" rid="ref44">44</xref>] to complete the registration.</p><p>Before participants began using AR, the study moderator demonstrated the task using the HoloLens 2 while participants viewed the task through the SurgicalAR system cart monitor, which was positioned near the moderator. Then, the moderator placed and adjusted the headset on each participant and instructed them on basic gesture interactions. All tasks were performed on and recorded using the HoloLens 2. Videos were analyzed for performance using predefined metrics. A description of each task and its performance metrics is provided below, and representations of the tasks can be seen in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Series of augmented reality tasks that participants were required to complete. (A) Baseline performance: resizing and rotating a hologram of a human skull model; (B) orbit tracing: outlining the orbital rims on the hologram; (C) plane visualization: viewing coronal, sagittal, and axial planes of the hologram; (D) anterior-posterior trajectory point: placing virtual trajectory markers on the hologram; (E) virtual landmark placement: placing 4 virtual landmarks on the hologram; (F) physical landmark placement: placing 4 physical landmarks on the 3D-printed human skull model; and (G) trajectory alignment: performing trajectory alignment.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xr_v3i1e81236_fig02.png"/></fig><list list-type="bullet"><list-item><p>Task 1 (<xref ref-type="fig" rid="figure2">Figure 2A</xref>): participants resized and rotated a hologram of a human skull model. This task required participants to unanchor the hologram, detaching it from its fixed position and allowing free movement. They then needed to make the hologram larger (zoom in) and smaller (zoom out) and rotate the hologram 360&#x00B0;. Finally, participants reanchored the hologram, locking it back into its original orientation and size. This task was repeated 3 times. Task performance was measured by the time taken to complete the task and by the number of slips.
Slips were defined as unintentional errors or mistakes [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p></list-item><list-item><p>Task 2 (<xref ref-type="fig" rid="figure2">Figure 2B</xref>): participants outlined the orbits (eye sockets) of the hologram. Participants were instructed to perform the orbit tracing in one continuous motion for each orbit, without retracting the areas they had already outlined. Performance was measured by a qualitative analysis of orbit tracing quality.</p></list-item><list-item><p>Task 3 (<xref ref-type="fig" rid="figure2">Figure 2C</xref>): participants moved a cut-plane tool fully through the hologram of the computerized tomography angiogram of the head in 3 directions&#x2014;coronal, sagittal, and axial. They were instructed to perform the task while keeping their body facing the front of the hologram. Performance was measured by the number of slips, as defined in task 1 [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p></list-item><list-item><p>Task 4 (<xref ref-type="fig" rid="figure2">Figure 2D</xref>): participants placed 2 virtual trajectory landmarks. The first point was placed midline on the lambdoid suture, and the second point was placed midline on the frontal bone. Performance was measured by the time required to successfully place the posterior point and anterior point.</p></list-item><list-item><p>Task 5 (<xref ref-type="fig" rid="figure2">Figure 2E</xref>): participants placed 4 virtual landmark points on the bilateral lateral and medial parts of the hologram&#x2019;s orbit. They began with the lateral left orbit and worked from left to right, finishing with the lateral right orbit. Performance was measured by the time to place each virtual landmark point.</p></list-item><list-item><p>Task 6 (<xref ref-type="fig" rid="figure2">Figure 2F</xref>): participants placed 4 physical landmark points on a 3D-printed skull model, matched to the same locations as the virtual landmarks. They began with the lateral left orbit and worked from left to right, finishing with the lateral right orbit. Performance was measured by the time to place each physical landmark point.</p></list-item><list-item><p>Task 7 (<xref ref-type="fig" rid="figure2">Figure 2G</xref>): participants registered the holographic computerized tomography projection onto the physical skull and then activated the trajectory alignment tool. To accomplish this, participants used the stylus to make the anterior-posterior trajectory turn green, indicating successful alignment. Performance was measured by the time taken to align the trajectory.</p></list-item></list></sec><sec id="s2-6"><title>Statistics</title><p>Descriptive statistics were used to summarize demographic variables and baseline characteristics. Group comparisons were performed using independent samples 2-tailed <italic>t</italic> tests for continuous variables and Pearson <italic>&#x03C7;</italic><sup>2</sup> tests for categorical variables, where appropriate. To assess learning effects, mixed-effects models with Tukey multiple comparisons were used for completion times. Friedman tests with Dunn post hoc comparisons were used for error counts and landmark placement times, and Wilcoxon signed-rank tests were used for paired comparisons. The overall effect of trial on performance was evaluated using a 1-way repeated-measures ANOVA with Greenhouse-Geisser correction.
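</p><p>As noted at the end of this section, all analyses were conducted in GraphPad Prism. Purely as an illustrative cross-check, the same nonparametric test choices can be reproduced programmatically; the following minimal Python sketch applies them with SciPy to synthetic data shaped like the task 1 measurements (all values and variable names are hypothetical and are not the study data).</p><preformat>
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
n = 21  # cohort size in this study

# Synthetic stand-ins shaped like task 1: 3 attempts per participant.
time_s = rng.normal([63, 34, 34], [31, 13, 12], size=(n, 3))  # seconds
slips = rng.poisson([5.6, 2.5, 2.6], size=(n, 3))             # error counts
mrt = rng.integers(10, 40, size=n)                            # MRT scores

# Friedman test across the 3 attempts (nonparametric repeated measures),
# as applied here to slip counts; Dunn post hoc comparisons would follow.
chi2, p_f = stats.friedmanchisquare(slips[:, 0], slips[:, 1], slips[:, 2])

# Wilcoxon signed-rank test for a single paired contrast
# (eg, attempt 1 vs attempt 2 completion time).
w, p_w = stats.wilcoxon(time_s[:, 0], time_s[:, 1])

# Pearson correlation between MRT score and baseline (attempt 1) time.
r, p_r = stats.pearsonr(mrt, time_s[:, 0])
print(f"Friedman chi2={chi2:.1f} (P={p_f:.3g}); Pearson r={r:.2f} (P={p_r:.3g})")
</preformat><p>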
To evaluate whether learning effects were modified by MRT scores or video game experience, a covariate-adjusted repeated-measures general linear model was used. Linear regression was used to evaluate the predictive relationship between MRT scores and baseline task performance. Participants were stratified based on video game experience (&#x003E;5 hours/week vs &#x2264;5 hours/week) to assess group differences in task outcomes. Significance was set at <italic>&#x03B1;</italic>=.05 for all comparisons. All statistical analyses were conducted using GraphPad Prism (version 10.0.0; GraphPad Software Inc).</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>In total, 23 participants with no previous experience with AR were recruited for this observational study. Of these, 2 (8.69%) did not successfully complete all tasks and were excluded from the analysis, resulting in a final cohort of 21 (91.3%) participants. Within the final cohort, there were 11 (52.4%) female participants, and the median age was 22 (IQR 21-24) years. There were 15 (71.4%) participants who were undergraduate students. In total, 13 (61.9%) participants spent 0 to 5 hours per week playing video games, and 10 (47.6%) participants spent 0 to 10 hours per week interacting with a touch screen device or computer. Furthermore, 13 (61.9%) participants were totally comfortable with new technology. Specific demographic information and comfort with new technology are presented in <xref ref-type="table" rid="table1">Table 1</xref>. Results of the NASA-TLX are presented in <xref ref-type="table" rid="table2">Table 2</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Participant demographics (N=21).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Value</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Sex, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="char" char="parenthesis" valign="top">11 (52.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="char" char="parenthesis" valign="top">10 (47.6)</td></tr><tr><td align="left" valign="top">Age (years), median (range; IQR)</td><td align="char" char="parenthesis" valign="top">22 (19-25; 21-24)</td></tr><tr><td align="left" valign="top" colspan="2">Level of training, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Undergraduate student</td><td align="char" char="parenthesis" valign="top">15 (71.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medical student</td><td align="char" char="parenthesis" valign="top">5 (23.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Master&#x2019;s student</td><td align="char" char="parenthesis" valign="top">1 (4.76)</td></tr><tr><td align="left" valign="top">Time spent playing video games per week (hours), median (range; IQR)</td><td align="char" char="parenthesis" valign="top">5 (0-55; 1.5-21)</td></tr><tr><td align="left" valign="top" colspan="2">Weekly video
game use (hours), n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>0-5</td><td align="char" char="parenthesis" valign="top">13 (61.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>6-10</td><td align="char" char="parenthesis" valign="top">1 (4.76)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>11-15</td><td align="char" char="parenthesis" valign="top">2 (9.52)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>16-20</td><td align="char" char="parenthesis" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x2265;21</td><td align="char" char="parenthesis" valign="top">5 (23.8)</td></tr><tr><td align="left" valign="top">Time spent interacting with touch screen device or computer (hours), median (range; IQR)</td><td align="char" char="parenthesis" valign="top">15 (0-63; 5-40)</td></tr><tr><td align="left" valign="top" colspan="2">Weekly touch screen device or computer use (hours), n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>0-10</td><td align="char" char="parenthesis" valign="top">10 (47.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>11-20</td><td align="char" char="parenthesis" valign="top">1 (4.76)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>21-30</td><td align="char" char="parenthesis" valign="top">3 (14.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>31-40</td><td align="char" char="parenthesis" valign="top">3 (14.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x2265;41</td><td align="char" char="parenthesis" valign="top">4 (19.0)</td></tr><tr><td align="left" valign="top" colspan="2">Comfort with new technology (scale 1-5), n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Totally comfortable (5)</td><td align="char" char="parenthesis" valign="top">13 (61.9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Very comfortable (4)</td><td align="char" char="parenthesis" valign="top">4 (19.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>More or less comfortable (3)</td><td align="char" char="parenthesis" valign="top">3 (14.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not very comfortable (2)</td><td align="char" char="parenthesis" valign="top">1 (4.76)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not comfortable at all (1)</td><td align="char" char="parenthesis" valign="top">0 (0)</td></tr><tr><td align="left" valign="top" colspan="2">Experience with
other forms of surgical guidance, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Endoscopy<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="char" char="parenthesis" valign="top">2 (9.52)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DaVinci<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="char" char="parenthesis" valign="top">1 (4.76)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Microsurgery</td><td align="char" char="parenthesis" valign="top">0 (0)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Average experience with endoscopy was 13 (SD 4.49) hours.</p></fn><fn id="table1fn2"><p><sup>b</sup>Total experience with DaVinci was 6 hours.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>National Aeronautics and Space Administration Task Load Index scores.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Category</td><td align="left" valign="bottom">Value, median (range; IQR)</td></tr></thead><tbody><tr><td align="left" valign="top">Mental demand</td><td align="left" valign="top">50 (0-80; 25-67.5)</td></tr><tr><td align="left" valign="top">Physical demand</td><td align="left" valign="top">15 (0-70; 5-30)</td></tr><tr><td align="left" valign="top">Temporal demand</td><td align="left" valign="top">35 (0-75; 10-50)</td></tr><tr><td align="left" valign="top">Performance (lower is better)</td><td align="left" valign="top">50 (20-85; 37.5-67.5)</td></tr><tr><td align="left" valign="top">Effort</td><td align="left" valign="top">50 (0-90; 30-67.5)</td></tr><tr><td align="left" valign="top">Frustration</td><td align="left" valign="top">30 (0-85; 12.5-60)</td></tr></tbody></table></table-wrap></sec><sec id="s3-2"><title>Overall Performance on MRT</title><p>Visuospatial ability, as measured by the MRT, did not predict the time taken to complete the baseline performance task (Pearson <italic>r</italic>=0.15, 95% CI &#x2212;0.32 to 0.55; <italic>R</italic><sup>2</sup>=0.022; <italic>P</italic>=.54; <xref ref-type="fig" rid="figure3">Figure 3</xref>). Similarly, MRT scores did not predict error rates on the baseline performance task (<italic>r</italic>=0.18, 95% CI &#x2212;0.27 to 0.57; <italic>R</italic><sup>2</sup>=0.034; <italic>P</italic>=.43). There were no statistically significant differences in baseline performance time (<italic>P</italic>=.65) or number of slips (<italic>P</italic>=.62) between individuals with MRT scores &#x2265;30 and those with scores &#x003C;30.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Association between visuospatial ability, measured by the mental rotation task (MRT), and baseline task completion time. Each dot represents an individual participant&#x2019;s MRT score and corresponding completion time. The solid line indicates the linear regression fit, and the dashed lines represent the 95% CI of the regression. 
MRT score does not predict the time taken to complete the baseline performance task (<italic>R</italic><sup>2</sup>=0.022; <italic>P</italic>=.54).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xr_v3i1e81236_fig03.png"/></fig></sec><sec id="s3-3"><title>Video Game Performance</title><p>Participants were split into 2 groups based on video game experience (group with &#x201C;extensive&#x201D; experience of &#x003E;5 hours/week and group with &#x201C;minimal&#x201D; experience of &#x2264;5 hours/week) for analysis. This cutoff was determined empirically to yield approximately equal numbers of participants per group (8 and 13, respectively). Participants with extensive video game experience did not demonstrate faster completion times compared to those with minimal video game experience (Mann-Whitney test; median difference &#x2212;22 seconds, 95% CI &#x2212;7.00 to 57.00; <italic>P</italic>=.24).</p><p>However, individuals who had extensive video game experience made fewer slips on average than those who had minimal video game experience (mean 4.00, SD 2.27 slips, 95% CI 2.10-5.90 vs mean 6.61, SD 3.36 slips, 95% CI 4.59-8.64; unpaired <italic>t</italic> test; mean difference &#x2212;2.62 slips, 95% CI &#x2212;5.19 to &#x2212;0.04; <italic>P</italic>=.047).</p></sec><sec id="s3-4"><title>Learning</title><p>Participants learned to perform the baseline performance task (task 1) in a significantly shorter time between attempts 1 and 2 (mean 63.3, SD 31.4 seconds, 95% CI 48.5-78.0 vs mean 33.6, SD 13.1 seconds, 95% CI 27.6-39.6; <italic>P</italic>&#x003C;.001) and attempts 1 and 3 (mean 63.3, SD 31.4 seconds, 95% CI 48.5-78.0 vs mean 34.5, SD 12.0 seconds, 95% CI 29.0-40.0; <italic>P</italic>=.002), but not between attempts 2 and 3 (mean 33.6, SD 13.1 seconds, 95% CI 27.6-39.6 vs mean 34.5, SD 12.0 seconds, 95% CI 29.0-40.0; <italic>P</italic>&#x003E;.99; <xref ref-type="fig" rid="figure4">Figure 4A</xref>).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Learning effects observed across various augmented reality tasks. (A) Time taken to complete baseline performance task across attempts. Each dot represents an individual participant, and the horizontal lines indicate the mean with 95% CIs. Participants performed the baseline performance task in a significantly shorter time between attempts 1 and 2 [***<italic>P</italic>&#x003C;.001] and attempts 1 and 3 [**<italic>P</italic>=.002]. The comparison between attempts 2 and 3 was not significant [ns; <italic>P</italic>&#x003E;.99]. (B) Number of slips in the baseline performance task across attempts. Each dot represents an individual participant, and the connecting lines track each participant&#x2019;s performance across attempts. Participants improved in the accuracy of completing the baseline performance task, as demonstrated by fewer slips between attempts 1 and 2 [**<italic>P</italic>=.008] and attempts 1 and 3 [***<italic>P</italic>&#x003C;.001]. The comparison between attempts 2 and 3 was not significant [ns; <italic>P</italic>&#x003E;.99]. (C) Time taken to trace orbits. Each dot represents an individual participant, and the connecting lines track each participant&#x2019;s performance between orbits.
Participants significantly improved the time to trace the orbits on the second attempt [**<italic>P</italic>=.004].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xr_v3i1e81236_fig04.png"/></fig><p>A 1-way repeated-measures ANOVA indicated that there was a significant effect of trial on completion time, consistent with improved performance across trials (Greenhouse-Geisser <italic>F</italic><sub>1.135,22.691</sub>=11.890; <italic>P</italic>=.002). However, a covariate-adjusted repeated-measures general linear model that included weekly gaming hours and MRT score indicated that the trial effect was not significant (Greenhouse-Geisser <italic>F</italic><sub>1.112,20.021</sub>=0.050; <italic>P</italic>=.85), and there was no evidence that trial-related changes depended on either covariate (trial&#x00D7;gaming hours: <italic>P</italic>=.62; trial&#x00D7;MRT: <italic>P</italic>=.34).</p><p>In addition to faster task completion times between attempts 1 and 2 and 1 and 3, participants also performed the task more accurately. There was a decrease in the number of slips between attempts 1 and 2 (mean 5.62, SD 3.2 slips, 95% CI 4.2-7.1 vs mean 2.48, SD 2.02 slips, 95% CI 1.6-3.4; <italic>P</italic>=.008) and attempts 1 and 3 (mean 5.62, SD 3.2 slips, 95% CI 4.2-7.1 vs mean 2.57, SD 2.52 slips, 95% CI 1.4-3.7; <italic>P</italic>&#x003C;.001; <xref ref-type="fig" rid="figure4">Figure 4B</xref>). Furthermore, participants completed the orbit tracing more quickly between the first and second orbit (mean 20.4, SD 8.15 seconds, 95% CI 16.5-24.4 vs mean 13.9, SD 5.52 seconds, 95% CI 11.31-16.5; <italic>P</italic>=.004) without a change in quality of orbit tracing (<italic>P</italic>=.77; <xref ref-type="fig" rid="figure4">Figure 4C</xref>).</p><p>Additionally, participants required less time to place virtual landmark points 2 and 3 (mean 7.2, SD 4.82 seconds, 95% CI 5.0-9.4 vs mean 4.2, SD 1.81 seconds, 95% CI 3.4-5.0; <italic>P</italic>=.009) and points 2 and 4 (mean 7.2, SD 4.82 seconds, 95% CI 5.0-9.4 vs mean 4.0, SD 1.82 seconds, 95% CI 3.2-4.8; <italic>P</italic>=.02). There was no learning effect observed for placing physical landmarks (Friedman test; Dunn post hoc: all pairwise <italic>P</italic>&#x2265;.64). There was no significant difference in the learning curve between participants with MRT scores &#x2265;30 and those with scores &#x003C;30 (<italic>P</italic>=.87). Furthermore, there was no difference in learning curves between participants with extensive video game experience and those with minimal video game experience (<italic>P</italic>=.81).</p></sec><sec id="s3-5"><title>Predictive Variables</title><p>MRT performance did not predict baseline performance, as measured by task 1 (<italic>P</italic>=.54; <xref ref-type="fig" rid="figure3">Figure 3</xref>). Additionally, video game experience was not a predictor of baseline performance (Pearson <italic>r</italic>=&#x2212;0.35, 95% CI &#x2212;0.69 to 0.13; <italic>R</italic><sup>2</sup>=0.12; <italic>P</italic>=.14); however, it did predict the number of slips (<italic>P</italic>=.046).</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>As AR technology continues to improve and integrate within health care and other industries, it becomes increasingly important to understand which factors contribute to technological proficiency among novice AR users.
By identifying these factors, product designers can address the scarcity of implementation models that hinders the widespread adoption of AR and VR in clinical settings [<xref ref-type="bibr" rid="ref30">30</xref>] and develop programs to help guide novice users through more complex AR-based interactions, thereby proactively addressing areas of difficulty, minimizing the user learning curve, and increasing user adoption. To address this growing need, our study aimed to identify predictors of performance in novice AR users. Our findings suggest that visuospatial ability does not predict AR task completion time, though extensive video game experience was associated with greater accuracy. Despite this result, neither visuospatial ability nor video game experience corresponded with an improved learning curve.</p></sec><sec id="s4-2"><title>Predictive Variables of Performance Gains</title><p>Existing literature has placed a strong emphasis on visuospatial ability as a predictor of performance in various clinical settings, including ultrasound [<xref ref-type="bibr" rid="ref27">27</xref>], laparoscopic [<xref ref-type="bibr" rid="ref28">28</xref>], and endoscopic procedures [<xref ref-type="bibr" rid="ref29">29</xref>], as well as in nonclinical settings [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>] and learning [<xref ref-type="bibr" rid="ref26">26</xref>]. Given that factors such as depth perception and stereovision undoubtedly contribute to an individual&#x2019;s visuospatial ability [<xref ref-type="bibr" rid="ref49">49</xref>], our study used one of the most widely used validated measures of spatial ability, the MRT [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. We found no relationship between MRT scores and baseline performance. This suggests that AR proficiency may be influenced by more nuanced visual processing skills that are not captured by the MRT.</p><p>H&#x00F6;hler et al [<xref ref-type="bibr" rid="ref49">49</xref>] and Martin-Gomez et al [<xref ref-type="bibr" rid="ref52">52</xref>] have suggested that depth perception and stereoacuity affect individuals&#x2019; ability to estimate distances of objects in AR. Given the importance of interacting with virtual elements in AR, estimating the depth and position of these objects may play a larger role than previously thought and could account for the visual processing skills that are not captured by the MRT.</p><p>The observed result that increased video game experience was correlated with increased accuracy in AR tasks may be explained by the beneficial effect of gaming on spatial cognition. Work by Bavelier and Green [<xref ref-type="bibr" rid="ref53">53</xref>] indicates that action video game play in particular enhances spatial cognition; however, other literature has indicated that these cognitive improvements are not unique to action games [<xref ref-type="bibr" rid="ref54">54</xref>]. This indicates that the relationship between video game experience and accuracy in AR may be due to the cognitive benefits of extensively playing video games, regardless of genre.</p><p>The literature indicates that video game experience may be a positive predictor of performance in surgical tasks with respect to errors and time [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref57">57</xref>].
Our findings suggest that this relationship may extend to AR-based applications with respect to errors; however, more research is needed to evaluate its effect on performance time.</p></sec><sec id="s4-3"><title>Learning How to Use AR</title><p>One of the reasons AR can be challenging for novice users is the variability in the learning process [<xref ref-type="bibr" rid="ref58">58</xref>]. However, as with other skills, increased AR exposure is associated with improved performance. Our unadjusted analyses demonstrated a rapid learning effect, with the most pronounced gains occurring during early task exposure. This suggests that novice AR users may rapidly familiarize themselves with the AR environment. However, covariate-adjusted models did not indicate that these improvements differed significantly based on user characteristics.</p><p>Users who initially performed tasks more slowly demonstrated the greatest improvement. Tasks requiring less depth perception showed more rapid learning, while those demanding greater depth perception and precision, such as virtual landmark placement (task 5), improved more gradually. Notably, physical landmark placement (task 6) did not show a learning effect, possibly because participants could rely on tactile feedback from touching the skull with the stylus.</p><p>Given that covariate-adjusted models showed no significant influence of MRT or video game experience on learning, these findings suggest that inherent user characteristics, such as spatial ability, do not impact early AR learning capacity in novice users. However, given our modest sample size (N=21), the nonsignificant covariate terms and interactions should be interpreted cautiously.</p></sec><sec id="s4-4"><title>Importance of Depth Perception With AR</title><p>There is a possibility that depth perception and stereoacuity play a larger role in novice AR performance due to inherent technological limitations of the HMD. The AR device used in this study, the Microsoft HoloLens 2, uses a traditional fixed focal plane optical display. Research with the HoloLens has shown that visual rendering factors such as shadows [<xref ref-type="bibr" rid="ref59">59</xref>] and lighting conditions [<xref ref-type="bibr" rid="ref60">60</xref>] may impact the depth perception of users. Additionally, binocular disparity and the occlusion of an object are other important cues for depth perception [<xref ref-type="bibr" rid="ref61">61</xref>].</p><p>If a user attempts to interact with a virtual object in AR, they may experience an occlusion error, in which the object appears translucent despite the user&#x2019;s hand not being at the appropriate distance to interact with it. Uehira and Suzuki [<xref ref-type="bibr" rid="ref61">61</xref>] identified that this depth perception error was highly varied between individuals, particularly at short distances where the difference in binocular disparity is especially pronounced. Most of the tasks in our study were performed at short distances, mimicking clinical interactions with AR. Our study did not quantify the distances of the virtual objects, nor did we measure how many times users missed targets due to misjudgment of depth.
Given that interaction with virtual objects is a fundamental component of AR use, individuals with stronger depth perception are likely to outperform those with weaker depth perception [<xref ref-type="bibr" rid="ref49">49</xref>].</p><p>Taken together, these findings provide new evidence that traditional measures of visuospatial ability do not reliably predict novice AR performance, while unmeasured factors, including depth perception, may contribute more than previously thought. The early performance gains observed in unadjusted analyses suggest that novice AR proficiency can be rapidly developed, a result supported by short-format training within urology [<xref ref-type="bibr" rid="ref14">14</xref>]. Importantly, these learning effects, combined with the scarcity of existing implementation models [<xref ref-type="bibr" rid="ref30">30</xref>], suggest that successful AR adoption may benefit from short, targeted training programs that guide all novice users to a competency threshold rather than prioritizing users based on traits such as visuospatial ability or video game experience. Furthermore, this emphasizes that predictive measures of novice performance should be interpreted in the context of this rapid rate of improvement.</p></sec><sec id="s4-5"><title>Limitations</title><p>This study has some notable limitations. The potential sampling bias introduced by the inclusion of only undergraduate and graduate students may limit the generalizability of the findings to broader populations, such as resident and attending physicians who represent actual AR users in health care settings. Given that our sample size was 21 nonsurgeon participants, we believe that further research evaluating the learning curve within intraoperative environments is necessary before concluding that task-specific guides will reduce the learning curve.</p><p>Additionally, as the sample was modest (N=21) and the covariate-adjusted model included multiple predictors (gaming hours and MRT), this study may be underpowered to detect small-to-moderate covariate effects and trial-by-covariate interactions. Accordingly, nonsignificant covariate terms (eg, gaming hours <italic>P</italic>=.80; MRT <italic>P</italic>=.17) and interaction terms (trial&#x00D7;gaming hours <italic>P</italic>=.62; trial&#x00D7;MRT <italic>P</italic>=.34, Greenhouse-Geisser corrected) should be interpreted cautiously. Moreover, video game experience was self-reported and categorized based on hours per week; we found it challenging to obtain the genres of the video games played and therefore did not analyze whether different categories of video games influenced AR performance. Neither game genre nor the cognitive demands of the games played were collected, although these factors may influence performance on the tasks evaluated in this study. Furthermore, technical limitations of the Microsoft HoloLens 2 cannot be discounted; for example, ambient lighting conditions in the room during experimentation may have affected hologram visual quality. Finally, some outcomes, such as orbit tracing quality, were evaluated qualitatively and may be subject to observer bias.</p></sec><sec id="s4-6"><title>Conclusions</title><p>As AR technology continues to grow in adoption across different industries, there is an increased need to identify the factors that contribute to effective AR use.
</sec><sec id="s4-6"><title>Conclusions</title><p>As AR technology continues to grow in adoption across industries, there is an increasing need to identify the factors that contribute to effective AR use. Our research found that extensive video game experience was correlated with decreased error frequency, whereas neither visuospatial ability nor video game experience predicted novice user performance time. Future research should examine how depth perception, stereoacuity, and learning influence novice user performance and should evaluate the learning curve of surgeons in intraoperative environments. This line of research holds considerable promise and may shape how industry professionals and product developers design AR systems and train future users to adopt them more effectively.</p></sec></sec></body><back><ack><p>The authors attest that generative artificial intelligence was not used in the generation of any part of this manuscript.</p></ack><notes><sec><title>Funding</title><p>This study is based on work supported by the National Science Foundation Graduate Research Fellowship (grants DGE2140739 and 2139321).</p></sec><sec><title>Data Availability</title><p>The data presented in this study are available from the corresponding author on reasonable request and with institutional approval.</p></sec></notes><fn-group><fn fn-type="conflict"><p>EGA, JTB, and GH are shareholders of SymphonyMR Inc.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AR</term><def><p>augmented reality</p></def></def-item><def-item><term id="abb2">HMD</term><def><p>head-mounted display</p></def></def-item><def-item><term id="abb3">MRT</term><def><p>mental rotation task</p></def></def-item><def-item><term id="abb4">NASA-TLX</term><def><p>National Aeronautics and Space Administration Task Load Index</p></def></def-item><def-item><term id="abb5">STEM</term><def><p>science, technology, engineering, and math</p></def></def-item><def-item><term id="abb6">VR</term><def><p>virtual reality</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Villagran-Vizcarra</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Luviano-Cruz</surname><given-names>D</given-names> </name><name name-style="western"><surname>P&#x00E9;rez-Dom&#x00ED;nguez</surname><given-names>LA</given-names> </name><name name-style="western"><surname>M&#x00E9;ndez-Gonz&#x00E1;lez</surname><given-names>LC</given-names> </name><name name-style="western"><surname>Garcia-Luna</surname><given-names>F</given-names> </name></person-group><article-title>Applications analyses, challenges and development of augmented reality in education, industry, marketing, medicine, and entertainment</article-title><source>Appl Sci</source><year>2023</year><month>02</month><volume>13</volume><issue>5</issue><fpage>2766</fpage><pub-id pub-id-type="doi">10.3390/app13052766</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Syed</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Siddiqui</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Abdullah</surname><given-names>HB</given-names> </name><etal/></person-group><article-title>In-depth review of augmented reality: tracking technologies, development tools, AR displays, collaborative AR, and security concerns</article-title><source>Sensors
(Basel)</source><year>2022</year><month>12</month><day>23</day><volume>23</volume><issue>1</issue><fpage>146</fpage><pub-id pub-id-type="doi">10.3390/s23010146</pub-id><pub-id pub-id-type="medline">36616745</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vucicevic</surname><given-names>RS</given-names> </name><name name-style="western"><surname>Castonguay</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Trevi&#x00F1;o</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Surgeon perspectives on a virtual reality platform for preoperative planning in complex bone sarcomas</article-title><source>J Orthop</source><year>2024</year><month>10</month><volume>62</volume><fpage>43</fpage><lpage>48</lpage><pub-id pub-id-type="doi">10.1016/j.jor.2024.10.012</pub-id><pub-id pub-id-type="medline">39507951</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Porpiglia</surname><given-names>F</given-names> </name><name name-style="western"><surname>Checcucci</surname><given-names>E</given-names> </name><name name-style="western"><surname>Volpi</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Artificial intelligence 3D augmented reality-guided robotic prostatectomy versus cognitive MRI intervention: results of the prospective randomized RIDERS trial</article-title><source>Eur Urol</source><year>2026</year><month>03</month><volume>89</volume><issue>3</issue><fpage>233</fpage><lpage>243</lpage><pub-id pub-id-type="doi">10.1016/j.eururo.2025.09.4172</pub-id><pub-id pub-id-type="medline">41087293</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nadeem-Tariq</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kazemeini</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kaur</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Augmented reality in spine surgery: a narrative review of clinical accuracy, workflow efficiency, and barriers to adoption</article-title><source>Cureus</source><year>2025</year><month>06</month><volume>17</volume><issue>6</issue><fpage>e86803</fpage><pub-id pub-id-type="doi">10.7759/cureus.86803</pub-id><pub-id pub-id-type="medline">40718258</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>West</surname><given-names>K</given-names> </name><name name-style="western"><surname>Al-Nimer</surname><given-names>S</given-names> </name><name name-style="western"><surname>Goel</surname><given-names>VR</given-names> </name><etal/></person-group><article-title>Three-dimensional holographic guidance, navigation, and control (3D-GNC) for endograft positioning in porcine aorta: feasibility comparison with 2-dimensional x-ray fluoroscopy</article-title><source>J Endovasc Ther</source><year>2021</year><month>10</month><volume>28</volume><issue>5</issue><fpage>796</fpage><lpage>803</lpage><pub-id pub-id-type="doi">10.1177/15266028211025026</pub-id><pub-id pub-id-type="medline">34142900</pub-id></nlm-citation></ref><ref 
id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Urlings</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sezer</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ter Laan</surname><given-names>M</given-names> </name><etal/></person-group><article-title>The role and effectiveness of augmented reality in patient education: a systematic review of the literature</article-title><source>Patient Educ Couns</source><year>2022</year><month>07</month><volume>105</volume><issue>7</issue><fpage>1917</fpage><lpage>1927</lpage><pub-id pub-id-type="doi">10.1016/j.pec.2022.03.005</pub-id><pub-id pub-id-type="medline">35341611</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wehrkamp</surname><given-names>K</given-names> </name><name name-style="western"><surname>Miksch</surname><given-names>RC</given-names> </name><name name-style="western"><surname>Polzer</surname><given-names>H</given-names> </name><etal/></person-group><article-title>The impact of virtual-, augmented- and mixed reality during preoperative informed consent: a systematic review of the literature</article-title><source>J Med Syst</source><year>2025</year><month>06</month><day>24</day><volume>49</volume><issue>1</issue><fpage>89</fpage><pub-id pub-id-type="doi">10.1007/s10916-025-02217-9</pub-id><pub-id pub-id-type="medline">40555846</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bray</surname><given-names>L</given-names> </name><name name-style="western"><surname>Sharpe</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gichuru</surname><given-names>P</given-names> </name><name name-style="western"><surname>Fortune</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Blake</surname><given-names>L</given-names> </name><name name-style="western"><surname>Appleton</surname><given-names>V</given-names> </name></person-group><article-title>The acceptability and impact of the Xploro digital therapeutic platform to inform and prepare children for planned procedures in a hospital: before and after evaluation study</article-title><source>J Med Internet Res</source><year>2020</year><month>08</month><day>11</day><volume>22</volume><issue>8</issue><fpage>e17367</fpage><pub-id pub-id-type="doi">10.2196/17367</pub-id><pub-id pub-id-type="medline">32780025</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mondal</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mondal</surname><given-names>S</given-names> </name></person-group><article-title>Adopting augmented reality and virtual reality in medical education in resource-limited settings: constraints and the way forward</article-title><source>Adv Physiol Educ</source><year>2025</year><month>06</month><day>1</day><volume>49</volume><issue>2</issue><fpage>503</fpage><lpage>507</lpage><pub-id pub-id-type="doi">10.1152/advan.00027.2025</pub-id><pub-id pub-id-type="medline">40136005</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>W</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liang</surname><given-names>F</given-names> </name></person-group><article-title>Augmented reality technology shortens aneurysm surgery learning curve for residents</article-title><source>Comput Assist Surg (Abingdon)</source><year>2024</year><month>12</month><volume>29</volume><issue>1</issue><fpage>2311940</fpage><pub-id pub-id-type="doi">10.1080/24699322.2024.2311940</pub-id><pub-id pub-id-type="medline">38315080</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barteit</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lanfermann</surname><given-names>L</given-names> </name><name name-style="western"><surname>B&#x00E4;rnighausen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Neuhann</surname><given-names>F</given-names> </name><name name-style="western"><surname>Beiersmann</surname><given-names>C</given-names> </name></person-group><article-title>Augmented, mixed, and virtual reality-based head-mounted devices for medical education: systematic review</article-title><source>JMIR Serious Games</source><year>2021</year><month>07</month><day>8</day><volume>9</volume><issue>3</issue><fpage>e29080</fpage><pub-id pub-id-type="doi">10.2196/29080</pub-id><pub-id pub-id-type="medline">34255668</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suresh</surname><given-names>D</given-names> </name><name name-style="western"><surname>Aydin</surname><given-names>A</given-names> </name><name name-style="western"><surname>James</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>K</given-names> </name><name name-style="western"><surname>Dasgupta</surname><given-names>P</given-names> </name></person-group><article-title>The role of augmented reality in surgical training: a systematic review</article-title><source>Surg Innov</source><year>2023</year><month>06</month><volume>30</volume><issue>3</issue><fpage>366</fpage><lpage>382</lpage><pub-id pub-id-type="doi">10.1177/15533506221140506</pub-id><pub-id pub-id-type="medline">36412148</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhardwaj</surname><given-names>M</given-names> </name><name name-style="western"><surname>Singhal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bhardwaj</surname><given-names>G</given-names> </name><name name-style="western"><surname>Sur</surname><given-names>H</given-names> </name></person-group><article-title>Effectiveness of simulation-based training in urology: a systematic review of educational outcomes and clinical skill transfer</article-title><source>Cureus</source><year>2025</year><month>12</month><volume>17</volume><issue>12</issue><fpage>e98641</fpage><pub-id 
pub-id-type="doi">10.7759/cureus.98641</pub-id><pub-id pub-id-type="medline">41409365</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jeffri</surname><given-names>NF</given-names> </name><name name-style="western"><surname>Awang Rambli</surname><given-names>DR</given-names> </name></person-group><article-title>A review of augmented reality systems and their effects on mental workload and task performance</article-title><source>Heliyon</source><year>2021</year><month>03</month><day>8</day><volume>7</volume><issue>3</issue><fpage>e06277</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2021.e06277</pub-id><pub-id pub-id-type="medline">33748449</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alessa</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Alhaag</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Al-Harkan</surname><given-names>IM</given-names> </name><name name-style="western"><surname>Ramadan</surname><given-names>MZ</given-names> </name><name name-style="western"><surname>Alqahtani</surname><given-names>FM</given-names> </name></person-group><article-title>A neurophysiological evaluation of cognitive load during augmented reality interactions in various industrial maintenance and assembly tasks</article-title><source>Sensors (Basel)</source><year>2023</year><month>09</month><day>6</day><volume>23</volume><issue>18</issue><fpage>7698</fpage><pub-id pub-id-type="doi">10.3390/s23187698</pub-id><pub-id pub-id-type="medline">37765755</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mayer</surname><given-names>R</given-names> </name><name name-style="western"><surname>Makransky</surname><given-names>G</given-names> </name><name name-style="western"><surname>Parong</surname><given-names>J</given-names> </name></person-group><article-title>The promise and pitfalls of learning in immersive virtual reality</article-title><source>Int J Hum Comput Interact</source><year>2023</year><volume>39</volume><issue>11</issue><fpage>2229</fpage><lpage>2238</lpage><pub-id pub-id-type="doi">10.1080/10447318.2022.2108563</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>&#x015E;im&#x015F;ek</surname><given-names>EE</given-names> </name></person-group><article-title>The effect of augmented reality storybooks on the story comprehension and retelling of preschool children</article-title><source>Front Psychol</source><year>2024</year><volume>15</volume><fpage>1459264</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2024.1459264</pub-id><pub-id pub-id-type="medline">39439758</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amores-Valencia</surname><given-names>A</given-names> </name><name name-style="western"><surname>Burgos</surname><given-names>D</given-names> </name><name name-style="western"><surname>Branch-Bedoya</surname><given-names>JW</given-names> </name></person-group><article-title>Influence of motivation and 
academic performance in the use of augmented reality in education. A systematic review</article-title><source>Front Psychol</source><year>2022</year><volume>13</volume><fpage>1011409</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2022.1011409</pub-id><pub-id pub-id-type="medline">36304863</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>B&#x00F6;lek</surname><given-names>KA</given-names> </name><name name-style="western"><surname>De Jong</surname><given-names>G</given-names> </name><name name-style="western"><surname>Henssen</surname><given-names>D</given-names> </name></person-group><article-title>The effectiveness of the use of augmented reality in anatomy education: a systematic review and meta-analysis</article-title><source>Sci Rep</source><year>2021</year><month>07</month><day>27</day><volume>11</volume><issue>1</issue><fpage>15292</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-94721-4</pub-id><pub-id pub-id-type="medline">34315955</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bork</surname><given-names>F</given-names> </name><name name-style="western"><surname>Stratmann</surname><given-names>L</given-names> </name><name name-style="western"><surname>Enssle</surname><given-names>S</given-names> </name><etal/></person-group><article-title>The benefits of an augmented reality magic mirror system for integrated radiology teaching in gross anatomy</article-title><source>Anat Sci Educ</source><year>2019</year><month>11</month><volume>12</volume><issue>6</issue><fpage>585</fpage><lpage>598</lpage><pub-id pub-id-type="doi">10.1002/ase.1864</pub-id><pub-id pub-id-type="medline">30697948</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bogomolova</surname><given-names>K</given-names> </name><name name-style="western"><surname>van der Ham</surname><given-names>IJ</given-names> </name><name name-style="western"><surname>Dankbaar</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>The effect of stereoscopic augmented reality visualization on learning anatomy and the modifying effect of visual-spatial abilities: a double-center randomized controlled trial</article-title><source>Anat Sci Educ</source><year>2020</year><month>09</month><volume>13</volume><issue>5</issue><fpage>558</fpage><lpage>567</lpage><pub-id pub-id-type="doi">10.1002/ase.1941</pub-id><pub-id pub-id-type="medline">31887792</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lubinski</surname><given-names>D</given-names> </name><name name-style="western"><surname>Benbow</surname><given-names>CP</given-names> </name></person-group><article-title>Spatial ability for STEM domains: aligning over 50 years of cumulative psychological knowledge solidifies its importance</article-title><source>J Educ Psychol</source><year>2009</year><volume>101</volume><issue>4</issue><fpage>817</fpage><lpage>835</lpage><pub-id pub-id-type="doi">10.1037/a0016127</pub-id></nlm-citation></ref><ref 
id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roca-Gonz&#x00E1;lez</surname><given-names>C</given-names> </name><name name-style="western"><surname>Martin Gutierrez</surname><given-names>J</given-names> </name><name name-style="western"><surname>Garc&#x00ED;a-Dominguez</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mato Carrodeguas</surname><given-names>MC</given-names> </name></person-group><article-title>Virtual technologies to develop visual-spatial ability in engineering students</article-title><source>Eurasia J Math Sci Technol Educ</source><year>2017</year><volume>13</volume><issue>2</issue><fpage>441</fpage><lpage>468</lpage><pub-id pub-id-type="doi">10.12973/eurasia.2017.00625a</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Contero</surname><given-names>M</given-names> </name><name name-style="western"><surname>Company</surname><given-names>P</given-names> </name><name name-style="western"><surname>Naya</surname><given-names>F</given-names> </name><name name-style="western"><surname>Saorin</surname><given-names>JL</given-names> </name></person-group><article-title>Learning support tools for developing spatial abilities in engineering design</article-title><source>Int J Eng Educ</source><year>2006</year><access-date>2026-03-26</access-date><volume>22</volume><issue>3</issue><fpage>470</fpage><lpage>477</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.ijee.ie/articles/Vol22-3/06_ijee1769.pdf">https://www.ijee.ie/articles/Vol22-3/06_ijee1769.pdf</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yousuf</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Alsawareah</surname><given-names>A</given-names> </name><name name-style="western"><surname>Alhroub</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Investigation of factors that influence the relationship between mental rotation ability and anatomy learning</article-title><source>Morphologie</source><year>2024</year><month>03</month><volume>108</volume><issue>360</issue><fpage>100728</fpage><pub-id pub-id-type="doi">10.1016/j.morpho.2023.100728</pub-id><pub-id pub-id-type="medline">37988905</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mulder</surname><given-names>TA</given-names> </name><name name-style="western"><surname>van de Velde</surname><given-names>T</given-names> </name><name name-style="western"><surname>Dokter</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Unravelling the skillset of point-of-care ultrasound: a systematic review</article-title><source>Ultrasound J</source><year>2023</year><month>04</month><day>19</day><volume>15</volume><issue>1</issue><fpage>19</fpage><pub-id pub-id-type="doi">10.1186/s13089-023-00319-4</pub-id><pub-id pub-id-type="medline">37074526</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pedersen</surname><given-names>H</given-names>
</name><name name-style="western"><surname>St&#x00E5;hl</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ekelund</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Visuospatial ability is associated to 2D laparoscopic simulator performance amongst surgical residents</article-title><source>Surg Open Sci</source><year>2023</year><month>01</month><volume>11</volume><fpage>56</fpage><lpage>61</lpage><pub-id pub-id-type="doi">10.1016/j.sopen.2022.11.004</pub-id><pub-id pub-id-type="medline">36545373</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rogister</surname><given-names>F</given-names> </name><name name-style="western"><surname>Pottier</surname><given-names>L</given-names> </name><name name-style="western"><surname>El Haddadi</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Use of Vandenberg and Kuse Mental Rotation Test to predict practical performance of sinus endoscopy</article-title><source>Ear Nose Throat J</source><year>2022</year><month>02</month><volume>101</volume><issue>2_suppl</issue><fpage>24S</fpage><lpage>30S</lpage><pub-id pub-id-type="doi">10.1177/01455613211000599</pub-id><pub-id pub-id-type="medline">33734883</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Iqbal</surname><given-names>AI</given-names> </name><name name-style="western"><surname>Aamir</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hammad</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Immersive technologies in healthcare: an in-depth exploration of virtual reality and augmented reality in enhancing patient care, medical education, and training paradigms</article-title><source>J Prim Care Community Health</source><year>2024</year><volume>15</volume><fpage>21501319241293311</fpage><pub-id pub-id-type="doi">10.1177/21501319241293311</pub-id><pub-id pub-id-type="medline">39439304</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>L</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name></person-group><article-title>Playing action video games improves visuomotor control</article-title><source>Psychol Sci</source><year>2016</year><month>08</month><volume>27</volume><issue>8</issue><fpage>1092</fpage><lpage>1108</lpage><pub-id pub-id-type="doi">10.1177/0956797616650300</pub-id><pub-id pub-id-type="medline">27485132</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Dongen</surname><given-names>KW</given-names> </name><name name-style="western"><surname>Verleisdonk</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Schijven</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Broeders</surname><given-names>IA</given-names> </name></person-group><article-title>Will the Playstation generation become better endoscopic 
surgeons?</article-title><source>Surg Endosc</source><year>2011</year><month>07</month><volume>25</volume><issue>7</issue><fpage>2275</fpage><lpage>2280</lpage><pub-id pub-id-type="doi">10.1007/s00464-010-1548-2</pub-id><pub-id pub-id-type="medline">21416186</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sammut</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sammut</surname><given-names>M</given-names> </name><name name-style="western"><surname>Andrejevic</surname><given-names>P</given-names> </name></person-group><article-title>The benefits of being a video gamer in laparoscopic surgery</article-title><source>Int J Surg</source><year>2017</year><month>09</month><volume>45</volume><fpage>42</fpage><lpage>46</lpage><pub-id pub-id-type="doi">10.1016/j.ijsu.2017.07.072</pub-id><pub-id pub-id-type="medline">28733118</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gugura</surname><given-names>R</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>P</given-names> </name><name name-style="western"><surname>Tan&#x021B;&#x0103;u</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tefas</surname><given-names>C</given-names> </name></person-group><article-title>Just five more minutes, mom: why video games could make you a better endoscopist</article-title><source>Surg Endosc</source><year>2023</year><month>09</month><volume>37</volume><issue>9</issue><fpage>6901</fpage><lpage>6907</lpage><pub-id pub-id-type="doi">10.1007/s00464-023-10167-x</pub-id><pub-id pub-id-type="medline">37316677</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Katz</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hyers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Patten</surname><given-names>E</given-names> </name><name name-style="western"><surname>Sarte</surname><given-names>D</given-names> </name><name name-style="western"><surname>Loo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Burnett</surname><given-names>GW</given-names> </name></person-group><article-title>Relationship between demographic and social variables and performance in virtual reality among healthcare personnel: an observational study</article-title><source>BMC Med Educ</source><year>2024</year><month>03</month><day>4</day><volume>24</volume><issue>1</issue><fpage>227</fpage><pub-id pub-id-type="doi">10.1186/s12909-024-05180-0</pub-id><pub-id pub-id-type="medline">38439056</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><article-title>Journal Article Reporting Standards (JARS)</article-title><source>American Psychological Association</source><access-date>2026-03-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://apastyle.apa.org/jars">https://apastyle.apa.org/jars</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shepard</surname><given-names>RN</given-names> </name><name 
name-style="western"><surname>Metzler</surname><given-names>J</given-names> </name></person-group><article-title>Mental rotation of three-dimensional objects</article-title><source>Science</source><year>1971</year><month>02</month><day>19</day><volume>171</volume><issue>3972</issue><fpage>701</fpage><lpage>703</lpage><pub-id pub-id-type="doi">10.1126/science.171.3972.701</pub-id><pub-id pub-id-type="medline">5540314</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vandenberg</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Kuse</surname><given-names>AR</given-names> </name></person-group><article-title>Mental rotations, a group test of three-dimensional spatial visualization</article-title><source>Percept Mot Skills</source><year>1978</year><month>10</month><volume>47</volume><issue>2</issue><fpage>599</fpage><lpage>604</lpage><pub-id pub-id-type="doi">10.2466/pms.1978.47.2.599</pub-id><pub-id pub-id-type="medline">724398</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Caissie</surname><given-names>AF</given-names> </name><name name-style="western"><surname>Vigneau</surname><given-names>F</given-names> </name><name name-style="western"><surname>Bors</surname><given-names>DA</given-names> </name></person-group><article-title>What does the Mental Rotation Test measure? An analysis of item difficulty and item characteristics</article-title><source>Open Psychol J</source><year>2009</year><month>12</month><day>11</day><volume>2</volume><fpage>94</fpage><lpage>102</lpage><pub-id pub-id-type="doi">10.2174/1874350100902010094</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hegarty</surname><given-names>M</given-names> </name></person-group><article-title>Ability and sex differences in spatial thinking: what does the mental rotation test really measure?</article-title><source>Psychon Bull Rev</source><year>2018</year><month>06</month><volume>25</volume><issue>3</issue><fpage>1212</fpage><lpage>1219</lpage><pub-id pub-id-type="doi">10.3758/s13423-017-1347-z</pub-id><pub-id pub-id-type="medline">28808983</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajeb</surname><given-names>M</given-names> </name><name name-style="western"><surname>Krist</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Oyeniran</surname><given-names>DO</given-names> </name><name name-style="western"><surname>Wind</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Lakin</surname><given-names>JM</given-names> </name></person-group><article-title>Mental rotation performance: contribution of item features to difficulties and functional adaptation</article-title><source>J Intell</source><year>2024</year><month>12</month><day>30</day><volume>13</volume><issue>1</issue><fpage>2</fpage><pub-id pub-id-type="doi">10.3390/jintelligence13010002</pub-id><pub-id pub-id-type="medline">39852411</pub-id></nlm-citation></ref><ref 
id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Uttal</surname><given-names>DH</given-names> </name><name name-style="western"><surname>Meadow</surname><given-names>NG</given-names> </name><name name-style="western"><surname>Tipton</surname><given-names>E</given-names> </name><etal/></person-group><article-title>The malleability of spatial skills: a meta-analysis of training studies</article-title><source>Psychol Bull</source><year>2013</year><month>03</month><volume>139</volume><issue>2</issue><fpage>352</fpage><lpage>402</lpage><pub-id pub-id-type="doi">10.1037/a0028446</pub-id><pub-id pub-id-type="medline">22663761</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Hart</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Staveland</surname><given-names>LE</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Hancock</surname><given-names>PA</given-names></name><name name-style="western"><surname>Meshkati</surname><given-names>N</given-names></name></person-group><article-title>Development of NASA-TLX (task load index): results of empirical and theoretical research</article-title><source>Human Mental Workload</source><year>1988</year><publisher-name>North-Holland Publishing Company</publisher-name><fpage>139</fpage><lpage>183</lpage></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Horn</surname><given-names>BK</given-names> </name></person-group><article-title>Closed-form solution of absolute orientation using unit quaternions</article-title><source>J Opt Soc Am A</source><year>1987</year><volume>4</volume><issue>4</issue><fpage>629</fpage><lpage>642</lpage><pub-id pub-id-type="doi">10.1364/JOSAA.4.000629</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Reason</surname><given-names>J</given-names> </name></person-group><source>Human Error</source><year>1990</year><publisher-name>Cambridge University Press</publisher-name><pub-id pub-id-type="other">9781139062367</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Norman</surname><given-names>DA</given-names> </name></person-group><source>The Design of Everyday Things</source><year>2013</year><publisher-name>MIT Press</publisher-name><pub-id pub-id-type="other">9780262525671</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tandon</surname><given-names>S</given-names> </name><name name-style="western"><surname>Abdul-Rahman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Borgo</surname><given-names>R</given-names> </name></person-group><article-title>Measuring effects of spatial visualization and domain on visualization task performance: a comparative study</article-title><source>IEEE Trans Visual Comput Graphics</source><year>2023</year><volume>29</volume><issue>1</issue><fpage>668</fpage><lpage>678</lpage><pub-id 
pub-id-type="doi">10.1109/TVCG.2022.3209491</pub-id><pub-id pub-id-type="medline">36166560</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Kourtesis</surname><given-names>P</given-names> </name><name name-style="western"><surname>MacPherson</surname><given-names>SE</given-names> </name></person-group><article-title>An ecologically valid examination of event-based and time-based prospective memory using immersive virtual reality: the influence of attention, memory, and executive function processes on real-world prospective memory</article-title><source>arXiv</source><comment>Preprint posted online on  Feb 23, 2021</comment><pub-id pub-id-type="doi">10.48550/arXiv.2102.11652</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>H&#x00F6;hler</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rasamoel</surname><given-names>ND</given-names> </name><name name-style="western"><surname>Rohrbach</surname><given-names>N</given-names> </name><etal/></person-group><article-title>The impact of visuospatial perception on distance judgment and depth perception in an augmented reality environment in patients after stroke: an exploratory study</article-title><source>J Neuroeng Rehabil</source><year>2021</year><month>08</month><day>21</day><volume>18</volume><issue>1</issue><fpage>127</fpage><pub-id pub-id-type="doi">10.1186/s12984-021-00920-5</pub-id><pub-id pub-id-type="medline">34419086</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aguilar Ramirez</surname><given-names>DE</given-names> </name><name name-style="western"><surname>Blinch</surname><given-names>J</given-names> </name><name name-style="western"><surname>Takeda</surname><given-names>K</given-names> </name><name name-style="western"><surname>Copeland</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Gonzalez</surname><given-names>CL</given-names> </name></person-group><article-title>Differential effects of aging on spatial abilities</article-title><source>Exp Brain Res</source><year>2022</year><month>05</month><volume>240</volume><issue>5</issue><fpage>1579</fpage><lpage>1588</lpage><pub-id pub-id-type="doi">10.1007/s00221-022-06363-1</pub-id><pub-id pub-id-type="medline">35428943</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Huo</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Eye movement characteristics in a mental rotation task presented in virtual reality</article-title><source>Front Neurosci</source><year>2023</year><volume>17</volume><fpage>1143006</fpage><pub-id pub-id-type="doi">10.3389/fnins.2023.1143006</pub-id><pub-id pub-id-type="medline">37051147</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Martin-Gomez</surname><given-names>A</given-names> </name><name name-style="western"><surname>Weiss</surname><given-names>J</given-names> </name><name name-style="western"><surname>Keller</surname><given-names>A</given-names> </name><name name-style="western"><surname>Eck</surname><given-names>U</given-names> </name><name name-style="western"><surname>Roth</surname><given-names>D</given-names> </name><name name-style="western"><surname>Navab</surname><given-names>N</given-names> </name></person-group><article-title>The impact of focus and context visualization techniques on depth perception in optical see-through head-mounted displays</article-title><source>IEEE Trans Vis Comput Graph</source><year>2022</year><month>12</month><volume>28</volume><issue>12</issue><fpage>4156</fpage><lpage>4171</lpage><pub-id pub-id-type="doi">10.1109/TVCG.2021.3079849</pub-id><pub-id pub-id-type="medline">33979287</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bavelier</surname><given-names>D</given-names> </name><name name-style="western"><surname>Green</surname><given-names>CS</given-names> </name></person-group><article-title>Enhancing attentional control: lessons from action video games</article-title><source>Neuron</source><year>2019</year><month>10</month><day>9</day><volume>104</volume><issue>1</issue><fpage>147</fpage><lpage>163</lpage><pub-id pub-id-type="doi">10.1016/j.neuron.2019.09.031</pub-id><pub-id pub-id-type="medline">31600511</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oei</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Patterson</surname><given-names>MD</given-names> </name></person-group><article-title>Enhancing cognition with video games: a multiple game training study</article-title><source>PLoS One</source><year>2013</year><volume>8</volume><issue>3</issue><fpage>e58546</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0058546</pub-id><pub-id pub-id-type="medline">23516504</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tsai</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Heinrichs</surname><given-names>WL</given-names> </name></person-group><article-title>Acquisition of eye-hand coordination skills for videoendoscopic surgery</article-title><source>J Am Assoc Gynecol Laparosc</source><year>1994</year><month>08</month><volume>1</volume><issue>4, Part 2</issue><fpage>S37</fpage><pub-id pub-id-type="doi">10.1016/s1074-3804(05)80989-2</pub-id><pub-id pub-id-type="medline">9073766</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Grantcharov</surname><given-names>TP</given-names> </name><name name-style="western"><surname>Bardram</surname><given-names>L</given-names> </name><name name-style="western"><surname>Funch-Jensen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Rosenberg</surname><given-names>J</given-names> </name></person-group><article-title>Impact of hand dominance, gender, and experience with computer games on performance in virtual 
reality laparoscopy</article-title><source>Surg Endosc</source><year>2003</year><month>07</month><volume>17</volume><issue>7</issue><fpage>1082</fpage><lpage>1085</lpage><pub-id pub-id-type="doi">10.1007/s00464-002-9176-0</pub-id><pub-id pub-id-type="medline">12728373</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rosser</surname><given-names>JC</given-names>  <suffix>Jr</suffix></name><name name-style="western"><surname>Lynch</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Cuddihy</surname><given-names>L</given-names> </name><name name-style="western"><surname>Gentile</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Klonsky</surname><given-names>J</given-names> </name><name name-style="western"><surname>Merrell</surname><given-names>R</given-names> </name></person-group><article-title>The impact of video games on training surgeons in the 21st century</article-title><source>Arch Surg</source><year>2007</year><month>02</month><volume>142</volume><issue>2</issue><fpage>181</fpage><lpage>186</lpage><pub-id pub-id-type="doi">10.1001/archsurg.142.2.181</pub-id><pub-id pub-id-type="medline">17309970</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Khurana</surname><given-names>A</given-names> </name><name name-style="western"><surname>Glueck</surname><given-names>M</given-names> </name><name name-style="western"><surname>Chilana</surname><given-names>PK</given-names> </name></person-group><article-title>Do I just tap my headset?: how novice users discover gestural interactions with consumer augmented reality applications</article-title><source>Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies</source><year>2024</year><volume>7</volume><publisher-name>Association for Computing Machinery</publisher-name><fpage>1</fpage><lpage>28</lpage><pub-id pub-id-type="doi">10.1145/3631451</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Adams</surname><given-names>H</given-names> </name><name name-style="western"><surname>Stefanucci</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Creem-Regehr</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bodenheimer</surname><given-names>B</given-names> </name></person-group><article-title>Depth perception in augmented reality: the effects of display, shadow, and position</article-title><conf-name>2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</conf-name><conf-date>Mar 12-16, 2022</conf-date><pub-id pub-id-type="doi">10.1109/VR51125.2022.00101</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ashtiani</surname><given-names>O</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Prabhakaran</surname><given-names>B</given-names> </name></person-group><article-title>Impact of motion cues, color, and luminance on depth perception in optical see-through AR displays</article-title><source>Front Virtual
Real</source><year>2023</year><month>12</month><day>6</day><volume>4</volume><pub-id pub-id-type="doi">10.3389/frvir.2023.1243956</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Uehira</surname><given-names>K</given-names> </name><name name-style="western"><surname>Suzuki</surname><given-names>M</given-names> </name></person-group><article-title>Depth perception for virtual object displayed in optical see-through HMD</article-title><access-date>2026-03-26</access-date><conf-name>ACHI 2018: Eleventh International Conference on Advances in Computer-Human Interactions</conf-name><conf-date>Mar 25-29, 2018</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://personales.upv.es/thinkmind/dl/conferences/achi/achi_2018/achi_2018_11_20_20009.pdf">https://personales.upv.es/thinkmind/dl/conferences/achi/achi_2018/achi_2018_11_20_20009.pdf</ext-link></comment></nlm-citation></ref></ref-list></back></article>