We’re in the process of curating a list of this year’s publications. If you would like your paper included, please submit it via our dedicated form.
Disclaimer: This list is not yet complete, and some DOIs may not resolve yet.
A Conditional Companion: Lived Experiences of People with Mental Health Disorders Using LLMs
Aditya Kumar Purohit (Center for Advanced Internet Studies (CAIS)), Hendrik Heuer (Center for Advanced Internet Studies (CAIS))
Abstract | Tags: Papers | Links:
@inproceedings{Purohit2026ConditionalCompanion,
  title     = {A Conditional Companion: Lived Experiences of People with Mental Health Disorders Using {LLMs}},
  author    = {Purohit, Aditya Kumar and Heuer, Hendrik},
  url       = {https://www.linkedin.com/in/adityakumarpurohit/, author's linkedin},
  doi       = {10.1145/3772318.3791763},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Large Language Models (LLMs) are increasingly used for mental health support, yet little is known about how people with mental health challenges engage with them, how they evaluate their usefulness, and what design opportunities they envision. We conducted 20 semi-structured interviews with people in the UK who live with mental health conditions and have used LLMs for mental health support. Through reflexive thematic analysis, we found that participants engaged with LLMs in conditional and situational ways: for immediacy, the desire for non-judgement, self-paced disclosure, cognitive reframing, and relational engagement. Simultaneously, participants articulated clear boundaries informed by prior therapeutic experience: LLMs were effective for mild-to-moderate distress but inadequate for crises, trauma, and complex social-emotional situations. We contribute empirical insights into the lived use of LLMs for mental health, highlight boundary-setting as central to their safe role, and propose design and governance directions for embedding them responsibly within care ecosystem},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
A Tree’s Perspective: Enhancing Nature Connectedness Through Transitional and Multisensory Virtual Reality Experiences
Lisa L. Townsend (TU Dortmund University), Julian Rasch (LMU Munich), Amy Grech (University of Strathclyde), Bernhard E. Riecke (Simon Fraser University), Sven Mayer (TU Dortmund University, Research Center Trustworthy Data Science and Security)
Abstract | Tags: Papers | Links:
@inproceedings{Townsend2026TreesPerspective,
  title     = {A Tree’s Perspective: Enhancing Nature Connectedness Through Transitional and Multisensory Virtual Reality Experiences},
  author    = {Townsend, Lisa L. and Rasch, Julian and Grech, Amy and Riecke, Bernhard E. and Mayer, Sven},
  url       = {https://haii.cs.tu-dortmund.de/, website
               https://haii.group/, lab's social media
               https://www.linkedin.com/in/lisa-townsend-hci/, author's linkedin},
  doi       = {10.1145/3772318.3790282},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Embodying natural entities in Virtual Reality (VR) shows potential to enhance nature connectedness, but design factors that support such embodiment remain underexplored. This study examined whether transitional elements in the physical setting before and after VR and multisensory stimuli during VR can strengthen nature connectedness in a transformative tree-embodiment experience. Through a mixed-methods approach (N=20), where we varied the pre- and post-VR experience (Neutral vs. Transitional) and sensory modalities (Audiovisual vs. Multisensory), we found that both transitional and multisensory experiences significantly enhanced presence, embodiment, and nature connectedness, with increases in emotional connectedness sustained one week later. Drawing on interview insights and impact ratings of specific design features, we derive design recommendations for integrating transitional and multisensory elements. Our findings demonstrate the value of holistic design for enhancing the emotional and transformative potential of virtual nature embodiment for fostering environmental awareness.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
AI CHAOS! 2nd Workshop on the Challenges for Human Oversight of AI Systems
Malik Khadar (Department of Computer Science & Engineering, University of Minnesota), Julia Cecil (Department of Psychology, LMU Munich), Leon Van Der Neut (Delft University of Technology), Nikola Banovic (Electrical Engineering, Computer Science, University of Michigan), Dr. Kevin Baum (Center for European Research in Trusted AI (CERTAIN), German Research Center for Artificial Intelligence (DFKI), Saarbrücken), Stevie Chancellor (Computer Science, Engineering, University of Minnesota), Enrico Costanza (UCL Interaction Centre, University College London), Motahhare Eslami (School of Computer Science, Carnegie Mellon University), Anna Maria Feit (Saarland Informatics Campus, Saarland University), Susanne Gaube (Global Business School for Health (GBSH), University College London (UCL)), Ujwal Gadiraju (Web Information Systems, Delft University of Technology), Harmanpreet Kaur (University of Minnesota)
Abstract | Tags: Workshops | Links:
@inproceedings{Khadar2026AiChaos,
  title     = {{AI CHAOS!} 2nd Workshop on the Challenges for Human Oversight of {AI} Systems},
  author    = {Khadar, Malik and Cecil, Julia and Van Der Neut, Leon and Banovic, Nikola and Baum, Kevin and Chancellor, Stevie and Costanza, Enrico and Eslami, Motahhare and Feit, Anna Maria and Gaube, Susanne and Gadiraju, Ujwal and Kaur, Harmanpreet},
  url       = {https://cix.cs.uni-saarland.de/, website},
  doi       = {10.1145/3772363.3778736},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {As AI systems are increasingly adopted in high-stakes domains such as healthcare, autonomous driving, and criminal justice, their failures may threaten human safety and rights. Human oversight of AI systems is therefore critically important as a potential safeguard to prevent harmful consequences in high-risk AI applications. The global regulatory and policy landscape for AI governance remains understandably fragmented and diverse. While frameworks like the European AI Act require human oversight for high-risk AI systems, there is currently a lack of well-defined methodologies and conceptual clarity to operationalize such oversight effectively. Independent of policy and regulation, poorly designed oversight can create dangerous illusions of safety while obscuring accountability. This interdisciplinary workshop aims to bring together researchers from various disciplines, including AI, HCI, psychology, law, and policy, to address this critical gap. We will explore the following questions: (1) What are the greatest challenges to achieving effective human oversight of AI systems? (2) How can we design AI systems that enable meaningful human oversight? (3) How do we assign responsibilities to and support the various stakeholders involved in oversight? Through talks and interactive group discussions, participants will identify oversight challenges; examine stakeholder roles; discuss supporting tools, methods, and regulatory frameworks; and establish a collaborative research agenda. Our central goal is to further a roadmap that enables effective human oversight for the responsible deployment of AI in society.},
  keywords  = {Workshops},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
AI-Supported Electrocardiogram Interpretation: The Effect of Support Presentation on Diagnostic Accuracy, Psychological Need Satisfaction, and Diagnosis Time
Tobias Grundgeiger (Julius-Maximilians-Universität Würzburg, Germany), Louisa Maurer (Julius-Maximilians-Universität Würzburg, Germany), Carlos Ramon Hölzing (University Hospital Würzburg, Germany), Oliver Happel (University Hospital Würzburg, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Grundgeiger2026AisupportedElectrocardiogram,
  title     = {{AI}-Supported Electrocardiogram Interpretation: The Effect of Support Presentation on Diagnostic Accuracy, Psychological Need Satisfaction, and Diagnosis Time},
  author    = {Grundgeiger, Tobias and Maurer, Louisa and Hölzing, Carlos Ramon and Happel, Oliver},
  url       = {https://www.mcm.uni-wuerzburg.de/psyergo/, website},
  doi       = {10.1145/3772318.3790619},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Interpreting electrocardiograms (ECGs) is an important but complex and error-prone task. While diagnostic support algorithms exist, how support is displayed and how clinicians interact with ECG diagnostic and clinical decision support systems in general remain underexplored. In this preregistered experiment, we studied how providing clinicians with different versions of diagnostic support affects ECG interpretation. All four support types improved diagnosis accuracy compared to a no-support control condition, but the most effective was support offering visual ECG trace markings. User experience, in the form of psychological need satisfaction of competence and security, was highest when clinicians first viewed the ECG independently and then received support in a second stage. The latter two-stage support also resulted in the shortest diagnosis times. We conclude with design and research implications for creating clinician-algorithmic support interactions that improve user experience, efficacy, and effectiveness in the present study, and may ultimately contribute to patient safety.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Anticipating Physical Processes in VR: Environment Type and Scale Alter Temporal Expectations
Martin Riemer (Technical University Berlin), Elisa Valletta (University of Regensburg), David Halbhuber (University of Regensburg), Johanna Bogon (University of Regensburg)
Abstract | Tags: Papers | Links:
@inproceedings{Riemer2026AnticipatingPhysical,
  title     = {Anticipating Physical Processes in {VR}: Environment Type and Scale Alter Temporal Expectations},
  author    = {Riemer, Martin and Valletta, Elisa and Halbhuber, David and Bogon, Johanna},
  url       = {https://www.uni-regensburg.de/informatik-data-science/fakultaet/einrichtungen/medieninformatik, website
               https://www.linkedin.com/in/elisa-valletta-041b3939b/, author's linkedin},
  doi       = {10.1145/3772318.3791767},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Accurate temporal expectations support interaction in virtual reality (VR), yet it remains unclear whether the internal models that guide such expectations in the real world transfer unchanged to immersive VR. We report two experiments examining expected durations of gravity-driven motion across real and virtual environments. In Experiment 1, participants imagined a ball rolling down ramps in a physical lab, a 1:1 VR replica, and an up-scaled VR room and produced the time the imagined process would take. Results revealed systematic distortions: durations were underestimated in VR relative to the physical lab, and larger virtual spaces elicited longer durations. Experiment 2 assessed whether participants incorporated gravity laws into their simulations. Although gravitational acceleration was consistently underestimated, it was incorporated in both real and virtual environments. Our findings show that VR and its spatial scale bias temporal expectations, with implications for the design of temporally coherent and physically plausible VR experiences.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Anticipation Before Action: EEG-Based Implicit Intent Detection for Adaptive Gaze Interaction in Mixed Reality
Francesco Chiossi (LMU Munich, Munich, Germany), Elnur Imamaliyev (Department of Neuroscience, Carl von Ossietzky Universität Oldenburg, Oldenburg, Germany), Martin Bleichner (Department of Psychology, Carl von Ossietzky Universität Oldenburg, Oldenburg, Germany), Sven Mayer (TU Dortmund University and Research Center Trustworthy Data Science and Security, Dortmund, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Chiossi2026AnticipationBefore,
  title     = {Anticipation Before Action: {EEG}-Based Implicit Intent Detection for Adaptive Gaze Interaction in Mixed Reality},
  author    = {Chiossi, Francesco and Imamaliyev, Elnur and Bleichner, Martin and Mayer, Sven},
  url       = {https://www.medien.ifi.lmu.de/, website
               https://www.linkedin.com/company/lmu-media-informatics-group/posts/?feedView=all, lab's linkedin
               https://www.linkedin.com/in/francescochiossi/, author's linkedin
               https://www.francesco-chiossi-hci.com/, social media},
  doi       = {10.1145/3772318.3790523},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Mixed Reality (MR) interfaces increasingly rely on gaze for interaction, yet distinguishing visual attention from intentional action remains difficult, leading to the Midas Touch problem. Existing solutions require explicit confirmations, while brain–computer interfaces may provide an implicit marker of intention using Stimulus-Preceding Negativity (SPN). We investigated how Intention (Select vs. Observe) and Feedback (With vs. Without) modulate SPN during gaze-based MR interactions. During realistic selection tasks, we acquired EEG and eye-tracking data from 28 participants. SPN was robustly elicited and sensitive to both factors: observation without feedback produced the strongest amplitudes, while intention to select and expectation of feedback reduced activity, suggesting SPN reflects anticipatory uncertainty rather than motor preparation. Complementary decoding with deep learning models achieved reliable person-dependent classification of user intention, with accuracies ranging from 75% to 97% across participants. These findings identify SPN as an implicit marker for building intention-aware MR interfaces that mitigate the Midas Touch.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Anticipation Without Acceleration: Benefits of Shared Gaze in Collocated Augmented Reality Collaboration
Julian Rasch (LMU Munich), Vladislav Dmitrievic Rusakov (LMU Munich), Jan Leusmann (LMU Munich), Florian Müller (TU Darmstadt), Albrecht Schmidt (LMU Munich)
Abstract | Tags: Papers | Links:
@inproceedings{Rasch2026AnticipationWithout,
  title     = {Anticipation Without Acceleration: Benefits of Shared Gaze in Collocated Augmented Reality Collaboration},
  author    = {Rasch, Julian and Rusakov, Vladislav Dmitrievic and Leusmann, Jan and Müller, Florian and Schmidt, Albrecht},
  url       = {https://www.medien.ifi.lmu.de/index.xhtml, website
               https://www.linkedin.com/company/lmu-media-informatics-group/, lab's linkedin
               https://de.linkedin.com/in/julian-rasch, author's linkedin},
  doi       = {10.1145/3772318.3791758},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Knowing what collaborators attend to is essential. Previous studies demonstrated that shared gaze enhances coordination and social connectedness in remote settings. In collocated settings, gaze can be both naturally observable and technologically augmented. AR enables gaze cues to be rendered explicitly in the environment. To investigate if and how such cues are beneficial in collocated AR collaboration, we examined both qualitative and quantitative effects across three task types (puzzle, negotiation, search) and two spatial setups (plane, room), focusing on task completion time and the collaborative experience. In our user study with 24 dyads (n=48), we varied gaze visibility and measured task performance, user preference, social connectedness, and shared attention. Our results show that sharing gaze in collocated collaborative AR can increase shared attention, is perceived as helpful, and improves the user experience, similar to remote collaboration, but has a limited impact on the actual task completion time across the chosen tasks.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
AR-Cues for Task Resumption Change Users’ Strategy for Dealing with Deferrable Interruptions
Kilian Bahnsen (Julius-Maximilians-Universität Würzburg, Germany), Tobias Grundgeiger (Julius-Maximilians-Universität Würzburg, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Bahnsen2026ArcuesTask,
  title     = {{AR}-Cues for Task Resumption Change Users’ Strategy for Dealing with Deferrable Interruptions},
  author    = {Bahnsen, Kilian and Grundgeiger, Tobias},
  url       = {https://www.mcm.uni-wuerzburg.de/psyergo/, website},
  doi       = {10.1145/3772318.3790673},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {When given the opportunity, people tend to try to reach coarse breakpoints for work interruptions. Coarse breakpoints are frequently associated with less effort when resuming the task. We investigated how supporting task resumption with augmented reality (AR)-cues affects this behavior. In a mixed factorial experiment, 50 participants performed a physical sorting task that included deferrable interruptions with varying distances to a coarse breakpoint, either with or without an AR-cue indicating the next correct step after interruption. Participants with AR-cue accepted interruptions at fine breakpoints more frequently than those without a cue, except when the coarse breakpoint was one step away, and reported less stress. Our findings indicate that AR-cues attenuate but do not eliminate the need for specific task resumption strategies, such as reaching a coarse breakpoint, and reduce the stress. Considering AR-cues for task resumption may be particularly beneficial for time-critical interruptions and fast-paced work environments.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
AttentiveLearn: Personalized Post-Lecture Support for Gaze-Aware Immersive Learning
Shi Liu (KIT, h-lab), Martin Feick (KIT, h-lab), Linus Bierhoff (KIT, h-lab), Alexander Maedche (KIT, h-lab)
Abstract | Tags: Papers | Links:
@inproceedings{Liu2026Attentivelearn,
  title     = {{AttentiveLearn}: Personalized Post-Lecture Support for Gaze-Aware Immersive Learning},
  author    = {Liu, Shi and Feick, Martin and Bierhoff, Linus and Maedche, Alexander},
  url       = {https://h-lab.win.kit.edu/, website},
  doi       = {10.1145/3772318.3790667},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Immersive learning environments such as virtual classrooms in Virtual Reality (VR) offer learners unique learning experiences, yet providing effective learner support remains a challenge. While prior HCI research has explored in-lecture support for immersive learning, little research has been conducted to provide post-lecture support, despite being critical for sustained motivation, engagement, and learning outcomes. To address this, we present AttentiveLearn, a learning ecosystem that generates personalized quizzes on a mobile learning assistant based on learners’ attention distribution inferred using eye-tracking in VR lectures. We evaluated the system in a four-week field study with 36 university students attending lectures on Bayesian data analysis. AttentiveLearn improved learners’ reported motivation and engagement, without conclusive evidence of learning gains. Meanwhile, anecdotal evidence suggested improvements in attention for certain participants over time. Based on our findings of the field study, we provide empirical insights and design implications for personalized post-lecture support for immersive learning systems.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Augmented Body Parts: Bridging VR Embodiment and Wearable Robotics
HyeonBeom Yi (Electronics, Telecommunications Research Institute, Daejeon, Republic of Korea), Myung Jin (MJ) Kim (Electronics, Telecommunications Research Institute, Daejeon, Republic of Korea), Seungwoo Je (Southern University of Science, Technology, Shenzhen, China), Seungjae Oh (Kyung Hee University, Yongin, Republic of Korea), Shuto Takashita (University of Tokyo, Tokyo, Japan), Hongyu Zhou (University of Sydney, Sydney, Australia), Marie Muehlhaus (Saarland University, Saarbrücken, Germany), Dr. Eyal Ofek (University of Birmingham, Birmingham, United Kingdom), Andrea Bianchi (KAIST, Daejeon, Republic of Korea)
Abstract | Tags: Workshops | Links:
@inproceedings{Yi2026AugmentedBody,
  title     = {Augmented Body Parts: Bridging {VR} Embodiment and Wearable Robotics},
  author    = {Yi, HyeonBeom and Kim, Myung Jin and Je, Seungwoo and Oh, Seungjae and Takashita, Shuto and Zhou, Hongyu and Muehlhaus, Marie and Ofek, Eyal and Bianchi, Andrea},
  url       = {https://hci.cs.uni-saarland.de, website
               https://www.linkedin.com/company/saarhcilab/, lab's linkedin},
  doi       = {10.1145/3772363.3778688},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Recent work across HCI/HRI and wearable robotics has investigated how people control and perceive extra body parts in both virtual and physical settings. Virtual embodiment in XR has shown that users can experience ownership and agency with non-anthropomorphic avatars, while wearable robotics has introduced supernumerary limbs such as third arms and robotic tails. Despite these shared goals, connections between findings remain limited because VR and hardware studies rely on different assumptions about sensory feedback, human perception, and physical constraints, making insights difficult to transfer across contexts. This workshop brings together researchers in XR, wearable robotics, haptics, and neuroscience to explore how to foster embodiment and adaptation with augmented body parts, and how to bridge virtual embodiment to effective use with wearable devices. Through a keynote, brief position shares, and two hands-on group activities, participants will examine control mappings and sensory-feedback strategies and identify which aspects of VR-based embodiment realistically transfer when accounting for hardware limits, sensor variability, and cognitive load. Ultimately, the workshop aims to articulate a focused research agenda connecting VR-based insights to feasible wearable robotics implementations, enabling future work on augmenting the human body with new parts and capabilities.},
  keywords  = {Workshops},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Augmenting Imagery with Multimodal Vibrotactile Representations: Touch, Feel, and Hear
Mazen Salous (OFFIS Institute for Information Technology, Oldenburg, Germany), Matthias Kramer (OFFIS Institute for Information Technology, Oldenburg, Germany), Wilko Heuten (OFFIS Institute for Information Technology, Oldenburg, Germany), Charles Hudin (CEA Tech, Gif Sur Yvettes, France), Susanne Boll (University of Oldenburg, Oldenburg, Germany), Larbi Abdenebaoui (OFFIS Institute for Information Technology, Oldenburg, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Salous2026AugmentingImagery,
  title     = {Augmenting Imagery with Multimodal Vibrotactile Representations: Touch, Feel, and Hear},
  author    = {Salous, Mazen and Kramer, Matthias and Heuten, Wilko and Hudin, Charles and Boll, Susanne and Abdenebaoui, Larbi},
  url       = {https://www.offis.de/, website
               https://www.linkedin.com/showcase/offis-science, lab's linkedin
               https://www.linkedin.com/in/dr-mazen-salous-a2b84225/, author's linkedin},
  doi       = {10.1145/3772318.3791493},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Digital images remain largely inaccessible to blind or visually impaired (BVI) people because alt-text rarely conveys how materials feel or sound. We augment material images with multimodal vibrotactile patterns and evaluate four generation pipelines. AP1: prompt with one-shot example, AP2: prompt to audio, then pattern, AP3: real finger–material recording to pattern, and AP4: patterns from a public haptic database. A custom multilocal vibrotactile tablet played patterns on 10 material images (e.g., wood, stone, glass). Eight BVI participants explored each image with four patterns and ranked the best match. Think-aloud feedback highlighted: Theme 1 (realism — rough/grainy for wood and stone; smooth/steady for glass), Theme 2 (distinctiveness — separable cues; uniform buzzes were criticized), Theme 3 (personal associations), Theme 4 (effort/calibration for faint/noisy patterns; intensity tuning), and Theme 5 (preferences/suggestions). Exploratory ranks (n=8) echoed hybrid, user-tunable pipelines for accessible material perception.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Balancing Accuracy and Embodiment: A Hybrid Perspective for Complex Visuomotor Tasks in VR
Dennis Dietz (LMU Munich), Sebastian Walz (LMU Munich), Sven Mayer (TU Dortmund University), Andreas Butz (LMU Munich), Matthias Hoppe (Keio University Graduate School of Media Design)
Abstract | Tags: Papers | Links:
@inproceedings{Dietz2026BalancingAccuracy,
  title     = {Balancing Accuracy and Embodiment: A Hybrid Perspective for Complex Visuomotor Tasks in {VR}},
  author    = {Dietz, Dennis and Walz, Sebastian and Mayer, Sven and Butz, Andreas and Hoppe, Matthias},
  url       = {https://www.medien.ifi.lmu.de/, website},
  doi       = {10.1145/3772318.3791472},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Visual perspective is a crucial design factor in Virtual Reality (VR). Especially when complex motor tasks are involved, it can affect both objective performance and subjective experience. We compared four visual perspectives (First-Person view, translucent Ghost view, Third-Person view, and Hybrid view) in a user study (N=20) involving different difficulties in a balancing game. Our findings reveal complex tradeoffs between the sense of embodiment, performance, and preference: The preferred Hybrid perspective offered a significant stability advantage for low task difficulty. However, this benefit vanished with increasing physical demand, revealing a speed-accuracy trade-off where external views required longer completion times. Ego-centric perspectives (First and Ghost) induced a stronger sense of embodiment and presence, but were less preferred. Participants' choice was not determined by representational fidelity but by pragmatic considerations of perceived utility. As perceived effectiveness can overrule objective performance and subjective experience, the choice of perspective is an important consideration for future training and rehabilitation applications in VR.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Balancing Automation and Discretion: How Decision Stakes and Human-AI Collaboration Affect Citizen Perceptions in Public Administration
Saja Aljuneidi (OFFIS - Institute for Information Technology), Wilko Heuten (OFFIS - Institute for Information Technology), Zhamilya Bilyalova (Wellesley College), Maria K Wolters (OFFIS - Institute for Information Technology), Susanne Boll (University of Oldenburg)
Abstract | Tags: Papers | Links:
@inproceedings{Aljuneidi2026BalancingAutomation,
  title     = {Balancing Automation and Discretion: How Decision Stakes and Human-{AI} Collaboration Affect Citizen Perceptions in Public Administration},
  author    = {Aljuneidi, Saja and Heuten, Wilko and Bilyalova, Zhamilya and Wolters, Maria K. and Boll, Susanne},
  url       = {https://hci.uni-oldenburg.de/, website
               https://vimeo.com/1161420158?share=copy&fl=sv&fe=ci, teaser video
               https://www.linkedin.com/in/saljuneidi, author's social media},
  doi       = {10.1145/3772318.3790795},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {The growing use of AI in public administration improves efficiency, yet its use in discretionary decisions raises concerns about fairness and legitimacy. While prior research examined decision stakes and Human–AI decision-making configurations separately, their combined effect on citizens’ perceptions of fairness and adoption remains underexplored. We conducted a mixed-method Wizard-of-Oz study (n=43) using an Intelligent-Self-Service-Kiosk. Participants completed a low-stakes (ID renewal) and a high-stakes (social housing) task under one of three decision-making configurations: AI alone, AI with human-supervision, and human with AI advice or recommendation. Quantitative analysis found no significant effects, highlighting the limits of standard metrics. However, qualitative interviews revealed that citizens valued human involvement, requiring meaningful over symbolic oversight. They emphasized interactive dialogue before decisions to capture their circumstances and after, to facilitate appeals. We contribute evidence of tensions between citizens’ desire for efficiency and need for human-control and fairness. We provide guidance for designing citizen-centered AI systems that align with democratic values.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Beyond Links: Exploring Visual Representations of Multi-View Relations in Mixed Reality
Weizhou Luo (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Rufat Rzayev (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Benjamin Russig (Computer Graphics, Visualization, TUD Dresden University of Technology, Dresden, Germany), Sivanon Visutarporn (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Marc Satkowski (Fraunhofer Institute for Process Engineering, Packaging IVV, Dresden, Germany), Stefan Gumhold (Computer Graphics, Visualization, TUD Dresden University of Technology, Dresden, Germany), Raimund Dachselt (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Luo2026BeyondLinks,
title = {Beyond Links: Exploring Visual Representations of Multi-View Relations in Mixed Reality},
author = {Weizhou Luo (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Rufat Rzayev (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Benjamin Russig (Computer Graphics and Visualization, TUD Dresden University of Technology, Dresden, Germany), Sivanon Visutarporn (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany), Marc Satkowski (Fraunhofer Institute for Process Engineering and Packaging IVV, Dresden, Germany), Stefan Gumhold (Computer Graphics and Visualization, TUD Dresden University of Technology, Dresden, Germany), Raimund Dachselt (Interactive Media Lab Dresden, TUD Dresden University of Technology, Dresden, Germany)},
url = {https://imld.de/en/, website
https://www.linkedin.com/company/iml-dresden/, lab's linkedin
https://www.linkedin.com/in/weizhou-luo-8ab457bb, author's social media},
doi = {10.1145/3772318.3791398},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {This paper investigates associations, explicit representations of relations between multiple views in Mixed Reality (MR). While research on 2D desktop environments offers extensive recommendations for communicating relations between multiple views, MR environments lack such systematic guidance, necessitating adapted solutions that consider their spatial affordances. To address this gap, we systematically explored association techniques in existing research. Building on established 2D multi-view literature and refining insights from prior design principles, we developed a codebook to describe view relations and their representations. Applying it to a corpus of 44 immersive multi-view approaches, we identified recurring design strategies and synthesized them into a design space of visual association techniques adapted for immersive contexts. Based on a lightweight prototyping framework, we validate the utility of the design space through three envisioning scenarios, demonstrating how associations can support exploration, coordination, and sensemaking in MR applications. Our results inform the design of MR multi-view environments.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
BotaXplore: Enhancing Visitor Engagement and Learning in Botanical Gardens Through Mobile Technology
Albin Zeqiri (Ulm University), Tobias Wagner (Ulm University), Johanna Grüneberg (LMU Munich), Enrico Rukzio (Ulm University)
Abstract | Tags: Posters | Links:
@inproceedings{Zeqiri2026Botaxplore,
title = {BotaXplore: Enhancing Visitor Engagement and Learning in Botanical Gardens Through Mobile Technology},
author = {Albin Zeqiri (Ulm University), Tobias Wagner (Ulm University), Johanna Grüneberg (LMU Munich), Enrico Rukzio (Ulm University)},
url = {https://www.uni-ulm.de/in/mi/hci/, website
https://az16.github.io/, author's social media
https://wgnrto.de/, author's social media},
doi = {10.1145/3772363.3799272},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Educational guided visits in botanical gardens offer valuable opportunities for learning and engagement that promote awareness of the importance of biological diversity, its conservation, and sustainable use. However, a focus group with five botanists identified challenges in designing tours for heterogeneous audiences that foster curiosity and interest, as well as in tailoring educational content. To address these aspects, this paper presents BotaXplore, a prototype mobile application that supports plant exploration and learning in botanical gardens through three modes: exploratory, semi-guided, and tour-based. Using photo-based identification, users access short facts and quizzes about plants, and discovered species are added to a personal collection. Building on this prototype, we plan to evaluate the app's impact on nature engagement and learning outcomes after improving learning paths, content generation, and support for collaborative exploration.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Challenges in Synchronous & Remote Collaboration Around Visualization
Matthew Brehmer (University of Waterloo), Maxime Cordeil (University of Queensland), Christophe Hurter (Université de Toulouse), Takayuki Itoh (Ochanomizu University), Wolfgang Büschel (University of Stuttgart), Mahmood Jasim (Louisiana State University), Arnaud Prouzeau (Université Paris-Saclay), David Saffo (J.P. Morgan Chase & Co.), Lyn Bartram (Simon Fraser University), Sheelagh Carpendale (Simon Fraser University), Chen Zhu-Tian (University of Minnesota-Twin Cities), Andrew Cunningham (University of South Australia), Tim Dwyer (Monash University), Samuel Huron (Institut Polytechnique de Paris), Masahiko Itoh (Hokkaido Information University), Alark Joshi (University of San Francisco), Kiyoshi Kiyokawa (Nara Institute of Science and Technology), Hideaki Kuzuoka (University of Tokyo), Bongshin Lee (Yonsei University), Gabriela Molina León (Aarhus University), Harald Reiterer (University of Konstanz), Bektur Ryskeldiev (Mercari R4D), Jonathan Schwabish (Urban Institute), Brian A. Smith (Columbia University), Yasuyuki Sumi (Future University Hakodate), Ryo Suzuki (University of Colorado Boulder), Anthony Tang (Singapore Management University), Yalong Yang (Georgia Institute of Technology), Jian Zhao (University of Waterloo)
Abstract | Tags: Papers | Links:
@inproceedings{Brehmer2026ChallengesSynchronous,
title = {Challenges in Synchronous \& Remote Collaboration Around Visualization},
author = {Matthew Brehmer (University of Waterloo), Maxime Cordeil (University of Queensland), Christophe Hurter (Université de Toulouse), Takayuki Itoh (Ochanomizu University), Wolfgang Büschel (University of Stuttgart), Mahmood Jasim (Louisiana State University), Arnaud Prouzeau (Université Paris-Saclay), David Saffo (J.P. Morgan Chase & Co.), Lyn Bartram (Simon Fraser University), Sheelagh Carpendale (Simon Fraser University), Chen Zhu-Tian (University of Minnesota-Twin Cities), Andrew Cunningham (University of South Australia), Tim Dwyer (Monash University), Samuel Huron (Institut Polytechnique de Paris), Masahiko Itoh (Hokkaido Information University), Alark Joshi (University of San Francisco), Kiyoshi Kiyokawa (Nara Institute of Science and Technology), Hideaki Kuzuoka (University of Tokyo), Bongshin Lee (Yonsei University), Gabriela Molina León (Aarhus University), Harald Reiterer (University of Konstanz), Bektur Ryskeldiev (Mercari R4D), Jonathan Schwabish (Urban Institute), Brian A. Smith (Columbia University), Yasuyuki Sumi (Future University Hakodate), Ryo Suzuki (University of Colorado Boulder), Anthony Tang (Singapore Management University), Yalong Yang (Georgia Institute of Technology), Jian Zhao (University of Waterloo)},
url = {http://hci.uni-konstanz.de, website
https://www.linkedin.com/company/105489067/, lab's linkedin},
doi = {10.1145/3772318.3791117},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {We characterize 16 challenges faced by those investigating and developing remote and synchronous collaborative experiences around visualization. Our work reflects the perspectives and prior research efforts of an international group of 29 experts from across human-computer interaction and visualization sub-communities. The challenges are anchored around five collaborative activities that exhibit a centrality of visualization and multimodal communication. These activities include exploratory data analysis, creative ideation, visualization-rich presentations, joint decision making grounded in data, and real-time data monitoring. The challenges also reflect the changing dynamics of these activities in the face of recent advances in extended reality (XR) and artificial intelligence (AI). As an organizing scheme for future research at the intersection of visualization and computer-supported cooperative work, we align the challenges with a sequence of four sets of research and development activities: technological choices, social factors, AI assistance, and evaluation.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Characterising Gaming Group Experiences
Daniel Reis (Universidade de Lisboa), Kathrin Gerling (KIT), André Rodrigues (Universidade de Lisboa)
Abstract | Tags: Papers | Links:
@inproceedings{Reis2026CharacterisingGaming,
title = {Characterising Gaming Group Experiences},
author = {Daniel Reis (Universidade de Lisboa), Kathrin Gerling (KIT), André Rodrigues (Universidade de Lisboa)},
url = {https://hci.iar.kit.edu, website},
doi = {10.1145/3772318.3791223},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {When people play digital games together, their experiences are often influenced by the group. While prior research has focused on the individual player experience, we argue that a deeper understanding of group dynamics is required for designing digital games that effectively support complex social interactions. In this paper, we characterise the lived group experiences of fifteen long-term players, using qualitative content analysis of semi-structured interviews examining group lifecycles, their impact on play, and how games and platforms support or constrain them. Our findings show that gaming groups are diverse, often shifting between people- and task-orientation based on needs and motivations. They influence how games are experienced, establishing shared practices that persist across contexts. Yet, while games and tools support group play, they often lack flexibility to accommodate such evolving and nuanced social dynamics. We provide insight into how group-based play unfolds and examples of how games can better support it.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
CoEmpaTeam: Enhancing Cognitive Empathy using LLM-based Avatars and Dynamic Role Play in Virtual Reality
Dehui Kong (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Martin Feick (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Shi Liu (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Alexander Maedche (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT))
Abstract | Tags: Papers | Links:
@inproceedings{Kong2026Coempateam,
title = {CoEmpaTeam: Enhancing Cognitive Empathy using LLM-based Avatars and Dynamic Role Play in Virtual Reality},
author = {Dehui Kong (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Martin Feick (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Shi Liu (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT)), Alexander Maedche (Human-Centered Systems Lab (h-lab), Karlsruhe Institute of Technology (KIT))},
url = {https://h-lab.win.kit.edu/, website
https://youtu.be/WlE8jhRTFps, full video
https://www.linkedin.com/in/dehui-kong-0aa8a7306/, author's linkedin},
doi = {10.1145/3772318.3790389},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Cognitive empathy, the ability to understand others' perspectives, is essential for effective communication, reducing biases, and constructive negotiation. However, this skill is declining in a performance-driven society, which prioritizes efficiency over perspective-taking. Here, the training of cognitive empathy is challenging because it is a subtle, hard-to-perceive soft skill. To address this, we developed CoEmpaTeam, a VR-based system that enables users to train their cognitive empathy by using LLM-driven avatars with different personalities. Through dynamic role play, users actively engage in perspective-taking, experiencing situations through another person's eyes. CoEmpaTeam deploys three avatars who significantly differ in their personality, validated by a technical evaluation and an online experiment (n=90). Next, we evaluated the system through a lab experiment with 32 participants who performed three sessions across two weeks, followed by a one-week diary study. Our results showed a significant increase in cognitive empathy, which, according to participants, transferred into their real lives.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Collaborative Document Editing with Multiple Users and AI Agents
Florian Lehmann (University of Bayreuth), Krystsina Shauchenka (University of Bayreuth), Daniel Buschek (University of Bayreuth)
Abstract | Tags: Papers | Links:
@inproceedings{Lehmann2026CollaborativeDocument,
title = {Collaborative Document Editing with Multiple Users and AI Agents},
author = {Florian Lehmann (University of Bayreuth), Krystsina Shauchenka (University of Bayreuth), Daniel Buschek (University of Bayreuth)},
url = {https://www.hciai.uni-bayreuth.de, website},
doi = {10.1145/3772318.3790648},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Current AI writing support tools are largely designed for individuals, complicating collaboration when co-writers must leave the shared workspace to use AI and then communicate and reintegrate results. We propose integrating AI agents directly into collaborative writing environments. Our prototype makes AI use visible to all users through two new shared objects: user-defined agent profiles and tasks. Agent responses appear in the familiar comment feature. In a user study (N=30), 14 teams worked on writing projects during one week. Interaction logs and interviews show that teams incorporated agents into existing norms of authorship, control, and coordination, rather than treating them as team members. Agent profiles were viewed as personal territory, while created agents and outputs became shared resources. We discuss implications for team-based AI interaction, highlighting opportunities and boundaries for treating AI as a shared resource in collaborative work.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Connected Material Experiences using Bimanual Vibrotactile Crosstalk in Virtual Reality
Nihar Sabnis (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Germany), André Zenner, Erik Peralta Løvaas (Sensorimotor Interaction, Max Planck Institute for Informatics)
Abstract | Tags: Papers | Links:
@inproceedings{Sabnis2026ConnectedMaterial,
title = {Connected Material Experiences using Bimanual Vibrotactile Crosstalk in Virtual Reality},
author = {Nihar Sabnis (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Germany), André Zenner, Erik Peralta Løvaas (Sensorimotor Interaction, Max Planck Institute for Informatics)},
doi = {10.1145/3772318.3790767},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Perceiving material properties such as elasticity, flexibility, and torsion is inherently bimanual, as we rely on the relative motion of our hands to form a unified sense of materiality. Yet, most vibrotactile material rendering approaches are limited to a single hand or finger. While prior work has explored bimanual haptic interfaces, most depend on specialized hardware for specific interactions. In this paper, we demonstrate design strategies to support bimanual material exploration through motion-coupled vibrotactile feedback. Our technique introduces variable crosstalk between the controllers' vibration to evoke connectedness, making two unconnected devices feel as though they manipulate a single object. The technique generalizes motion-coupled feedback approaches beyond previous single-point explorations. Through two user studies, we show that this approach (1) significantly enhances perceived connectedness and (2) conveys distinct material qualities such as elasticity and torsion. Finally, we present Dvihastīya, an authoring tool for designing connected bimanual experiences in virtual reality.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Contextualizing Public Interfaces for Meaningful Human-Environment Interactions with Traces in Use
Linda Hirsch (Social Emotional Technology Lab, University of California Santa Cruz, Santa Cruz, California, United States), Dr Marius Hoggenmüller (Design Lab, School of Architecture, Design and Planning, The University of Sydney, Sydney, NSW, Australia), Prof. Dr. Andreas Martin Butz (LMU Munich, Munich, Germany), Louisa Sophie Bekker (LMU Munich, Munich, Germany), Sarita Maria Sridharan (LMU Munich, Munich, Germany), Ceenu George (Chair of Human-Computer Interaction, TU Berlin, Berlin, Germany)
Abstract | Tags: Journal | Links:
@inproceedings{Hirsch2026ContextualizingPublicb,
title = {Contextualizing Public Interfaces for Meaningful Human-Environment Interactions with Traces in Use},
author = {Linda Hirsch (Social Emotional Technology Lab, University of California Santa Cruz, Santa Cruz, California, United States), Dr Marius Hoggenmüller (Design Lab, School of Architecture, Design and Planning, The University of Sydney, Sydney, NSW, Australia), Prof. Dr. Andreas Martin Butz (LMU Munich, Munich, Germany), Louisa Sophie Bekker (LMU Munich, Munich, Germany), Sarita Maria Sridharan (LMU Munich, Munich, Germany), Ceenu George (Chair of Human-Computer Interaction, TU Berlin, Berlin, Germany)},
url = {https://www.hci.tu-berlin.de/, website},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Meaningful interactions positively impact users' meaning-making and feeling socially and culturally connected with their surroundings. However, creating such interactions is a continuous, complex challenge. We present the Traces in Use design concept supporting interface contextualization for meaningful human-environment interactions in public places. The concept was developed and evaluated in three steps: I) exploring traces of use characteristics as inspiration for design, II) introducing the concept definition, its theoretical evaluation, and a supporting framework, and III) practically evaluating the concept by developing, contextualizing, and testing three interfaces (a lion interface, a drum, and a storyteller) in two empirical field studies (N=40). The results show that the concept promotes meaningful interaction by supporting users' feelings of socio-cultural connectedness and meaning-making. With this, our work contributes the Traces in Use design concept, its development, and its methodological application for meaningful human-environment interactions and interface contextualization in public places.},
keywords = {Journal},
pubstate = {published},
tppubtype = {inproceedings}
}
Determining Perception Thresholds for Real and Virtual Inclinations While Cycling in Virtual Reality
Jonas Keppel (University of Duisburg-Essen), Marvin Prochazka (University of Duisburg-Essen), Stefan Lewin (University of Duisburg-Essen), Markus Stroehnisch (University of Duisburg-Essen), Marvin Strauss (University of Duisburg-Essen), André Zenner (Saarland University and DFKI), Donald Degraen (University of Canterbury), Andrii Matviienko (KTH Royal Institute of Technology), Stefan Schneegass (University of Duisburg-Essen)
Abstract | Tags: Papers | Links:
@inproceedings{Keppel2026DeterminingPerception,
title = {Determining Perception Thresholds for Real and Virtual Inclinations While Cycling in Virtual Reality},
author = {Jonas Keppel (University of Duisburg-Essen), Marvin Prochazka (University of Duisburg-Essen), Stefan Lewin (University of Duisburg-Essen), Markus Stroehnisch (University of Duisburg-Essen), Marvin Strauss (University of Duisburg-Essen), André Zenner (Saarland University and DFKI), Donald Degraen (University of Canterbury), Andrii Matviienko (KTH Royal Institute of Technology), Stefan Schneegass (University of Duisburg-Essen)},
url = {https://hci.informatik.uni-due.de/, website
https://de.linkedin.com/company/hci-group-essen, lab's linkedin
https://www.facebook.com/HCIEssen, facebook
https://youtu.be/eT_CP7vTleY, full video},
doi = {10.1145/3772318.3791538},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {In virtual reality (VR) experiences, mismatches between reality and virtuality are usually undesirable, as they can disrupt immersion and induce cybersickness. However, when carefully controlled, they may expand the design space of VR. This research investigates perceptual detection thresholds for mismatches between real and virtual inclinations during cycling in VR. Using a custom simulation},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Development, Evaluation, and Implementation of SEQR – a Usable Secure QR code Scanner
Mattia Mossano (Karlsruhe Institute of Technology), Maxime Fabian Veit (Karlsruhe Institute of Technology), Tobias Länge (Karlsruhe Institute of Technology), Benjamin Maximilian Berens (Karlsruhe Institute of Technology), Filipo Sharevski (DePaul University), Melanie Volkamer (Karlsruhe Institute of Technology)
Abstract | Tags: | Links:
@inproceedings{Mossano2026DevelopmentEvaluation,
title = {Development, Evaluation, and Implementation of SEQR – a Usable Secure QR code Scanner},
author = {Mattia Mossano (Karlsruhe Institute of Technology), Maxime Fabian Veit (Karlsruhe Institute of Technology), Tobias Länge (Karlsruhe Institute of Technology), Benjamin Maximilian Berens (Karlsruhe Institute of Technology), Filipo Sharevski (DePaul University), Melanie Volkamer (Karlsruhe Institute of Technology)},
url = {https://secuso.aifb.kit.edu/, website
https://www.linkedin.com/company/secuso-research-group/, lab's linkedin
https://www.linkedin.com/in/mattiamossano, author's social media
https://bsky.app/profile/secusoresearch.bsky.social, bluesky
https://mastodon.social/@SECUSO_Research@baw%C3%BC.social, mastodon},
doi = {10.1145/3772318.3793213},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {QR codes are widely used, but can become the vector of phishing attacks (QRishing). To support users, we systematically developed a usable secure QR code scanner, SEQR (Security Enhanced QR code scanner). We based the SEQR's design on two systematic reviews: (i) of academic literature (2015–2025), identifying 96 papers on QRishing, and (ii) of the MITRE ATT&CK Mobile repository, finding 36 QRishing techniques. From these two sources, we categorized 60 potential attacks, and divided them between those that SEQR addresses only at the technology level, and those where SEQR involves the users in the decision. We evaluated SEQR effectiveness in thwarting attacks in a between-subjects online study (n=556), where SEQR achieved 93.35% correct answers, compared to 75.24% for the Apple iOS QR code scanner and 65.11% for the Samsung Android QR code scanner. We implemented SEQR as an open source Android application, available on GitHub.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Do Citizens Agree with the EU AI Act? Public Perspectives on Risk and Regulation of AI Systems
Gabriel Lima (Max Planck Institute for Security and Privacy), Gustavo Gil Gasiola (Karlsruhe Institute of Technology), Frederike Zufall (Karlsruhe Institute of Technology, Waseda Institute for Advanced Study), Yixin Zou (Max Planck Institute for Security and Privacy)
Abstract | Tags: Papers | Links:
@inproceedings{Lima2026CitizensAgree,
title = {Do Citizens Agree with the EU AI Act? Public Perspectives on Risk and Regulation of AI Systems},
author = {Gabriel Lima (Max Planck Institute for Security and Privacy), Gustavo Gil Gasiola (Karlsruhe Institute of Technology), Frederike Zufall (Karlsruhe Institute of Technology, Waseda Institute for Advanced Study), Yixin Zou (Max Planck Institute for Security and Privacy)},
url = {https://thegcamilo.github.io/, website
https://www.linkedin.com/in/gabriel-lima-531b271a0/, author's linkedin},
doi = {10.1145/3772318.3790535},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {The European Union (EU) has spearheaded the regulation of artificial intelligence (AI) with the AI Act, which regulates AI systems based on the risks they pose to fundamental rights and other protected values. AI systems that pose unacceptable risks are prohibited, high-risk AI systems must comply with mandatory requirements, and minimal risk AI systems are encouraged—but not required—to adopt voluntary standards. Motivated by concerns that the AI Act may not reflect the public's opinions, we investigate how laypeople (N=1,421) assess 48 different AI systems concerning their risk and regulation. We find that people believe all 48 AI systems pose moderate levels of risk and should be regulated (albeit without outright prohibitions). Our findings challenge the AI Act's tiered approach, showing that people might support horizontal regulation requiring minimal standards for AI systems, and provide implications for developers seeking to develop AI aligned with public expectations.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Do It Fast, Forget It Fast: How Timing and Limb Visualizations Affect First-Person Augmented Reality Instructions
Clara Sayffaerth (LMU Munich), Ehbal Ablimit (LMU Munich), Annika Köhler (University Hospital Würzburg), Jonas Wombacher (TU Darmstadt), Albrecht Schmidt (LMU Munich), Florian Müller (TU Darmstadt)
Abstract | Tags: Papers | Links:
@inproceedings{Sayffaerth2026ItFast,
title = {Do It Fast, Forget It Fast: How Timing and Limb Visualizations Affect First-Person Augmented Reality Instructions},
author = {Clara Sayffaerth (LMU Munich), Ehbal Ablimit (LMU Munich), Annika Köhler (University Hospital Würzburg), Jonas Wombacher (TU Darmstadt), Albrecht Schmidt (LMU Munich), Florian Müller (TU Darmstadt)},
url = {https://www.medien.ifi.lmu.de/, website
https://www.linkedin.com/company/lmu-media-informatics-group/, lab's linkedin
https://www.linkedin.com/in/sayffaerth/, author's linkedin},
doi = {10.1145/3772318.3791471},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Acquiring tacit knowledge and practical skills often depends on direct observation and in situ training. AR offers an alternative by overlaying first-person step-by-step instructions that guide users through tasks such as assembly and repair. Previous work demonstrates the effectiveness of AR instruction for specific applications. In our experimental work, we systematically explore aspects of the broader design space. We conducted a controlled experiment (n = 40) to investigate three key factors identified in learning theory and XR embodiment research: imitation timing (parallel vs. sequential), limb visualization (hand vs. full arm), and limb visibility (opaque vs. semi-transparent). Across all conditions, participants followed AR instructions and afterward repeated the tasks from memory. We assessed performance, user experience, and retention. Our results show that parallel imitation is faster and increases embodiment, whereas sequential imitation enhances memory retention and comfort. Our findings provide guidance for the temporal and visual design of first-person AR tutorials.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Effects of Small Latency Variations in 2D Target Selection Tasks
Andreas Schmid (University of Regensburg), Isabell Röhr (University of Regensburg), Martina Emmert (University of Regensburg), Niels Henze (University of Tübingen), Raphael Wimmer (University of Regensburg)
Abstract | Tags: Papers | Links:
@inproceedings{Schmid2026EffectsSmall,
title = {Effects of Small Latency Variations in 2D Target Selection Tasks},
author = {Andreas Schmid (University of Regensburg), Isabell Röhr (University of Regensburg), Martina Emmert (University of Regensburg), Niels Henze (University of Tübingen), Raphael Wimmer (University of Regensburg)},
url = {https://www.uni-regensburg.de/informatik-data-science/fakultaet/einrichtungen/medieninformatik/, website},
doi = {10.1145/3772318.3791712},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Systems' latency — the time between user input and system response — slows down the human-computer interaction loop. Several studies revealed negative objective and subjective effects of high latency, typically treating latency as a constant delay. Because latency varies significantly in practice, recent work also assessed the effects of large and sudden latency changes. In practice, however, latency variations are small but frequent. As the effects of such variations are unclear, we investigate how small latency variations (+/-50 ms) affect users' performance and perceived task load for 2D target selection tasks with static and moving targets. For static targets, we found that latency variation causes significantly higher completion times and less efficient trajectories, however with small effect sizes. In contrast, we found no significant effects on any performance measure for moving targets. Our findings indicate that the effect of latency variation is generally very small and quickly disappears for non-trivial tasks.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
eHMI for All - Investigating the Effect of External Communication of Automated Vehicles on Pedestrians, Manual Drivers, and Cyclists in Virtual Reality
Mark Colley (UCL Interaction Centre), Simon Kopp (Institute of Media Informatics, Ulm University), Debargha Dey (Eindhoven University of Technology), Pascal Jansen (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)
Abstract | Tags: Papers | Links:
@inproceedings{Colley2026EhmiAll,
title = {eHMI for All - Investigating the Effect of External Communication of Automated Vehicles on Pedestrians, Manual Drivers, and Cyclists in Virtual Reality},
author = {Mark Colley (UCL Interaction Centre), Simon Kopp (Institute of Media Informatics, Ulm University), Debargha Dey (Eindhoven University of Technology), Pascal Jansen (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
doi = {10.1145/3772318.3790585},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {With automated vehicles (AVs), the absence of a human operator could necessitate external Human-Machine Interfaces (eHMIs) to communicate with other road users. Existing research primarily focuses on pedestrian-AV interactions, with limited attention given to other road users, such as cyclists and drivers of manually driven vehicles. So far, no studies have compared the effects of eHMIs across these three road user roles. Therefore, we conducted a within-subjects virtual reality experiment (N=40), evaluating the subjective and objective impact of an eHMI communicating the AV's intention to pedestrians, cyclists, and drivers under various levels of distraction (no distraction, visual noise, interference). eHMIs positively influenced safety perceptions, trust, perceived usefulness, and mental demand across all roles. While distraction and road user roles showed significant main effects, interaction effects were only observed in perceived usability. Thus, a unified eHMI design is effective, facilitating the standardization and broader adoption of eHMIs in diverse traffic.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
EmbroForm: Digital Fabrication of Soft Freeform Objects with Machine Embroidered Pull-up Strings
Yu Jiang (HCI Lab), Haonan Zhang (HCI Lab), Jürgen Steimle (HCI Lab)
Abstract | Tags: Papers | Links:
@inproceedings{Jiang2026Embroform,
  title     = {EmbroForm: Digital Fabrication of Soft Freeform Objects with Machine Embroidered Pull-up Strings},
  author    = {Yu Jiang (HCI Lab), Haonan Zhang (HCI Lab), Jürgen Steimle (HCI Lab)},
  doi       = {10.1145/3772318.3790731},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Pull-up objects form 3D shapes by pulling a string routed through a 2D material, offering low-cost 2D fabrication and reversible transformation. However, existing approaches rely on origamic folding, which creates faceted, oftentimes rigid surfaces and requires manual pull-up string routing. We introduce EmbroForm, a digital fabrication pipeline for fully soft pull-up objects with organic, higher-fidelity shapes. Instead of folding, EmbroForm forms 3D shapes by seaming the boundaries of a flexible 2D patch unwrapped from the target. To enable this, we contribute a fabrication technique that automates the routing of sliding strings on flexible sheet materials with machine embroidery, which we extend on to design zig-zag lacings for seaming the boundaries. Then we introduce an end-to-end pipeline that, given a 3D mesh, creates an optimized 2D unwrapped patch and generates pull-up string routing paths for fabrication. We provide a design tool for customization and validate our approach with technical experiments and implemented application cases.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Emotion Through Motion: How Shape-Changing Jewelry Conveys Emotions
Anke Brocker (RWTH Aachen University), Felix Kasteel (RWTH Aachen University), Sören Schröder (RWTH Aachen University), Heiko Mueller (OFFIS), Jürgen Steimle (Saarland University), Jan Borchers (RWTH Aachen University)
Abstract | Tags: Papers | Links:
@inproceedings{Brocker2026EmotionThrough,
  title     = {Emotion Through Motion: How Shape-Changing Jewelry Conveys Emotions},
  author    = {Anke Brocker (RWTH Aachen University), Felix Kasteel (RWTH Aachen University), Sören Schröder (RWTH Aachen University), Heiko Mueller (OFFIS), Jürgen Steimle (Saarland University), Jan Borchers (RWTH Aachen University)},
  url       = {https://hci.rwth-aachen.de, website
https://www.linkedin.com/in/anke-brocker-313890172/, author's linkedin},
  doi       = {10.1145/3772318.3791295},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Shape-changing wearables are known to convey emotions to wearers and observers, and jewelry is commonly worn for self-expression and to be seen by others. But how do individual shape change parameters impact the emotions communicated? In a first study, participants observed a shape-changing necklace; the second included wearing it. The necklace uses pneumatic finger actuators; fabrication details are provided. We systematically varied motion type, speed, amplitude and repetition, and exterior material to analyze emotions using Russell's circumplex model. Additionally, we asked users what they associated with each shape change. We found some surprising relationships between shape change parameters and the valence and arousal levels of emotions wearers and observers perceived. Symmetrical actuations were recognized more accurately and received higher valence and arousal ratings. Interestingly, even when wearers, who only felt motions, misidentified them, their ratings matched those from observers. Our findings support creating shape-changing interfaces that communicate emotions more precisely.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Enhancing Memory Recall Through AI-Assisted Method of Loci in Virtual Reality
Clemens Wulff (Universität Hamburg), Lucie Kruse (Universität Hamburg), Frank Steinicke (Universität Hamburg)
Abstract | Tags: Posters | Links:
@inproceedings{Wulff2026EnhancingMemory,
  title     = {Enhancing Memory Recall Through AI-Assisted Method of Loci in Virtual Reality},
  author    = {Clemens Wulff (Universität Hamburg), Lucie Kruse (Universität Hamburg), Frank Steinicke (Universität Hamburg)},
  url       = {https://www.inf.uni-hamburg.de/en/inst/ab/hci.html, website
https://www.linkedin.com/in/lucie-kruse-004740234/, author's linkedin},
  doi       = {10.1145/3772363.3798815},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {The Method of Loci is a well-established mnemonic technique that involves associating words with objects placed along a route. A key factor in its effectiveness is creating meaningful connections between the words to-be-remembered and the corresponding objects. In this study, we investigate how artificial intelligence (AI) can enhance this technique by i) selecting appropriate objects for each word and ii) generating coherent textual associations between the words and their objects. These AI-assisted approaches are compared to a control condition, where iii) object-word pairs are chosen randomly without assistance. Our findings demonstrate that the Object condition significantly improves word recall both immediately and after one week. On the other side, the Text condition did not lead to a significant enhancement in recall, and perceived workload showed no significant differences across all conditions. These results offer valuable insights for advancing mnemonic techniques and suggest directions for future research to optimize memory strategies.},
  keywords  = {Posters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Exploring the Potential of Disengagement-Friendly Game Design to Support Children's Exit from Play Sessions
Meshaiel Alsheail (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Kathrin Gerling (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Zeynep Yildiz (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Anna-Lena Meiners (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Merlin Steven Opp (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Alsheail2026ExploringPotential,
  title     = {Exploring the Potential of Disengagement-Friendly Game Design to Support Children's Exit from Play Sessions},
  author    = {Meshaiel Alsheail (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Kathrin Gerling (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Zeynep Yildiz (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Anna-Lena Meiners (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany), Merlin Steven Opp (Human-Computer Interaction and Accessibility, Karlsruhe Institute of Technology, Karlsruhe, Germany)},
  url       = {https://hci.iar.kit.edu/index.php, website},
  doi       = {10.1145/3772318.3790564},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Disengagement from games is challenging for children, and can lead to family conflict. While parental mediation is well-understood, the role of game design in supporting children’s disengagement remains underexplored. Our work addresses this gap through a qualitative study with 39 participants (22 children aged 4-11 and 17 parents), in which children played "Snarky’s Adventure", a prototype featuring disengagement-friendly mechanics supporting player understanding of progress, and the experience of satisfaction and closure at the end of play. Through Qualitative Content Analysis, we show that the features help children anticipate the end of play, but in some cases spark curiosity and the desire to re-engage. Additionally, while parents valued the mechanics to understand game progress, future work should explore how to actively engage them in children’s disengagement. Our work provides the first empirical exploration of disengagement-friendly game mechanics, and outlines challenges and opportunities for their future integration in children's games.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Eye Want It All! Investigating Eye Tracking as Implicit Support for Generative Inpainting
Niklas Pfützenreuter (University of Duisburg-Essen), Carina Liebers (University of Duisburg-Essen), David Goedicke (University of Duisburg-Essen), Donald Degraen (University of Canterbury), Uwe Gruenefeld (GENERIO), Stefan Schneegass (University of Duisburg-Essen)
Abstract | Tags: Posters | Links:
@inproceedings{Pfuetzenreuter2026EyeWant,
  title     = {Eye Want It All! Investigating Eye Tracking as Implicit Support for Generative Inpainting},
  author    = {Niklas Pfützenreuter (University of Duisburg-Essen), Carina Liebers (University of Duisburg-Essen), David Goedicke (University of Duisburg-Essen), Donald Degraen (University of Canterbury), Uwe Gruenefeld (GENERIO), Stefan Schneegass (University of Duisburg-Essen)},
  url       = {https://hci.informatik.uni-due.de/, website
https://de.linkedin.com/company/hci-group-essen, lab's linkedin
https://www.linkedin.com/in/niklas-pfützenreuter/, author's linkedin
https://www.facebook.com/HCIEssen, facebook},
  doi       = {10.1145/3772363.3799314},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Users often struggle to use Generative Artificial Intelligence (GenAI) models to generate a desired image, as controlling them solely with prompts is difficult. Current solutions to this problem, such as adding conditional controls, require users to provide explicit input, which can be tedious. To avoid depending on additional explicit input, this paper explores what implicit gaze behavior tells about user intentions when viewing generated images. In our user study (𝑁 = 16), we evaluated the correlation between gaze behavior and user annotations, showing that users looked longer at areas they wanted to regenerate. While our research is the first step, we believe our work can pave the way for incorporating implicit user input into interactions with GenAI systems.},
  keywords  = {Posters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Flow on Social Media? Rarer Than You’d Think
Michael T. Knierim (KIT, University of Nottingham), Thimo Schulz (KIT), Moritz Schiller (KIT), Jwan Shaban (University of Nottingham), Mario Nadj (University of Duisburg-Essen), Max L. Wilson (University of Nottingham), Alexander Maedche (KIT)
Abstract | Tags: Papers | Links:
@inproceedings{Knierim2026FlowSocial,
  title     = {Flow on Social Media? Rarer Than You’d Think},
  author    = {Michael T. Knierim (KIT, University of Nottingham), Thimo Schulz (KIT), Moritz Schiller (KIT), Jwan Shaban (University of Nottingham), Mario Nadj (University of Duisburg-Essen), Max L. Wilson (University of Nottingham), Alexander Maedche (KIT)},
  url       = {https://h-lab.win.kit.edu/, website
https://www.linkedin.com/in/dr-michael-knierim-13397881/, author's linkedin},
  doi       = {10.1145/3772318.3791800},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Researchers often attribute social media’s appeal to its ability to elicit flow experiences of deep absorption and effortless engagement. Yet prolonged use has also been linked to distraction, fatigue, and lower mood. This paradox remains poorly understood, in part because prior studies rely on habitual or one-shot reports that ask participants to directly attribute flow to social media. To address this gap, we conducted a five-day field study with 40 participants, combining objective smartphone app tracking with daily reconstructions of flow-inducing activities. Across 673 reported flow occurrences, participants rarely associated flow with social media (2%). Instead, heavier social media use predicted fewer daily flow occurrences. We further examine this relationship through the effects of social media use on fatigue, mood, and motivation. Altogether, our findings suggest that flow and social media may not align as closely as assumed - and might even compete - underscoring the need for further research.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Forefeel the Move: Investigating Proprioceptive Feedback for Communicating Imminent Motions of Body-actuating Systems
Marie Muehlhaus (Saarland University), Martin Schmitz (University of Koblenz), Jürgen Steimle (Saarland University)
Abstract | Tags: Papers | Links:
@inproceedings{Muehlhaus2026ForefeelMove,
  title     = {Forefeel the Move: Investigating Proprioceptive Feedback for Communicating Imminent Motions of Body-actuating Systems},
  author    = {Marie Muehlhaus (Saarland University), Martin Schmitz (University of Koblenz), Jürgen Steimle (Saarland University)},
  url       = {https://hci.cs.uni-saarland.de, website
https://www.linkedin.com/company/saarhcilab/, lab's linkedin
https://www.linkedin.com/in/marie-muehlhaus-21b2b8202/, author's linkedin},
  doi       = {10.1145/3772318.3790487},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Systems actuating the body can proactively assist users in diverse tasks. However, unexpected body actuation may pose safety risks. We propose proprioceptive feedback to inform users about an imminent actuation before the system takes control. In a user study, we compare different proprioceptive cues that either interrupt or augment user motion to convey (1) solely that a body actuation is imminent, (2) its direction, or (3) its target. To enable a controlled investigation, we confined the cues to one degree-of-freedom joints and implemented them in an elbow exoskeleton. The results show that all cues are highly noticeable, offering an integrated feedback channel; yet, their effectiveness in communicating direction and target differed: While cues that augmented user motion were more accurate and preferred, disruptive cues enabled faster but less accurate interpretations. Furthermore, our analysis revealed that proprioceptive feedback enhanced the expressiveness of the conveyed information and user's aspirations for adaptive feedback.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
From Distribution to Contribution: Negotiating Justice Policies in Grassroots FOSS Communities
Philip Engelbutzeder (University of Siegen), Leonie Jahn (University of Siegen), Anton Ballmaier (University of Siegen), Dennis Lawo (Bonn-Rhein-Sieg University of Applied Sciences), Lea Katharina Michel (University of Siegen), Sof Gjing Jovanovska (University of Siegen), Dave Randall (University of Siegen), Volker Wulf (University of Siegen)
Abstract | Tags: Papers | Links:
@inproceedings{Engelbutzeder2026FromDistribution,
  title     = {From Distribution to Contribution: Negotiating Justice Policies in Grassroots FOSS Communities},
  author    = {Philip Engelbutzeder (University of Siegen), Leonie Jahn (University of Siegen), Anton Ballmaier (University of Siegen), Dennis Lawo (Bonn-Rhein-Sieg University of Applied Sciences), Lea Katharina Michel (University of Siegen), Sof Gjing Jovanovska (University of Siegen), Dave Randall (University of Siegen), Volker Wulf (University of Siegen)},
  url       = {https://www.wineme.uni-siegen.de/, website},
  doi       = {10.1145/3772318.3791722},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Fairness is a recurring challenge in grassroots digital infrastructures, where collective action depends on volunteer contributions. This paper presents a study of Foodsharing.de, a grassroots FOSS platform with 185,000 members rescuing and redistributing surplus food. Drawing on 25 interviews and long-term activist involvement, we analyze two justice-oriented features: the Cherry-Picking Rule (distributional fairness) and Commitment Statistics (contributional fairness). We show how these fairness features become deeply entangled in practice and how they operate as policy-in- code, inscribing fairness logics into software and redistributing not only food and labor but also authority within the community. Rather than settling questions of justice, these interventions trigger renewed negotiation across deliberative spaces and everyday coordination, as encoded rules are interpreted, contested, and adapted. Building on these dynamics, we outline governance directions for justice-oriented grassroots infrastructures, highlighting the need for contestability and accountable autonomy to sustain negotiation and align technical change with community legitimacy.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
From Harm to Healing: Understanding Individual Resilience after Cybercrimes
Xiaowei Chen (MPI-SP), Mindy Tran (MPI-SP), Yue Deng (The Hong Kong University of Science and Technology & MPI-SP), Bhupendra Acharya (University of Louisiana), Yixin Zou (MPI-SP)
Abstract | Tags: Papers | Links:
@inproceedings{Chen2026FromHarm,
  title     = {From Harm to Healing: Understanding Individual Resilience after Cybercrimes},
  author    = {Xiaowei Chen (MPI-SP), Mindy Tran (MPI-SP), Yue Deng (The Hong Kong University of Science and Technology & MPI-SP), Bhupendra Acharya (University of Louisiana), Yixin Zou (MPI-SP)},
  doi       = {10.1145/3772318.3791486},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {How do individuals recover from cybercrimes? Victims experience various types of harm after cybercrimes, including monetary loss, data breaches, negative emotions, and even psychological trauma. The aspects that support their recovery process and contribute to individual cyber resilience remain underinvestigated. To address this gap, we interviewed 18 cybercrime victims from Western Europe using a trauma-informed approach. We identified four common stages following victimization: recognition, coping, processing, and recovery. Participants adopted various strategies to mitigate the impact of cybercrime and used different indicators to describe recovery. While they mostly relied on social support and self-regulation for emotional coping, service providers largely determined whether victims were able to recover their money. Internal factors, external support, and context sensitivity collectively contribute to individuals' cyber resilience. We recommend trauma-informed support for cybercrime victims. Extending our conceptualization of individual cyber resilience, we propose collaborative and context-sensitive strategies to address the harmful impacts of cybercrime.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
From Using to Infrastructuring: Grassroots VPN-Building in Iran’s Women–Life–Freedom Movement
Sarvin Qalandar (Zentrum für Digitalisierung Südwestfalen), Philip Engelbutzeder (Information Systems and New Media, University of Siegen), David Randall (Information Systems and New Media, University of Siegen), Volker Wulf (Information Systems and New Media, University of Siegen)
Best PaperAbstract | Tags: Best Paper, Papers | Links:
@inproceedings{Qalandar2026FromUsing,
  title     = {From Using to Infrastructuring: Grassroots VPN-Building in Iran’s Women–Life–Freedom Movement},
  author    = {Sarvin Qalandar (Zentrum für Digitalisierung Südwestfalen), Philip Engelbutzeder (Information Systems and New Media, University of Siegen), David Randall (Information Systems and New Media, University of Siegen), Volker Wulf (Information Systems and New Media, University of Siegen)},
  doi       = {10.1145/3772318.3790369},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {States increasingly weaponize digital infrastructures through censorship and surveillance. Iran represents an acute case of this broader global pattern. We study how citizens sustain connectivity and agency during Iran’s Women–Life–Freedom (WLF) protests. Based on 21 interviews with citizens and digital activists in Kermanshah province, inside Iran and in the diaspora, we document a shift from dependence on commercial circumvention to grassroots infrastructuring: people created and shared VPNs, proxies, and ad hoc communication pipelines. Peer learning on platforms such as Telegram, X, and GitHub—via Persian tutorials, scripts, and troubleshooting—enabled rapid adaptation under repression. We identify four dynamics: (1) distrust and survival as primary motivations; (2) infrastructural solidarity as everyday care; (3) technical improvisation and peer teaching; and (4) persistent constraints from censorship and risk. We argue that grassroots infrastructuring reframes end-user development as survival work. The paper contributes empirical evidence and design implications for HCI/CSCW on civic technologies, digital activism, and infrastructures of participation under authoritarian control.},
  keywords  = {Best Paper, Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Grand Challenges around Designing Computers’ Control Over Our Bodies
Florian Floyd Mueller (Monash University), Nadia Berthouze (University College London), Misha Sra (University of California), Mar Gonzalez-Franco (Google Seattle), Henning Pohl (Aalborg University), Susanne Boll (University of Oldenburg), Richard Byrne (RMIT University Melbourne), Arthur Caetano (University of California), Masahiko Inami (University of Tokyo), Jarrod Knibbe (University of Queensland), Per Ola Kristensson (University of Cambridge), Xiang Li (University of Cambridge), Zhuying Li (Southeast University Nanjing), Joe Marshall (University of Nottingham), Louise Petersen Matjeka (Norwegian University of Science and Technology Trondheim), Minna Nygren (UCL London), Rakesh Patibanda (Monash University), Sara Price (UCL London), Harald Reiterer (University of Konstanz), Aryan Saini (Monash University), Oliver Schneider (University of Waterloo), Ambika Shahu (Interdisciplinary Transformation University Linz), Phoebe Toups Dugas (Monash University), Don Samitha Elvitigala (Monash University)
Abstract | Tags: Papers | Links:
@inproceedings{Mueller2026GrandChallenges,
  title     = {Grand Challenges around Designing Computers’ Control Over Our Bodies},
  author    = {Florian Floyd Mueller (Monash University), Nadia Berthouze (University College London), Misha Sra (University of California), Mar Gonzalez-Franco (Google Seattle), Henning Pohl (Aalborg University), Susanne Boll (University of Oldenburg), Richard Byrne (RMIT University Melbourne), Arthur Caetano (University of California), Masahiko Inami (University of Tokyo), Jarrod Knibbe (University of Queensland), Per Ola Kristensson (University of Cambridge), Xiang Li (University of Cambridge), Zhuying Li (Southeast University Nanjing), Joe Marshall (University of Nottingham), Louise Petersen Matjeka (Norwegian University of Science and Technology Trondheim), Minna Nygren (UCL London), Rakesh Patibanda (Monash University), Sara Price (UCL London), Harald Reiterer (University of Konstanz), Aryan Saini (Monash University), Oliver Schneider (University of Waterloo), Ambika Shahu (Interdisciplinary Transformation University Linz), Phoebe Toups Dugas (Monash University), Don Samitha Elvitigala (Monash University)},
  url       = {http://hci.uni-konstanz.de, website
https://www.linkedin.com/company/105489067/admin/page-posts/published/, lab's linkedin},
  doi       = {10.1145/3772318.3790606},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Advances in emerging technologies, such as on-body mechanical actuators and electrical muscle stimulation, have allowed computers to take control over our bodies. This presents opportunities as well as challenges, raising fundamental questions about agency and the role of our body when interacting with technology. To advance this research field as a whole, we brought together expert perspectives in a week-long seminar to articulate the grand challenges that should be tackled when it comes to the design of computers’ control over our bodies. These grand challenges span technical, design, user, and ethical aspects. By articulating these grand challenges, we aim to begin initiating a research agenda that positions bodily control not only as a technical feature but as a central, experiential, and ethical concern for future human–computer interaction endeavors.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hacking Flow: From Lived Practices to Innovation
Fabio Stano (KIT), Max L. Wilson (University of Nottingham), Christof Weinhardt (KIT), Michael T. Knierim (KIT, University of Nottingham)
Abstract | Tags: Papers | Links:
@inproceedings{Stano2026HackingFlow,
  title     = {Hacking Flow: From Lived Practices to Innovation},
  author    = {Fabio Stano (KIT), Max L. Wilson (University of Nottingham), Christof Weinhardt (KIT), Michael T. Knierim (KIT, University of Nottingham)},
  url       = {https://www.linkedin.com/in/dr-michael-knierim-13397881/, author's linkedin},
  doi       = {10.1145/3772318.3791009},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {In digital knowledge work, flow promises not just productivity; it offers a pathway to well-being. Yet despite decades of flow research in HCI, we know little about how to design digital interventions that support it. In this work, we foreground lived interventions — everyday practices workers already use to foster flow — to uncover overlooked opportunities and chart new directions for digital intervention design. Specifically, we report findings from two studies: (1) a reflexive thematic analysis of open-ended survey responses (n = 160), surfacing 38 lived interventions across four categories: environment, organization, task shaping, and personal readiness; and (2) a quantitative online survey (n = 121) that validates this repertoire, identifies which interventions are broadly endorsed versus polarizing, and elicits visions of technological support. We contribute empirical insights into how digital workers cultivate flow, situate these lived interventions within existing literature, and derive design opportunities for future digital flow interventions.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
HaptEx: Investigating Haptic Notification Channels for Exoskeletons Across Different Levels of Actuation
Marie Muehlhaus* (Saarland University), Jannik Nau* (Saarland University), Martin Schmitz (University of Koblenz), Jürgen Steimle (Saarland University)
Abstract | Tags: Papers | Links:
@inproceedings{Muehlhaus2026Haptex,
  title     = {HaptEx: Investigating Haptic Notification Channels for Exoskeletons Across Different Levels of Actuation},
  author    = {Marie Muehlhaus* (Saarland University), Jannik Nau* (Saarland University), Martin Schmitz (University of Koblenz), Jürgen Steimle (Saarland University)},
  url       = {https://hci.cs.uni-saarland.de, website
https://www.linkedin.com/company/saarhcilab/, lab's linkedin
https://www.linkedin.com/in/marie-muehlhaus-21b2b8202/, author's linkedin
https://www.linkedin.com/in/jannik-nau/, author's linkedin},
  doi       = {10.1145/3772318.3791179},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Exoskeletons are increasingly deployed in real-world contexts, where communicating critical system states or unexpected events is important for effective interaction. Haptic feedback offers a direct communication channel, integrating naturally with the actuated body region. Yet, it remains unclear how well haptic feedback is perceived while the body is being actuated. In a controlled study (N=24) with a shoulder exoskeleton, we compare four common haptic notification channels (poking, proprioceptive, thermal, vibrotactile) under different levels of actuation. Results show that poking was detected fastest, while thermal and proprioceptive notifications were most accurate and noticeable. Actuation levels affected error rates and noticeability, but not response times. Participants reported that thermal notifications aligned best with the actuation levels, producing a distinct sensation that blended naturally with movement. In contrast, proprioceptive notifications conveyed the strongest sense of urgency. We discuss design implications for leveraging haptic notifications to support embodied communication with exoskeletons.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Helping Johnny Make Sense of Privacy Policies with LLMs
Vincent Freiberger (ScaDS.AI, Leipzig University), Arthur Fleig (ScaDS.AI, Leipzig University), Erik Buchmann (ScaDS.AI, Leipzig University)
Abstract | Tags: Papers | Links:
@inproceedings{Freiberger2026HelpingJohnny,
  title     = {Helping Johnny Make Sense of Privacy Policies with LLMs},
  author    = {Vincent Freiberger (ScaDS.AI, Leipzig University), Arthur Fleig (ScaDS.AI, Leipzig University), Erik Buchmann (ScaDS.AI, Leipzig University)},
  url       = {https://scads.ai/about-us/junior-research-groups/cyber-physical-autonomous-systems-cyphy/, website
https://de.linkedin.com/in/vincent-freiberger-2510b0153, author's linkedin},
  doi       = {10.1145/3772318.3791465},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Understanding and engaging with privacy policies is crucial for online privacy, yet these documents remain notoriously complex and difficult to navigate. We present PRISMe, an interactive browser extension that combines LLM-based policy assessment with a dashboard and customizable chat interface, enabling users to skim quick overviews or explore policy details in depth while browsing. We conduct a user study (N=22) with participants of diverse privacy knowledge to investigate how users interpret the tool's explanations and how it shapes their engagement with privacy policies, identifying distinct interaction patterns. Participants valued the clear overviews and conversational depth, but flagged some issues, particularly adversarial robustness and hallucination risks. Thus, we investigate how a retrieval-augmented generation (RAG) approach can alleviate issues by re-running the chat queries from the study. Our findings surface design challenges as well as technical trade-offs, contributing actionable insights for developing future user-centered, trustworthy privacy policy analysis tools.},
  keywords  = {Papers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Human-AI-UI Interactions Across Modalities
Kewen Peng (University of Utah, United States), Jeffrey Nichols (Apple Inc., United States), Christof Lutteroth (University of Bath, United Kingdom), Tiffany Knearem (MBZUAI, United Arab Emirates), Felix Kretzer (Karlsruhe Institute of Technology (KIT), Germany), Jeffrey Bigham (Carnegie Mellon University & Apple Inc., United States), Alexander Maedche (Karlsruhe Institute of Technology (KIT), Germany), Yue Jiang (University of Utah, United States)
Abstract | Tags: Workshops | Links:
@inproceedings{Peng2026HumanaiuiInteractions,
  title     = {Human-AI-UI Interactions Across Modalities},
  author    = {Kewen Peng (University of Utah, United States), Jeffrey Nichols (Apple Inc., United States), Christof Lutteroth (University of Bath, United Kingdom), Tiffany Knearem (MBZUAI, United Arab Emirates), Felix Kretzer (Karlsruhe Institute of Technology (KIT), Germany), Jeffrey Bigham (Carnegie Mellon University & Apple Inc., United States), Alexander Maedche (Karlsruhe Institute of Technology (KIT), Germany), Yue Jiang (University of Utah, United States)},
  url       = {https://h-lab.win.kit.edu/, website
https://www.linkedin.com/company/68838007/, lab's linkedin},
  year      = {2026},
  date      = {2026-04-13},
  urldate   = {2026-04-13},
  abstract  = {Designing and developing user-friendly interfaces has long been a cornerstone of HCI research, yet today we are at a turning point where UIs are no longer designed solely for humans but also for intelligent agents that act on users’ behalf, while UIs are also expanding beyond 2D screens into extended reality environments with inherently multimodal characteristics, together challenging us to rethink the role of the UI as a mediator of human–AI interaction. This workshop will explore how UI agents bridge human intent and system behavior by interpreting multimodal inputs and generating adaptive outputs across surfaces from screens to extended reality (XR), and we will examine not only their technical capabilities but also their broader impact, including how agents reshape daily workflows, how bidirectional alignment between human and AI activity can be achieved, and how generative models may transform UI creation. XR provides a compelling testbed for these questions and highlights challenges around accuracy, efficiency, transparency, accessibility, and user agency, setting the stage for the next generation of intelligent and adaptive UIs.},
  keywords  = {Workshops},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Human-Centered Explainable AI (HCXAI): Re-examining XAI in the Era of Agentic AI
Upol Ehsan (Khoury College of Computer Sciences, Northeastern University, Boston, Massachusetts, United States), Amal Alabdulkarim (Georgia Institute of Technology, Atlanta, Georgia, United States), Kenneth Holstein (Human-Computer Interaction Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania, United States), Min Kyung Lee (School of Information, University of Texas at Austin, Austin, Texas, United States), Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt, Ingolstadt, Bavaria, Germany), Justin D. Weisz (IBM Research, Yorktown Heights, New York, United States)
Abstract | Tags: Workshops | Links:
@inproceedings{Ehsan2026HumancenteredExplainable,
title = {Human-Centered Explainable AI (HCXAI): Re-examining XAI in the Era of Agentic AI},
author = {Upol Ehsan (Khoury College of Computer Sciences, Northeastern University, Boston, Massachusetts, United States), Amal Alabdulkarim (Georgia Institute of Technology, Atlanta, Georgia, United States), Kenneth Holstein (Human-Computer Interaction Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania, United States), Min Kyung Lee (School of Information, University of Texas at Austin, Austin, Texas, United States), Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt, Ingolstadt, Bavaria, Germany), Justin D. Weisz (IBM Research, Yorktown Heights, New York, United States)},
url = {https://hcig.thi.de/, website
https://www.linkedin.com/in/andreas-riener-19233710/, author's linkedin},
doi = {10.1145/3772363.3778728},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Making AI explainable requires more than algorithmic transparency: it demands understanding who needs explanations and why. In our sixth CHI workshop on Human-Centered XAI (HCXAI), we shift focus to agentic AI systems. LLM-based agents foundationally challenge existing explainability paradigms. Unlike traditional AI that produces single outputs, agents plan multi-step strategies, invoke tools with real-world consequences, and coordinate with other systems; yet current XAI approaches fail to address these complexities. Users need to understand not just what an agent might do, but the cascade of actions it could trigger, the risks involved, and why responses take time. Even our expanded HCXAI frameworks struggle with these new demands. Through our workshop series, we have built a strong community making important conceptual, methodological, and technical impact. This year, we re-examine what human-centered explainable AI means in the agentic era, bringing together researchers and practitioners to shape explainability for both users and developers of these systems.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}
I Felt Like I Need to Fit in Someone Else’s Body - Understanding Body-Centered UX Design for Online Fashion Shopping
Margarita Osipova (Bauhaus-Universität Weimar), Urszula Kulon (Bauhaus-Universität Weimar), Adithi Mahesh (Bauhaus-Universität Weimar), Olesia Kirillova (independent), Marion Koelle (Hochschule RheinMain), Eva Hornecker (Bauhaus-Universität Weimar)
Abstract | Tags: Papers | Links:
@inproceedings{Osipova2026FeltLike,
title = {I Felt Like I Need to Fit in Someone Else’s Body - Understanding Body-Centered UX Design for Online Fashion Shopping},
author = {Margarita Osipova (Bauhaus-Universität Weimar), Urszula Kulon (Bauhaus-Universität Weimar), Adithi Mahesh (Bauhaus-Universität Weimar), Olesia Kirillova (independent), Marion Koelle (Hochschule RheinMain), Eva Hornecker (Bauhaus-Universität Weimar)},
url = {https://www.uni-weimar.de/en/media/chairs/computer-science-department/human-computer-interaction/, website
https://www.linkedin.com/in/eva-hornecker-8b34983/, lab's linkedin},
doi = {10.1145/3772318.3791225},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Decades of online fashion retail and investment in its usability have led to a seemingly refined user experience. Yet, our study shows that female online shoppers, who make up the largest user group, experience a conflicted love-hate relationship when shopping online. Adopting a feminist HCI perspective, we contribute insights from a multi-step qualitative approach involving probes, co-design, iterative prototyping and body maps. We demonstrate that even screen-based website designs are deeply entangled with users’ embodied experiences. Through our analysis, we identify where such designs contribute to heightened emotional labour and negative user experiences. Our work offers concrete design implications centred around inclusivity, the predictive user experience of wearing and caring for garments, and transparency of information. We embody these implications in an interactive prototype and use it to validate our recommendations for a body-centred approach to UX design.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Investigating the Effects of Eco-Friendly Service Options on Rebound Behavior in Ride-Hailing
Albin Zeqiri (Institute of Media Informatics, Ulm University), Michael Rietzler (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)
Abstract | Tags: Papers | Links:
@inproceedings{Zeqiri2026InvestigatingEffectsb,
title = {Investigating the Effects of Eco-Friendly Service Options on Rebound Behavior in Ride-Hailing},
author = {Albin Zeqiri (Institute of Media Informatics, Ulm University), Michael Rietzler (Institute of Media Informatics, Ulm University), and Enrico Rukzio (Institute of Media Informatics, Ulm University)},
url = {https://www.linkedin.com/in/albinzeq/, author's linkedin
https://az16.github.io/, social media},
doi = {10.1145/3772318.3790711},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Eco-friendly service options (EFSOs) aim to reduce personal carbon emissions, yet their eco-friendly framing may permit increased consumption, weakening their intended impact. Such rebound effects remain underexamined in HCI, including how common eco-feedback approaches shape them. We investigate this in an online within-subjects experiment (N=75) in a ride-hailing context. Participants completed 10 trials for five conditions (No EFSO, EFSO - Minimal, EFSO - CO2 Equivalency, EFSO - Gamified, EFSO - Social), yielding 50 choices between walking and ride-hailing for trips ranging from 0.5mi - 2.0mi (≈ 0.80km - 3.22km). We measured how different EFSO variants affected ride-hailing uptake relative to a No EFSO baseline. EFSOs lacking explicit eco-feedback metrics increased ride-hailing uptake, and qualitative responses indicate that EFSOs can make convenience-driven choices more permissible. We conclude with implications for designing EFSOs that begin to take rebound effects into account.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
It Shouldn’t Be This Difficult: Researcher Perspectives on Diversity and Inclusion in Usable Privacy and Security Research
Priyasha Chatterjee (Max Planck Institute for Security and Privacy), Smirity Kaushik (University of Illinois at Urbana-Champaign), Karola Marky (Ruhr University Bochum), Yixin Zou (Max Planck Institute for Security and Privacy)
Abstract | Tags: Papers | Links:
@inproceedings{Chatterjee2026ItShouldnt,
title = {It Shouldn’t Be This Difficult: Researcher Perspectives on Diversity and Inclusion in Usable Privacy and Security Research},
author = {Priyasha Chatterjee (Max Planck Institute for Security and Privacy), Smirity Kaushik (University of Illinois at Urbana-Champaign), Karola Marky (Ruhr University Bochum), Yixin Zou (Max Planck Institute for Security and Privacy)},
url = {https://yixinzou.github.io/group/, website
https://de.linkedin.com/in/priyashachatterjee, author's linkedin},
doi = {10.1145/3772318.3791487},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {While recent usable privacy and security (UPS) research has made progress in moving beyond “the average user,” a systematic account of how UPS researchers navigate diversity and inclusion in their work remains lacking. Through 20 in-depth semi-structured interviews with experienced researchers, we examine how and why they recruit diverse, underserved populations in their work, as well as the challenges they face in doing so, including conceptual difficulties in defining who is underserved, limited access to target populations, and inflexible peer review and publishing norms. Participants also reflected on their own positionality when planning and conducting studies, often expressing uncertainty about how to account for and articulate their positionality. We identify strategies researchers use to overcome challenges and highlight areas where collective action from the research community and institutions is needed to foster greater inclusion in UPS research practices.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Mediating Social Interaction in Public Spaces – Co-Design of Robotic Street Furniture with Adolescents
Judith Dörrenbächer (University of Siegen), Tuan Vu Pham (University of Siegen, Honda Research Institute Europe GmbH), Thomas Weisswange (Honda Research Institute Europe GmbH), Alarith Uhde (Ritsumeikan University, uhde@fc.ritsumei.ac.jp), Anna Hoch (University of Siegen), Marc Hassenzahl (University of Siegen)
Abstract | Tags: Papers | Links:
@inproceedings{Doerrenbaecher2026MediatingSocial,
title = {Mediating Social Interaction in Public Spaces – Co-Design of Robotic Street Furniture with Adolescents},
author = {Judith Dörrenbächer (University of Siegen), Tuan Vu Pham (University of Siegen and Honda Research Institute Europe GmbH), Thomas Weisswange (Honda Research Institute Europe GmbH), Alarith Uhde (Ritsumeikan University, uhde@fc.ritsumei.ac.jp), Anna Hoch (University of Siegen), Marc Hassenzahl (University of Siegen)},
url = {https://www.experienceandinteraction.com, website},
doi = {10.1145/3772318.3791189},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {With the ongoing shift into the digital, spontaneous social encounters in public spaces are becoming challenging for adolescents. This study explores how robotic street furniture could facilitate meaningful adolescent social interaction. In a focus group and a theater-based co-design workshop, fourteen adolescents envisioned and enacted ten speculative concepts, such as roaming benches that invite serendipitous meetings. An analysis of these concepts identified diverse roles for robots (e.g., icebreaker, scapegoat) and revealed their particular social strengths and weaknesses (e.g., objective yet insistent). These insights were condensed into eight design suggestions, such as designing robots to orchestrate coincidences or framing them as opponents that humans can team up against. We suggest that robots can facilitate adolescents’ social interaction in public spaces, particularly due to certain social strengths inherent in the machinic nature of a robot.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Meme, Myself and AR: Exploring Memes Sharing in Face-to-face Conversation using Augmented Reality
Yanni Mei (TU Darmstadt), Samuel Wendt (TU Darmstadt), Florian Müller (TU Darmstadt), Jan Gugenheimer (TU Darmstadt)
Abstract | Tags: Papers | Links:
@inproceedings{Mei2026MemeMyself,
title = {Meme, Myself and AR: Exploring Memes Sharing in Face-to-face Conversation using Augmented Reality},
author = {Yanni Mei (TU Darmstadt), Samuel Wendt (TU Darmstadt), Florian Müller (TU Darmstadt), Jan Gugenheimer (TU Darmstadt)},
url = {https://www.informatik.tu-darmstadt.de/hci/hci_tuda/index.en.jsp, website},
doi = {10.1145/3772318.3791255},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Internet memes are central to online communication, yet their visual humor is often lost in face-to-face (F2F) conversations. Augmented reality (AR) offers new ways to bring memes into F2F interactions, but it is unclear how memes can be integrated into F2F conversations using AR, and how they impact conversational dynamics. We surveyed meme users (N=29) to understand motivations and challenges in visualising memes in F2F conversations. With these insights, we developed an AR meme-sharing prototype and invited 12 pairs of friends to design AR visualizations for their memes and use them in conversations. Our analysis reveals two AR-unique visualizations: merging memes with one's body (The-Meme-On-Me) and situating oneself in meme environment (Me-In-The-Meme). We observed two integration patterns: using speech as setup before a meme punchline, and showing memes simultaneously with speech to amplify humor. We report users’ reactions toward AR memes, showing how it enables playful social interaction.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
MIRAGE: Enabling Real-Time Automotive Mediated Reality
Pascal Jansen (Institute of Media Informatics, Ulm University), Julian Britten (Institute of Media Informatics, Ulm University), Mark Colley (UCL Interaction Centre), Markus Sasalovici (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)
Abstract | Tags: Papers | Links:
@inproceedings{Jansen2026Mirage,
title = {MIRAGE: Enabling Real-Time Automotive Mediated Reality},
author = {Pascal Jansen (Institute of Media Informatics, Ulm University), Julian Britten (Institute of Media Informatics, Ulm University), Mark Colley (UCL Interaction Centre), Markus Sasalovici (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
url = {https://www.youtube.com/watch?v=MMBduUx9ZG4, full video
https://www.linkedin.com/in/pascal-jansen-/, author's linkedin},
doi = {10.1145/3772318.3791195},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Traffic is inherently dangerous, with around 1.19 million fatalities annually. Automotive Mediated Reality (AMR) can enhance driving safety by overlaying critical information (e.g., outlines, icons, text) on key objects to improve awareness, altering objects' appearance to simplify traffic situations, and diminishing their appearance to minimize distractions. However, real-world AMR evaluation remains limited due to technical challenges. To fill this sim-to-real gap, we present MIRAGE, an open-source tool that enables real-time AMR in real vehicles. MIRAGE implements 15 effects across the AMR spectrum of augmented, diminished, and modified reality using state-of-the-art computational models for object detection and segmentation, depth estimation, and inpainting. In an on-road expert user study (N=9) of MIRAGE, participants enjoyed the AMR experience while pointing out technical limitations and identifying use cases for AMR. We discuss these results in relation to prior work and outline implications for AMR ethics and interaction design.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
MultiBand: Adding Multi-Touch to the Smartwatch Wristband for Extended Interaction
David Petersen (Technische Hochschule Köln), Marvin Reuter (Technische Hochschule Köln), Matthias Böhmer (Technische Hochschule Köln)
Abstract | Tags: Posters | Links:
@inproceedings{Petersen2026Multiband,
title = {MultiBand: Adding Multi-Touch to the Smartwatch Wristband for Extended Interaction},
author = {David Petersen (Technische Hochschule Köln), Marvin Reuter (Technische Hochschule Köln), Matthias Böhmer (Technische Hochschule Köln)},
url = {https://moxd.io/, website
https://www.instagram.com/moxdlab/, instagram},
doi = {10.1145/3772363.3799304},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {The small screen size of smartwatches presents input challenges due to the limited touch surface and screen occlusion. To expand the input space and mitigate the fat finger problem, extensive research has explored various strategies for improving smartwatch interaction design. While wristband-based input has also been studied, there is a lack of research on multi-touch interaction and gestures performed directly on the band. To address this gap, we present MultiBand, a functional prototype that expands smartwatch input capabilities by leveraging capacitive touch sensors around the wristband. Our prototype enables users to execute different functions on a smartwatch based on how they place their fingers on the wristband. Our implementation distinguishes between two types of finger interactions to trigger different scrolling techniques when navigating a contact list. We contribute the software and hardware of our prototype as well as first insights from preliminary user tests.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Navigating Postpartum: Exploring Lived and Professional Perspectives to Inform Supportive Technology Design
Sophie Grimme (OFFIS – Institute for Information Technology), Alice Benedetti (OFFIS – Institute for Information Technology), Susanna Spoerl (OFFIS – Institute for Information Technology), Susanne Boll (University Oldenburg), Marion Koelle (Hochschule RheinMain)
Abstract | Tags: Papers | Links:
@inproceedings{Grimme2026NavigatingPostpartum,
title = {Navigating Postpartum: Exploring Lived and Professional Perspectives to Inform Supportive Technology Design},
author = {Sophie Grimme (OFFIS – Institute for Information Technology), Alice Benedetti (OFFIS – Institute for Information Technology), Susanna Spoerl (OFFIS – Institute for Information Technology), Susanne Boll (University Oldenburg), Marion Koelle (Hochschule RheinMain)},
url = {https://hci.uni-oldenburg.de/, website},
doi = {10.1145/3772318.3791707},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Childbirth is a significant life transition involving physical recovery, emotional adjustment, and caring for a newborn. This period exposes parents to postpartum challenges, including emotional difficulties, social isolation, and overwhelming adjustments that can lead to depression or anxiety. Despite the prevalence of postpartum challenges, research and support systems remain insufficient. To explore how technology could address these challenges, we combined professional and lived perspectives. Through a mixed-methods approach with midwives, social workers, and affected parents, we conducted interviews (N=8), collected experience reports (N=52), and used these insights to inform four participatory workshops (N=15). By using zines, self-curated booklets - for expression and reflection, participants articulated challenges, ideal circumstances, and imagined support tools. We identified five challenge areas that technology should address through a Reflexive Thematic Analysis (RTA). Our work contributes empirically grounded perspectives on postpartum challenges, design recommendations for supportive technologies, and considerations for designing technologies during challenging life transitions.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
No Spirituality Please, We’re HCI: Challenges for HCI Research on Religion and Spirituality
Sara Wolf (Psychologische Ergonomie, JMU Würzburg), Paula Friedrich (Psychologie intelligenter interaktiver Systeme, JMU Würzburg), Elizabeth Buie (Independent Researcher), Mark Blythe (School of Design, Northumbria University)
Abstract | Tags: Papers | Links:
@inproceedings{Wolf2026NoSpirituality,
title = {No Spirituality Please, We’re HCI: Challenges for HCI Research on Religion and Spirituality},
author = {Sara Wolf (Psychologische Ergonomie, JMU Würzburg), Paula Friedrich (Psychologie intelligenter interaktiver Systeme, JMU Würzburg), Elizabeth Buie (Independent Researcher), Mark Blythe (School of Design, Northumbria University)},
url = {https://www.mcm.uni-wuerzburg.de/psyergo, website},
doi = {10.1145/3772318.3790490},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Religion and spirituality (R/S) shape billions of lives, yet they remain marginal in Human–Computer Interaction (HCI) research. Prior literature reviews mapped fragments of this space but missed key contributions and the lived realities of its researchers. We extend this picture through a review of 206 ACM and IEEE publications and a survey of R/S scholars in HCI (n=19). Our analysis shows a field in transition: Research on R/S is growing slightly in volume and diversity, with design-oriented work emerging as the dominant form of engagement. Yet the ACM and IEEE corpora remain largely separate, reflecting distinct epistemic traditions. Researchers report persistent challenges, including marginalization, exposing a deeper tension in HCI: While HCI claims to center the full range of human experience, R/S experience is still treated with suspicion. Our findings call for a reconsideration: If HCI is serious about human experience, it must take R/S experience seriously as well.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Open Challenges of Immersive AI-based Remembrance Systems using the Example of Interactive Digital Testimonies
Daniel Kolb (Leibniz Supercomputing Centre), Fabian Heindl (Ludwig-Maximilians–Universität München), Markus Gloe (Ludwig-Maximilians–Universität München), Dieter Kranzlmüller (Ludwig-Maximilians–Universität München)
Abstract | Tags: Posters | Links:
@inproceedings{Kolb2026OpenChallenges,
title = {Open Challenges of Immersive AI-based Remembrance Systems using the Example of Interactive Digital Testimonies},
author = {Daniel Kolb (Leibniz Supercomputing Centre), Fabian Heindl (Ludwig-Maximilians–Universität München), Markus Gloe (Ludwig-Maximilians–Universität München), Dieter Kranzlmüller (Ludwig-Maximilians–Universität München)},
url = {https://www.lrz.de/en/technologies/virtual-reality, website
https://www.linkedin.com/in/daniel-kolb-999628129/, author's linkedin},
doi = {10.1145/3772363.3799277},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Immersive AI-based remembrance systems offer users various ways to interact with recordings or recreations of living and deceased individuals. This includes simulated face-to-face conversations based on authentic recordings, such as in Interactive Digital Testimonies. Their potential application ranges from supporting family members during times of grief to educating learners on historical events. Using this example, we shed light on nine as-of-yet unresolved fundamental challenges of immersive AI-based remembrance systems. Given the complexity and relativity of these challenges, we emphasize the need for concerted research across a broad range of scientific and humanities disciplines.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Out of Emergency: How Doctors Navigate Jurisdictional Seams in Emergency Care Referrals
Aloha Hufana Ambe (The University of Queensland, Australia), Isaac Salisbury (The University of Queensland, Australia), Tobias Grundgeiger (Julius-Maximilians-Universität Würzburg, Germany), Daniel Bodnar (Royal Brisbane and Women's Hospital, Australia), Sean Rothwell (Royal Brisbane and Women's Hospital, Australia), Dr Nathan Brown (Royal Brisbane and Women's Hospital, Australia), Ben Matthews (The University of Queensland, Australia)
Honorable Mention | Abstract | Tags: Honorable Mention, Papers | Links:
@inproceedings{Ambe2026OutEmergency,
title = {Out of Emergency: How Doctors Navigate Jurisdictional Seams in Emergency Care Referrals},
author = {Aloha Hufana Ambe (The University of Queensland, Australia), Isaac Salisbury (The University of Queensland, Australia), Tobias Grundgeiger (Julius-Maximilians-Universität Würzburg, Germany), Daniel Bodnar (Royal Brisbane and Women's Hospital, Australia), Sean Rothwell (Royal Brisbane and Women's Hospital, Australia), Dr Nathan Brown (Royal Brisbane and Women's Hospital, Australia), Ben Matthews (The University of Queensland, Australia)},
url = {https://www.mcm.uni-wuerzburg.de/psyergo/, website},
doi = {10.1145/3772318.3790619},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Referrals from the emergency department (ED) to inpatient teams are routine but high-stakes interactions, yet little is known about how they are accomplished in practice. Prior work often treats referrals as information transfer and emphasises structural tensions between departments, paying limited attention to the interactional detail of referral calls. To address this gap, we draw on a year of ethnographic fieldwork that includes seventeen recorded referral calls in a metropolitan ED. We show how clinicians manage jurisdictional seams through fine-grained conversational moves, and identify navigation strategies: tentative framing, preference-sensitive questioning, implicit acknowledgement of boundaries, offers of assistance and calibrated displays of competence. We demonstrate how the patient’s case is not treated as a fixed record but is reshaped in talk to align with different specialties, operating as a dynamic object. These insights extend accounts of boundary work and contribute to the design of supportive referral tools and training practices.},
keywords = {Honorable Mention, Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Privacy & Safety Challenges of On-Body Interaction Techniques
Dañiel Gerhardt (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany), Divyanshu Bhardwaj (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany), Ashwin Ram (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), André Zenner (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany, German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus, Saarbrücken, Germany), Jürgen Steimle (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), Katharina Krombholz (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Gerhardt2026Privacy,
title = {Privacy & Safety Challenges of On-Body Interaction Techniques},
author = {Dañiel Gerhardt (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany), Divyanshu Bhardwaj (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany), Ashwin Ram (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), André Zenner (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany, German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus, Saarbrücken, Germany), Jürgen Steimle (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), Katharina Krombholz (CISPA Helmholtz Center for Information Security, Saarbrücken, Germany)},
url = {https://cispa.de/en/research/groups/krombholz, website},
doi = {10.1145/3772318.3790403},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {On-body computing systems offer new forms of interaction, but while they are increasingly integrated into everyday contexts, their unique privacy and safety challenges remain understudied. This paper examines these challenges through a two-round interview study with N = 15 experts in human-computer interaction and privacy and safety, using speculative scenarios and adversarial personas to elicit insights. Our findings reveal risks specific to on-body interactions, including overcollection of sensitive data, unwanted inferences, harm to bystanders, and threats to bodily autonomy and psychological well-being. Importantly, in the on-body context, privacy and safety concerns are deeply interconnected and cannot be addressed in isolation. We contribute an empirically grounded characterization of these entangled challenges and derive eight actionable design guidelines to support safer, more privacy-aware, on-body systems. This work informs future research and design in ubiquitous computing by highlighting the need for proactive and integrated approaches to privacy and safety in trustworthy on-body computing.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Reactive Writers: How Co-Writing with AI Changes How We Engage with Ideas
Advait Bhat (University of Washington), Marianne Aubin Le Quéré (Princeton University), Mor Naaman (Cornell Tech), Maurice Jakesch (Bauhaus-Universität Weimar)
Abstract | Tags: Papers | Links:
@inproceedings{Bhat2026ReactiveWriters,
title = {Reactive Writers: How Co-Writing with AI Changes How We Engage with Ideas},
author = {Advait Bhat (University of Washington), Marianne Aubin Le Quéré (Princeton University), Mor Naaman (Cornell Tech), Maurice Jakesch (Bauhaus-Universität Weimar)},
url = {https://www.csslab.net, website},
doi = {10.1145/3772318.3791529},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Emerging experimental evidence shows that writing with AI assistance can change both the views people express in writing and the opinions they hold afterwards. Yet, we lack substantive understanding of procedural and behavioral changes in co-writing with AI that underlie the observed opinion-shaping power of AI writing tools. We conducted a mixed-methods study, combining a retrospective analysis of AI-cowriting with 19 participants with a quantitative analysis tracing the engagement with ideas and opinions in 1,291 AI-cowriting-sessions. Our analysis shows that engaging with the AI's suggestions–reading them and deciding whether to accept them–becomes a central activity in the writing process, taking away from more traditional processes of ideation and language generation. As writers often do not complete their own ideation before engaging with suggestions, the suggested ideas and opinions seeded directions that writers then elaborated. At the same time, writers did not notice the AI's influence and felt in full control of their writing, as they–in principle–always could edit the final text. We term this shift Reactive Writing: an evaluation-first, suggestion-led writing practice that departs substantially from conventional composing in the presence of AI assistance and is highly vulnerable to AI-induced biases and opinion shifts.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Reflecting on 1,000 Social Media Journeys: Generational Patterns in Platform Transition
Artur Solomonik (Center for Advanced Internet Studies (CAIS)), Nicolas Ruiz (Center for Advanced Internet Studies (CAIS)), Hendrik Heuer (Center for Advanced Internet Studies (CAIS), Universität Wuppertal)
Abstract | Tags: Papers | Links:
@inproceedings{Solomonik2026Reflecting1000,
title = {Reflecting on 1,000 Social Media Journeys: Generational Patterns in Platform Transition},
author = {Artur Solomonik (Center for Advanced Internet Studies (CAIS)), Nicolas Ruiz (Center for Advanced Internet Studies (CAIS)), Hendrik Heuer (Center for Advanced Internet Studies (CAIS), Universität Wuppertal)},
url = {https://www.cais-research.de/en/research-program-design-of-trustworthy-artificial-intelligence/, website
https://de.linkedin.com/in/artur-solomonik-86131824a, author's linkedin},
doi = {10.1145/3772318.3791413},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Social media has billions of users, but we still do not fully understand why users prefer one platform over another. Establishing new platforms among already popular competitors is difficult. Prior research has richly documented people's experiences within individual platforms, yet situating those experiences within the entirety of a user's social media experience remains challenging. What platforms have people used, and why have they transitioned between them? We collected data from a quota-based sample of 1,000 U.S. participants. We introduce the concept of Social Media Journeys to study the entirety of their social media experiences systematically. We identify push and pull factors across the social media landscape. We also show how different generations adopted social media platforms based on personal needs. With this work, we advance HCI by moving towards holistic perspectives when discussing social media technology, offering new insights for platform design, governance, and regulation.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Reflective Motion and a Physical Canvas: Exploring Embodied Journaling in Virtual Reality
Michael Yin (University of British Columbia), Robert Xiao (University of British Columbia), Nadine Wagener (OFFIS)
Abstract | Tags: Papers | Links:
@inproceedings{Yin2026ReflectiveMotion,
title = {Reflective Motion and a Physical Canvas: Exploring Embodied Journaling in Virtual Reality},
author = {Michael Yin (University of British Columbia), Robert Xiao (University of British Columbia), Nadine Wagener (OFFIS)},
url = {https://www.offis.de/anwendungen/gesellschaft/personal-pervasive-computing.html, website https://www.linkedin.com/company/offis-institute-for-information-technology/posts/?feedView=all, lab's linkedin
https://www.linkedin.com/in/nadinewagener/, author's linkedin},
doi = {10.1145/3772318.3790486},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {In traditional journaling practices, authors express and process their thoughts by writing them down. We propose a somaesthetic-inspired alternative that uses the human body, rather than written words, as the medium of expression. We coin this embodied journaling, as people's isolated body movements and spoken words become the canvas of reflection. We implemented embodied journaling in virtual reality and conducted a within-subject user study (n=20) to explore the emergent behaviours from the process, comparing its expressive and reflective qualities to those of written journaling. When writing-based norms and affordances were absent, we found that participants defaulted towards unfiltered emotional expression, often forgoing words altogether. Rather, subconscious body motion and paralinguistic acoustic qualities unveiled deeper, sometimes hidden feelings, prompting reflection that happens after emotional expression rather than during it. We discuss both the capabilities and pitfalls of embodied journaling, ultimately challenging the idea that reflection culminates in linguistic reasoning.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Responsible Trauma Research: Designing Effective and Sustainable Virtual Reality Exposure Studies
Annalisa Degenhard (Ulm University), Sophia Ppali (CYENS Centre of Excellence), Fotis Liarokapis (CYENS Centre of Excellence), Enrico Rukzio (Ulm University), Stefan Tschöke (Clinic for Psychiatry and Psychotherapy I (Weissenau), Ulm University), Jennifer Spohrs (Department of Psychiatry, Psychotherapy and Psychotraumatology, Military Medical Centre; Department for Child and Adolescent Psychiatry and Psychotherapy, Ulm University Medical Centre)
Abstract | Tags: Papers | Links:
@inproceedings{Degenhard2026ResponsibleTrauma,
title = {Responsible Trauma Research: Designing Effective and Sustainable Virtual Reality Exposure Studies},
author = {Annalisa Degenhard (Ulm University), Sophia Ppali (CYENS Centre of Excellence), Fotis Liarokapis (CYENS Centre of Excellence), Enrico Rukzio (Ulm University), Stefan Tschöke (Clinic for Psychiatry and Psychotherapy I (Weissenau), Ulm University), Jennifer Spohrs (Department of Psychiatry, Psychotherapy and Psychotraumatology, Military Medical Centre; Department for Child and Adolescent Psychiatry and Psychotherapy, Ulm University Medical Centre)},
url = {https://www.uni-ulm.de/en/in/mi/, website
https://youtu.be/r7dHvdqlNu4, full video
https://www.linkedin.com/in/annalisa-degenhard-7950241b4, author's social media},
doi = {10.1145/3772318.3791964},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Virtual reality exposure therapy (VRET) enables controlled exposure to trauma-related stimuli to facilitate memory access and emotional processing. However, the field remains underexplored for complex post-traumatic stress disorder (C-PTSD). Unlike single-trauma PTSD, C-PTSD requires highly individualized triggers that are difficult to identify and implement safely. We conducted a feasibility study with 11 patients, two trauma therapists, and a VR developer to explore integrating VRET into C-PTSD treatment while safeguarding all stakeholders. Initial findings indicate that simple objects can be just as effective as complex scenes, therapeutic success does not correlate with VR presence levels, and the design process itself became integral to therapy rather than preparatory. However, involving developers in therapy sessions led to considerable emotional stress and role confusion, which required a cautious approach. Based on these insights, we provide methodological recommendations for safe and patient-centered VRET studies that balance therapeutic effectiveness with stakeholder safety across the research process.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Rest Assured: Detecting Mental Fatigue and Recovery with EEG Headphones
Lukas Schick (KIT), Emilia Frey (KIT), Felix Putze (University Bremen), Michael T. Knierim (KIT, University of Nottingham)
Abstract | Tags: Papers | Links:
@inproceedings{Schick2026RestAssured,
title = {Rest Assured: Detecting Mental Fatigue and Recovery with EEG Headphones},
author = {Lukas Schick (KIT), Emilia Frey (KIT), Felix Putze (University Bremen), Michael T. Knierim (KIT, University of Nottingham)},
url = {https://www.linkedin.com/in/dr-michael-knierim-13397881/, author's linkedin},
doi = {10.1145/3772318.3791781},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Mental fatigue, a common consequence of cognitively demanding work, impairs concentration and well-being, posing long-term health risks. Distinct from drowsiness, mental fatigue is reliably measured with EEG, yet conventional setups remain too cumbersome for everyday use. To overcome this barrier, this study investigates whether EEG headphones can detect mental fatigue and recovery across two common digital break activities: playing a video game and browsing social media. We conducted an experiment with consecutive task sessions and an intermittent break, collecting self-report, performance, and EEG data. Our results show that EEG headphones can detect mental fatigue and recovery dynamics via relative alpha power, and differentiate recovery effects between break types. Social media proved more restorative than gaming, with effects persisting into the subsequent task. These findings establish needed working principles for using headphone-EEG in naturalistic fatigue and recovery research, providing a foundation for future studies.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Rethinking Interdependence in HCI: A Systematic Literature Review for Understanding its Use in Accessibility Studies
Zeynep Yildiz (Karlsruhe Institute of Technology), Kathrin Gerling (Karlsruhe Institute of Technology)
Abstract | Tags: Papers | Links:
@inproceedings{Yildiz2026RethinkingInterdependence,
title = {Rethinking Interdependence in HCI: A Systematic Literature Review for Understanding its Use in Accessibility Studies},
author = {Zeynep Yildiz (Karlsruhe Institute of Technology), Kathrin Gerling (Karlsruhe Institute of Technology)},
url = {https://hci.iar.kit.edu, website
https://www.linkedin.com/in/zeynep-şölen-yıldız-33037b103/, author's linkedin},
doi = {10.1145/3772318.3790600},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Interdependence has long been a core concept in Disability Studies and activism, offering a critical response to dominant ideals of independence. While Bennett et al.’s work introduced interdependence into accessibility research in HCI by linking it with research and design practices, the extent to which HCI has meaningfully engaged with the theoretical and political roots of the concept remains unclear. In this literature review, we systematically analyze 70 HCI accessibility papers that engage with the concept of interdependence. Guided by the PRISMA framework, we investigate how interdependence is conceptualized and applied in HCI, identifying strengths and shortcomings of current conceptualizations. Our findings reveal that interdependence is used across a range of use cases that broaden its scope, but that integration remains partial and fragmented, often disconnected from its origins in Disability Studies and activism. We conclude by calling for a more meaningful integration of interdependence into HCI accessibility research.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Sensing What Surveys Miss: Understanding and Personalizing Proactive LLM Support by User Modeling
Ailin Liu (LMU Munich, Munich, Germany; Munich Center for Machine Learning (MCML), Munich, Germany), Yesmine Karoui (LMU Munich, Munich, Germany), Fiona Draxler (University of Mannheim, Mannheim, Germany), Frauke Kreuter (LMU Munich, Munich, Germany; University of Maryland, College Park, Maryland, United States), Francesco Chiossi (LMU Munich, Munich, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Liu2026SensingWhat,
title = {Sensing What Surveys Miss: Understanding and Personalizing Proactive LLM Support by User Modeling},
author = {Ailin Liu (LMU Munich, Munich, Germany; Munich Center for Machine Learning (MCML), Munich, Germany), Yesmine Karoui (LMU Munich, Munich, Germany), Fiona Draxler (University of Mannheim, Mannheim, Germany), Frauke Kreuter (LMU Munich, Munich, Germany; University of Maryland, College Park, Maryland, United States), Francesco Chiossi (LMU Munich, Munich, Germany)},
url = {https://www.stat.lmu.de/soda/en/, website},
doi = {10.1145/3772318.3791191},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Difficulty spillover and suboptimal help-seeking challenge the sequential, knowledge-intensive nature of digital tasks. In online surveys, tough questions can drain mental energy and hurt performance on later questions, while users often fail to recognize when they need assistance or may satisfy, lacking motivation to seek help. We developed a proactive, adaptive system using electrodermal activity and mouse movement to predict when respondents need support. Personalized classifiers with a rule-based threshold adaptation trigger timely LLM-based clarifications and explanations. In a within-subjects study (N=32), aligned-adaptive timing was compared to misaligned-adaptive and random-adaptive controls. Aligned-adaptive assistance improved response accuracy by 21%, reduced false negative rates from 50.9% to 22.9%, and improved perceived efficiency, dependability, and benevolence. Properly timed interventions prevent cascades of degraded responses, showing that aligning support with cognitive states improves both the outcomes and the user experience. This enables more effective, personalized LLM-assisted support in education, healthcare, and survey-based research.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Sensing Your Vocals: Exploring the Activity of Vocal Cord Muscles for Pitch Assessment Using Electromyography and Ultrasonography
Kanyu Chen (Keio University), Rebecca Panskus (Ruhr University Bochum), Erwin Wu (Institute of Science Tokyo, Keio University), Yichen Peng (Institute of Science Tokyo), Daichi Saito (Institute of Science Tokyo), Emiko Kamiyama (Keio University), Ruiteng Li (Waseda University), Chen-Chieh Liao (Institute of Science Tokyo), Karola Marky (Ruhr University Bochum), Kato Akira (Keio University), Hideki Koike (Institute of Science Tokyo), Kai Kunze (Keio University)
Abstract | Tags: Papers | Links:
@inproceedings{Chen2026SensingYour,
title = {Sensing Your Vocals: Exploring the Activity of Vocal Cord Muscles for Pitch Assessment Using Electromyography and Ultrasonography},
author = {Kanyu Chen (Keio University), Rebecca Panskus (Ruhr University Bochum), Erwin Wu (Institute of Science Tokyo, Keio University), Yichen Peng (Institute of Science Tokyo), Daichi Saito (Institute of Science Tokyo), Emiko Kamiyama (Keio University), Ruiteng Li (Waseda University), Chen-Chieh Liao (Institute of Science Tokyo), Karola Marky (Ruhr University Bochum), Kato Akira (Keio University), Hideki Koike (Institute of Science Tokyo), Kai Kunze (Keio University)},
doi = {10.1145/3772318.3790965},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Vocal training is difficult because the muscles that control pitch, resonance, and phonation are internal and invisible to learners. This paper investigates how Electromyography (EMG) and ultrasonic imaging (UI) can make these muscles observable for training purposes. We report three studies. First, we analyze the EMG and UI data from 16 singers (beginners, experienced & professionals), revealing differences among three vocal groups of the muscle control proficiency. Second, we use the collected data to create a system that visualizes an expert's muscle activity as reference. This system is tested in a user study with 12 novices, showing that EMG highlighted muscle activation nuances, while UI provided insights into vocal cord length and dynamics. Third, to compare our approach to traditional methods (audio analysis and coach instructions), we conducted a focus group study with 15 experienced singers. Our results suggest that EMG is promising for improving vocal skill development and enhancing feedback systems. We conclude the paper with a detailed comparison of the analyzed modalities (EMG, UI and traditional methods), resulting in recommendations to improve vocal muscle training systems.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
ShadAR: LLM-driven shader generation to transform visual perception in Augmented Reality
Yanni Mei (TU Darmstadt), Samuel Wendt (TU Darmstadt), Jonas Wombacher (TU Darmstadt), Florian Müller (TU Darmstadt), Jan Gugenheimer (TU Darmstadt)
Abstract | Tags: Interactive Demos | Links:
@inproceedings{Mei2026Shadar,
title = {ShadAR: LLM-driven shader generation to transform visual perception in Augmented Reality},
author = {Yanni Mei (TU Darmstadt), Samuel Wendt (TU Darmstadt), Jonas Wombacher (TU Darmstadt), Florian Müller (TU Darmstadt), Jan Gugenheimer (TU Darmstadt)},
url = {https://www.informatik.tu-darmstadt.de/hci/hci_tuda/index.en.jsp, website},
doi = {10.1145/3772363.3799378},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Augmented Reality (AR) can visually transform a user's world by rendering virtual content on top of reality. However, developing such AR apps and visualizations remains a complex process that requires an understanding of computer vision and programming skills. We present ShadAR, an AR prototyping pipeline that enables real-time creation of small AR visualizations and applications using large language models (LLMs) and object detection. ShadAR allows users to express their visual intent (e.g., pixelate every person around me) via natural language, which is interpreted by an LLM to generate corresponding shader code. This shader is then compiled in real-time and applied to the passthrough video stream.},
keywords = {Interactive Demos},
pubstate = {published},
tppubtype = {inproceedings}
}
Show Me How to Play: Exploring Self-Modeling for Onboarding in Virtual Reality Exergames
Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Silas Ueberschaer (Human-Computer Interaction, Universität Hamburg), Sebastian Cmentowski (Industrial Design, Eindhoven University of Technology), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)
Honorable Mention | Abstract | Tags: Honorable Mention, Papers | Links:
@inproceedings{Karaosmanoglu2026ShowMe,
title = {Show Me How to Play: Exploring Self-Modeling for Onboarding in Virtual Reality Exergames},
author = {Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Silas Ueberschaer (Human-Computer Interaction, Universität Hamburg), Sebastian Cmentowski (Industrial Design, Eindhoven University of Technology), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)},
url = {https://www.inf.uni-hamburg.de/en/inst/ab/hci.html, website
https://www.linkedin.com/in/frank-steinicke-b239639/, lab's linkedin
https://www.linkedin.com/in/sukran-karaosmanoglu/, author's linkedin},
doi = {10.1145/3772318.3790333},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Exergames combine motivating game elements with bodily movement to encourage physical activity. However, onboarding players to perform correct movements remains a challenge, especially in virtual reality (VR) environments where safety and performance are critical. Drawing inspiration from sports training and learning sciences, we contrast two onboarding approaches: (i) trial-and-error and (ii) observational learning via a novel self-model tutorial. In this tutorial, players temporarily lose agency and observe their own avatar performing the movements, leveraging VR’s unique affordances for embodied experiences. To explore which of these two approaches yields a better performance and player experience, we conducted a between-participants study (N=60), comparing them against a baseline condition without a tutorial. Our findings show that the self-model tutorial not only improves players' performance but also increases the perceived ease of control and progress feedback. We discuss tradeoffs and implications for the design of future onboarding experiences in VR exergames.},
keywords = {Honorable Mention, Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Small Talk, Big Impact: The Role of Everyday Conversations in Cybersecurity Practices
Doruntina Murtezaj (University of the Bundeswehr Munich/LMU Munich), Leonard Johannes Rössert (LMU Munich), Yomna Abdelrahman (University of the Bundeswehr Munich), Viktorija Paneva (LMU Munich), Florian Alt (LMU Munich/University of the Bundeswehr Munich)
Abstract | Tags: Papers | Links:
@inproceedings{Murtezaj2026SmallTalk,
title = {Small Talk, Big Impact: The Role of Everyday Conversations in Cybersecurity Practices},
author = {Doruntina Murtezaj (University of the Bundeswehr Munich/LMU Munich), Leonard Johannes Rössert (LMU Munich), Yomna Abdelrahman (University of the Bundeswehr Munich), Viktorija Paneva (LMU Munich), Florian Alt (LMU Munich/University of the Bundeswehr Munich)},
url = {https://www.medien.ifi.lmu.de/, website
https://de.linkedin.com/company/lmu-media-informatics-group, lab's linkedin
https://www.linkedin.com/in/doruntina-murtezaj-3334691a4/, author's linkedin},
doi = {10.1145/3772318.3791412},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Everyday talk is often treated as casual chatter, yet it plays a crucial role in how people acquire and share knowledge. Typically, cybersecurity practices are informed by formal training, but they often overlook the impact of social exchanges. This paper investigates how informal conversations can act as a socio-technical mechanism for shaping cybersecurity awareness and practices. We conducted an online survey (N=215) where participants described recent discussions about cybersecurity, including who was involved, where they took place, and what triggered them. Quantitative and thematic analysis revealed common contexts, social settings, and topics. Most conversations occurred spontaneously in private environments, with personal experiences being the most frequent trigger. We contribute empirical insights on informal security conversations to inform the design of human-centered technologies that surface and mediate security-related discussions in everyday contexts, to ensure implicit and continuous security awareness.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Speaking Through Chatbots or Text: How Format Shapes Information Agreement, Reactance, Environmental Awareness, and Trust
Yuri Hwang (Human Computer Interaction, University of Siegen), Vera Maria Fahrner (Transport Research, German Aerospace Center & Department of Psychology, University of Siegen), Kai Horstmann (Department of Psychology, University of Siegen), Marc Hassenzahl (Ubiquitous Design / Experience & Interaction, University of Siegen)
Abstract | Tags: Papers | Links:
@inproceedings{Hwang2026SpeakingThrough,
title = {Speaking Through Chatbots or Text: How Format Shapes Information Agreement, Reactance, Environmental Awareness, and Trust},
author = {Yuri Hwang (Human Computer Interaction, University of Siegen), Vera Maria Fahrner (Transport Research, German Aerospace Center & Department of Psychology, University of Siegen), Kai Horstmann (Department of Psychology, University of Siegen), Marc Hassenzahl (Ubiquitous Design / Experience & Interaction, University of Siegen)},
url = {https://www.experienceandinteraction.com/, website
https://www.linkedin.com/in/yuri-hwang/, author's linkedin},
doi = {10.1145/3772318.3790737},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Recent advances in large language models suggest that conversational agents (CAs) equipped with environmental knowledge are a promising way to promote environmental awareness. However, so far it remains unclear whether information provided by a CA outperforms static text in increasing agreement, decreasing reactance, fostering environmental awareness and trust. In this preregistered, multi-week, repeated-measures online intervention (N = 449), we varied information format (CA, text) and information valence (positive, negative, neutral) (between-subjects). Participants interacted with the CA over four consecutive weeks. Information delivered by the CA led to higher agreement and lower reactance regardless of the valence of the information and time point. Environmental awareness increased over time, especially for participants with low initial environmental awareness, but these increases were independent of format and valence. Trust in the CA also increased over time in negative and neutral valence, but not in positive.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Take the Power Back: Screen-Based Personal Moderation Against Hate Speech on Instagram
Anna Ricarda Luther (Institute for Information Management Bremen GmbH, University of Bremen), Hendrik Heuer (Center for Advanced Internet Studies (CAIS)), Stephanie Geise (Centre for Media, Communication and Information Research (ZeMKI), University of Bremen), Sebastian Haunss (Research Center on Inequality and Social Policy (SOCIUM), University of Bremen), Andreas Breiter (Institute for Information Management Bremen GmbH, University of Bremen)
Abstract | Tags: Papers | Links:
@inproceedings{Luther2026TakePower,
title = {Take the Power Back: Screen-Based Personal Moderation Against Hate Speech on Instagram},
author = {Anna Ricarda Luther (Institute for Information Management Bremen GmbH, University of Bremen), Hendrik Heuer (Center for Advanced Internet Studies (CAIS)), Stephanie Geise (Centre for Media, Communication and Information Research (ZeMKI), University of Bremen), Sebastian Haunss (Research Center on Inequality and Social Policy (SOCIUM), University of Bremen), Andreas Breiter (Institute for Information Management Bremen GmbH, University of Bremen)},
url = {https://www.linkedin.com/in/anna-ricarda-luther/, author's linkedin
https://www.linkedin.com/in/hendrikheuer/, author's linkedin
https://hci.social/@hen_drik, hci.social
https://bsky.app/profile/hheuer.bsky.social, bluesky},
doi = {10.1145/3772318.3790748},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Hate speech remains a pressing challenge on social media, where platform moderation often fails to protect targeted users. Personal moderation tools that let users decide how content is filtered can address some of these shortcomings. However, it remains an open question on which screens (e.g., the comments, the reels tab, or the home feed) users want personal moderation and which features they value most. To address these gaps, we conducted a three-wave Delphi study with 40 activists who experienced hate speech. We combined quantitative ratings and rankings with open questions about required features. Participants prioritized personal moderation for conversational and algorithmically curated screens. They valued features allowing for reversibility and oversight across screens, while input-based, content-type specific, and highly automated features are more screen specific. We discuss the importance of personal moderation and offer user-centered design recommendations for personal moderation on Instagram.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
TARDIS: Tabletop Augmented Reality for Dynamic Immersive Storytelling
Paul Preuschoff (RWTH Aachen University), René Schäfer (RWTH Aachen University), Phillip Ahlers (RWTH Aachen University), David Gilbert (RWTH Aachen University), Jan Borchers (RWTH Aachen University)
Abstract | Tags: Posters | Links:
@inproceedings{Preuschoff2026Tardis,
title = {TARDIS: Tabletop Augmented Reality for Dynamic Immersive Storytelling},
author = {Paul Preuschoff (RWTH Aachen University), René Schäfer (RWTH Aachen University), Phillip Ahlers (RWTH Aachen University), David Gilbert (RWTH Aachen University), Jan Borchers (RWTH Aachen University)},
url = {https://hci.ac, website
https://www.linkedin.com/in/paul-preuschoff/, author's linkedin},
doi = {10.1145/3772363.379929},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {In tabletop role-playing games, players experience a shared story, coordinated by a game master. This relies heavily on immersion, social interaction, and creative freedom. We explore how VR can increase immersion without undermining these other qualities. We placed players into a CAVE VR system to display virtual environments (VEs) on the walls and floor without requiring glasses that might impede social interaction. We varied how closely VEs \textit{matched} the game setting described verbally, from reflecting its general atmosphere to being true to details, and investigated impact on immersion, distraction, creativity, and role-play. Players feel more connected to their characters when seeing what their characters would see, but abstract, atmospheric VEs led to fewer problematic divergences and more creative freedom. Surprisingly, medium matching levels were often criticized because players could trust neither what they saw nor their ``cinema of the mind''. Our findings help integrate VR into shared collocated storytelling.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
The AI Accomplice: Exploring Generative Artificial Intelligence in Facilitating and Amplifying Deceptive Designs
Thomas Kosch (HU Berlin), Veronika Krauß (HS Ansbach), Christopher Katins (HU Berlin), Dominik Schön (TU Darmstadt), Mark McGill (University of Glasgow), Jan Gugenheimer (TU Darmstadt)
Abstract | Tags: Workshops | Links:
@inproceedings{Kosch2026AiAccomplice,
title = {The AI Accomplice: Exploring Generative Artificial Intelligence in Facilitating and Amplifying Deceptive Designs},
author = {Thomas Kosch (HU Berlin), Veronika Krauß (HS Ansbach), Christopher Katins (HU Berlin), Dominik Schön (TU Darmstadt), Mark McGill (University of Glasgow), Jan Gugenheimer (TU Darmstadt)},
url = {https://hcistudio.org, website},
doi = {10.1145/3772363.3778770},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {As generative Artificial Intelligence (AI) becomes increasingly embedded and utilized for digital design, it presents both opportunities and risks. One major concern is its potential to facilitate and incorporate deceptive design patterns into computing technologies, which could manipulate or mislead users to their disadvantage. Similar to the concept of precedent-based design, a common approach in design theory that suggests reapplying previous design solutions to similar or identical problems, generative AI can integrate deceptive design patterns included in the training data a model has seen before. Our workshop explores how generative AI suggests and enacts deceptive design patterns in digital design. The goal of the workshop is to explore the ethical challenges of utilizing generative AI models and develop strategies to detect or prevent manipulative practices, thereby creating more transparent and equitable AI-generated experiences.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}
The AI Memory Gap: Users Misremember What They Created With AI or Without
Tim Zindulka (University of Bayreuth), Sven Goller (University of Bayreuth), Daniela Fernandes (Aalto University), Robin Welsch (Aalto University), Daniel Buschek (University of Bayreuth)
Abstract | Tags: Papers | Links:
@inproceedings{Zindulka2026AiMemory,
title = {The AI Memory Gap: Users Misremember What They Created With AI or Without},
author = {Tim Zindulka (University of Bayreuth), Sven Goller (University of Bayreuth), Daniela Fernandes (Aalto University), Robin Welsch (Aalto University), Daniel Buschek (University of Bayreuth)},
url = {https://www.hciai.uni-bayreuth.de, website},
doi = {10.1145/3772318.3791494},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {As large language models (LLMs) become embedded in interactive text generation, disclosure of AI as a source depends on people remembering which ideas or texts came from themselves and which were created with AI. We investigate how accurately people remember the source of content when using AI. In a pre-registered experiment, 184 participants generated and elaborated on ideas both unaided and with an LLM-based chatbot. One week later, they were asked to identify the source (noAI vs withAI) of these ideas and texts. Our findings reveal a significant gap in memory: After AI use, the odds of correct attribution dropped, with the steepest decline in mixed human-AI workflows, where either the idea or elaboration was created with AI. We validated our results using a computational model of source memory. Discussing broader implications, we highlight the importance of considering source confusion in the design and use of interactive text generation technologies.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
The Challenge to Design for Relatedness Experiences: An Explorative Investigation of Five Relatedness Technologies from a Psychological Needs Perspective
Angelina Krupp (LMU Munich), Daniel Ullrich (LMU Munich), Sarah Diefenbach (LMU Munich)
Abstract | Tags: Papers | Links:
@inproceedings{Krupp2026ChallengeDesign,
title = {The Challenge to Design for Relatedness Experiences: An Explorative Investigation of Five Relatedness Technologies from a Psychological Needs Perspective},
author = {Angelina Krupp (LMU Munich), Daniel Ullrich (LMU Munich), Sarah Diefenbach (LMU Munich)},
url = {https://www.linkedin.com/in/angelina-krupp-94380a211, author's social media},
doi = {10.1145/3772318.3790727},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {So-called relatedness technologies aim to create relatedness experiences between people over distance. Typically, such technologies focus on implicit or expressive interaction, as opposed to the explicit, information-focused interaction of conventional communication technologies. Based on psychological theory, previous research has identified different design strategies for relatedness technologies such as awareness, expressivity, or gift giving. However, despite this profound theoretical understanding, designing for a fulfilling relatedness experience remains a challenging task and often conflicts with other psychological needs, such as autonomy or security. This research explores the specific potentials and barriers to the use and acceptance of relatedness technologies. Based on a comparative evaluation of five different relatedness concepts in an online study (N = 221) combining quantitative and qualitative data, we identified overarching patterns of promising design strategies for particular user groups and revealed overall need fulfillment as a central predictor of the intention to use the technology.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
The Role of Personality of Conversational Virtual Avatars on Proxemic Behaviour during Indoor Navigation
Rishab Bhattacharyya (TU Berlin), Wassim Al Shami (TU Berlin), Ceenu George (TU Berlin)
Abstract | Tags: Papers | Links:
@inproceedings{Bhattacharyya2026RolePersonality,
title = {The Role of Personality of Conversational Virtual Avatars on Proxemic Behaviour during Indoor Navigation},
author = {Bhattacharyya, Rishab and Al Shami, Wassim and George, Ceenu},
affiliation = {TU Berlin; TU Berlin; TU Berlin},
url = {https://www.hci.tu-berlin.de/#/, website},
doi = {10.1145/3772318.3791241},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {As LLM-based Conversational Avatars increasingly act as collaborators in hybrid indoor navigation, understanding how their personality traits influence human-avatar proxemic behavior is becoming crucial. Prior work has largely examined personality effects in static or one-sided interactions such as sitting, standing, or approaching. However, there is a gap in research on how avatar personality and motion-related factors (e.g., walking speed) shape proxemics when both the human and avatar are in motion. To address this, we developed an AR indoor navigation system featuring a Conversational Virtual Avatar (CVA) with three distinct personalities: Dominant, Warm, and Conscientious. The CVA guides users to destinations within the environment. In a between-subjects study (N=27), we found statistically significant effects of avatar personality and walking speed on proxemic behavior. Our work contributes to a broader understanding of the role of personality and walking speed of a CVA on human-avatar proxemic behaviour during navigation.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
The Work to Make Soldiers Work: Civilian Engagement in Support of the Ukrainian Army
Volker Wulf (Information Systems and New Media, Siegen, Germany), Margarita Grinko (Information Systems and New Media, Siegen, Germany), Parvin Ghadamighalandari (Information Systems and New Media, University of Siegen, Siegen, Germany), Dave Randall (Information Systems and New Media, Siegen, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Wulf2026WorkMake,
title = {The Work to Make Soldiers Work: Civilian Engagement in Support of the Ukrainian Army},
author = {Wulf, Volker and Grinko, Margarita and Ghadamighalandari, Parvin and Randall, Dave},
affiliation = {Information Systems and New Media, Siegen, Germany; Information Systems and New Media, Siegen, Germany; Information Systems and New Media, University of Siegen, Siegen, Germany; Information Systems and New Media, Siegen, Germany},
url = {https://www.wineme.uni-siegen.de/team/wulf/, website},
doi = {10.1145/3772318.3791850},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Since February 2022 the Ukrainian army has been resisting the Russian invasion; this defense would have been less effective without material and logistical support from civil society. While HCI and CSCW have examined ICT in crises, little addresses how civilian–military cooperation is enacted in interstate war and how civilians appropriate commercial technologies into military infrastructures. We report an interview study with observations (N=13) conducted in Lviv during the first five months of the full-scale invasion (Feb–Jul 2022). Our findings show how civilians performed largely invisible work to make soldiers’ work possible: they circumvented broken supply chains, fundraised through digital micro-donation tools, re-engineered commercial drones and software into command-and-control workflows, and joined early cyber and counter-information operations. We contribute to CSCW by theorizing this civilian engagement as wartime infrastructuring and appropriation under extreme risk, and by detailing methodological implications for conducting cooperative-work research in hybrid-war settings.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Through the Looking-Glass: AI-Mediated Video Communication Reduces Trust and Confidence in Judgement
Nelson Navajas Fernández (Bauhaus-Universität Weimar, Weimar, Germany), Jeff Hancock (Department of Communication, Stanford University, Stanford, California, United States), Maurice Jakesch (Bauhaus-Universität Weimar, Weimar, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Fernndez2026ThroughLookingGlass,
title = {Through the Looking-Glass: {AI}-Mediated Video Communication Reduces Trust and Confidence in Judgement},
author = {Navajas Fernández, Nelson and Hancock, Jeff and Jakesch, Maurice},
affiliation = {Bauhaus-Universität Weimar, Weimar, Germany; Department of Communication, Stanford University, Stanford, California, United States; Bauhaus-Universität Weimar, Weimar, Germany},
url = {https://csslab.net, website
https://www.linkedin.com/in/nelson-navajas/, author's linkedin
https://bsky.app/profile/nelocanelo.bsky.social, bluesky},
doi = {10.1145/3772318.3790845},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {AI-based tools that mediate, enhance or generate parts of video communication may interfere with how people evaluate trustworthiness and credibility. In two preregistered online experiments (N = 2,000), we examined whether AI-mediated video retouching, background replacement and avatars affect interpersonal trust, people's ability to detect lies and confidence in their judgments. Participants watched short videos of speakers making truthful or deceptive statements across three conditions with varying levels of AI mediation. We observed that perceived trust and confidence in judgments declined in AI-mediated videos, particularly in settings in which some participants used avatars while others did not. However, participants' actual judgment accuracy remained unchanged, and they were no more inclined to suspect those using AI tools of lying. Our findings provide evidence against concerns that AI mediation undermines people's ability to distinguish truth from lies, and against cue-based accounts of lie detection more generally. They highlight the importance of trustworthy AI mediation tools in contexts where not only truth, but also trust and confidence matter.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Too Many Zombies: Exploring Challenges and Motivations for (Not) Deleting Unused Online Accounts
Franziska Bumiller (Friedrich-Alexander-Universität Erlangen-Nürnberg, LMU Munich), Sarah Delgado Rodriguez (University of the Bundeswehr Munich, LMU Munich), Lukas Mecke (LMU Munich), Verena Distler (Aalto University), Florian Alt (LMU Munich)
Abstract | Tags: Papers | Links:
@inproceedings{Bumiller2026TooMany,
title = {Too Many Zombies: Exploring Challenges and Motivations for (Not) Deleting Unused Online Accounts},
author = {Bumiller, Franziska and Delgado Rodriguez, Sarah and Mecke, Lukas and Distler, Verena and Alt, Florian},
affiliation = {Friedrich-Alexander-Universität Erlangen-Nürnberg, LMU Munich; University of the Bundeswehr Munich, LMU Munich; LMU Munich; Aalto University; LMU Munich},
url = {https://www.medien.ifi.lmu.de/, website
https://www.linkedin.com/company/lmu-media-informatics-group/, lab's linkedin
https://www.linkedin.com/company/usable-security-and-privacy-group-unibwm/, lab's linkedin
https://www.instagram.com/mediagroup.lmu, instagram},
doi = {10.1145/3772318.3790497},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Unused online accounts (“zombie accounts”) pose avoidable privacy and security risks by retaining personal data that may be exposed in breaches. Yet, little is known about when and how to effectively prompt users to delete them. This work investigates the challenges users encounter when attempting to delete zombie accounts. We conducted two online studies with U.S. participants via Prolific: the accounts study (N = 120) to identify common zombie account categories, and the challenges study (N = 100) to examine users’ motivations, perceived abilities, and preferred moments for deletion. Participants reported high self-efficacy but underestimated the number of zombie accounts they had. We identify promising opportune moments — such as when updating account information or setting up a new device — and evaluate potential triggers, including breach notifications and data sensitivity. This work contributes an empirical characterization of end-users' diverse challenges related to zombie accounts and design recommendations for future deletion-support tools.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
User-reconfigured Haptics: Combining User-Reconfiguration and Visual Manipulations to Enhance Dynamic Passive Haptic Experiences for VR
Xinrong Wang (Saarland University, Saarland Informatics Campus (DFKI), Saarbrücken, Germany, xiwa05@dfki.de), Yu Jiang (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany, yjiang@cs.uni-saarland.de), Martin Schmitz (University of Koblenz, Koblenz, Germany), Jürgen Steimle (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), Antonio Krueger (Saarland Informatics Campus, DFKI, Saarbrücken, Germany, antonio.krueger@dfki.de), Donald Degraen (HIT Lab NZ, University of Canterbury, Christchurch, New Zealand)
Abstract | Tags: Papers | Links:
@inproceedings{Wang2026UserreconfiguredHaptics,
title = {User-reconfigured Haptics: Combining User-Reconfiguration and Visual Manipulations to Enhance Dynamic Passive Haptic Experiences for {VR}},
author = {Wang, Xinrong and Jiang, Yu and Schmitz, Martin and Steimle, Jürgen and Krueger, Antonio and Degraen, Donald},
affiliation = {Saarland University, Saarland Informatics Campus (DFKI), Saarbrücken, Germany (xiwa05@dfki.de); Saarland University, Saarland Informatics Campus, Saarbrücken, Germany (yjiang@cs.uni-saarland.de); University of Koblenz, Koblenz, Germany; Saarland University, Saarland Informatics Campus, Saarbrücken, Germany; Saarland Informatics Campus, DFKI, Saarbrücken, Germany (antonio.krueger@dfki.de); HIT Lab NZ, University of Canterbury, Christchurch, New Zealand},
url = {https://umtl.cs.uni-saarland.de/, website
https://www.linkedin.com/feed/, author's linkedin},
doi = {10.1145/3772318.3793333},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Virtual Reality (VR) depends on haptic feedback to create immersive experiences. Traditional passive proxies align physical props with their virtual counterparts but remain limited in scalability and expressiveness, or require bulky actuators to support reconfiguration. We introduce User-reconfigured Haptics, an approach that utilizes implicit user actions to reconfigure haptic interfaces to extend the gamut of VR haptic experiences. Modular 3D-printed cells are assembled into dynamic interfaces that express diverse haptic properties such as softness and weight. By masking physical reconfigurations with visual (re)mapping, user actions unnoticeably change haptic properties, resulting in user-driven, dynamic haptic experiences. User studies show that our design can provide distinguishable haptic experiences and is perceived as realistic and enjoyable in a VR task. We further showcase four applications: a fishing rod that changes weight and flexibility, a dynamic desktop of pressable buttons, a glove with adjustable squeezing, and a crossbow with variable pulling resistance.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Using Digital Twins to Design and Evaluate Interactive Exhibitions: A Case Study with Handheld AR
Jonathan Wieland (HCI Group, University of Konstanz, Germany), Daniel Immanuel Fink (HCI Group, University of Konstanz, Germany), Anke V. Reinschluessel (HCI Group, University of Konstanz, Germany), Jonathan Häßler (HCI Group, University of Konstanz, Germany), Tiare Feuchtner (HCI Group, University of Konstanz, Germany), Harald Reiterer (HCI Group, University of Konstanz, Germany)
Abstract | Tags: Papers | Links:
@inproceedings{Wieland2026UsingDigital,
title = {Using Digital Twins to Design and Evaluate Interactive Exhibitions: A Case Study with Handheld {AR}},
author = {Wieland, Jonathan and Fink, Daniel Immanuel and Reinschluessel, Anke V. and Häßler, Jonathan and Feuchtner, Tiare and Reiterer, Harald},
affiliation = {HCI Group, University of Konstanz, Germany (all authors)},
url = {https://hci.uni-konstanz.de/, website
https://youtu.be/GoZzRKFBy28?si=CiNwLdGqPouSMdNS, full video
https://www.linkedin.com/company/105489067, lab's linkedin
https://www.linkedin.com/in/jonathan-wieland-kn, author's social media},
doi = {10.1145/3772318.3790852},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Contemporary museum experiences often incorporate digital media through modern technologies, such as handheld augmented reality (AR). However, these often fall short of providing a holistic visitor experience, as exhibits are still thought, designed, and experienced in isolation and fail to consider the user's contexts (e.g., physical, social, and personal). To address this issue, we investigate leveraging a digital twin for designing and evaluating interactive exhibitions in large connected spaces through a case study: our publicly available handheld AR-based exhibition "Stayin' Alive". During this exhibition, we gained insights from the interaction data of 1303 visitors, post-visit interviews, as well as rich experiences and observations, based on which we identify four opportunity-challenge pairs that contribute design process insights for practitioners and a road map for future research.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
When Handwriting Goes Social: Creativity, Anonymity, and Communication in Graphonymous Online Spaces
Aditya Kumar Purohit (CAIS), Hendrik Heuer (CAIS)
Abstract | Tags: Papers | Links:
@inproceedings{Purohit2026WhenHandwriting,
title = {When Handwriting Goes Social: Creativity, Anonymity, and Communication in Graphonymous Online Spaces},
author = {Purohit, Aditya Kumar and Heuer, Hendrik},
affiliation = {CAIS; CAIS},
url = {https://www.linkedin.com/in/adityakumarpurohit/, author's linkedin},
doi = {10.1145/3772318.3790828},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {While most digital communication platforms rely on text, relatively little research has examined how users engage through handwriting and drawing in anonymous, collaborative environments. We introduce Graphonymous Interaction, a form of communication where users interact anonymously via handwriting and drawing. Our study analyzed over 600 canvas pages from the Graphonymous Online Space (GOS) CollaNote and conducted interviews with 20 users. Additionally, we examined 70 minutes of real-time GOS sessions using Conversation Analysis and Multimodal Discourse Analysis. Findings reveal that Graphonymous Interaction fosters artistic expression, intellectual engagement, sharing and supporting, and social connection. Notably, anonymity coexisted with moments of recognition through graphological identification. Distinct conversational strategies also emerged, enabling smoother exchanges and fewer conversational repairs compared to text-based communication. This study contributes to understanding Graphonymous Interaction and Online Spaces, offering insights into designing platforms that support creative and socially engaging forms of communication beyond text.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
When Play Hurts: Understanding Common Barriers in Movement-Based Games
Sebastian Cmentowski (Industrial Design, Eindhoven University of Technology), Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Frank Steinicke (Human-Computer Interaction, Universität Hamburg), Regina Bernhaupt (Industrial Design, Eindhoven University of Technology)
Abstract | Tags: Papers | Links:
@inproceedings{Cmentowski2026WhenPlay,
title = {When Play Hurts: Understanding Common Barriers in Movement-Based Games},
author = {Cmentowski, Sebastian and Karaosmanoglu, Sukran and Steinicke, Frank and Bernhaupt, Regina},
affiliation = {Industrial Design, Eindhoven University of Technology; Human-Computer Interaction, Universität Hamburg; Human-Computer Interaction, Universität Hamburg; Industrial Design, Eindhoven University of Technology},
url = {https://www.inf.uni-hamburg.de/en/inst/ab/hci.html, website
https://www.linkedin.com/in/frank-steinicke-b239639/, lab's linkedin
https://www.linkedin.com/in/sukran-karaosmanoglu/, author's linkedin},
doi = {10.1145/3772318.3790905},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Exergames promise enjoyable physical activity through gameplay, yet players often face barriers that undermine engagement, safety, and retention. To date, knowledge about which barriers are encountered by end-users of commercial exergames and which mitigation strategies are used is limited. To address this gap, we conducted an online survey with 174 participants and provide a comprehensive organization of 60 reported barriers across six categories: physical, mental, social, environmental, technological, and game design. Key barriers include space limitations, social discomfort, addictive gameplay, and injuries. Our analysis reveals that while players try to mitigate barriers through ad-hoc strategies, issues like embarrassment, addiction, and harassment remain difficult to overcome. These findings highlight the need for more adaptive game designs, including dynamic spatial adjustments, personalized pacing mechanisms, and supportive social features. This work advances the understanding of exergame barriers and their impact and offers actionable insights for designing more inclusive and resilient movement-based games.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}
Who Explains Privacy Policies to Me? Embodied and Textual LLM-Powered Privacy Assistants in Virtual Reality
Vincent Freiberger (ScaDS.AI), Moritz Dresch (LMU Munich), Florian Alt (LMU Munich), Arthur Fleig (ScaDS.AI), Viktorija Paneva (LMU Munich)
Abstract | Tags: Posters | Links:
@inproceedings{Freiberger2026WhoExplains,
title = {Who Explains Privacy Policies to Me? Embodied and Textual {LLM}-Powered Privacy Assistants in Virtual Reality},
author = {Freiberger, Vincent and Dresch, Moritz and Alt, Florian and Fleig, Arthur and Paneva, Viktorija},
affiliation = {ScaDS.AI; LMU Munich; LMU Munich; ScaDS.AI; LMU Munich},
url = {http://www.medien.ifi.lmu.de/, website
https://www.linkedin.com/company/lmu-media-informatics-group/, lab's linkedin
https://www.linkedin.com/in/viktorija-paneva-hci/, author's linkedin},
doi = {10.1145/3772363.3798567},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Virtual Reality (VR) systems collect fine-grained behavioral and biometric data, yet privacy policies are rarely read or understood due to their complex language, length, and poor integration into users’ interaction workflows. To lower the barrier to informed consent at the point of choice, we explore a Large Language Model (LLM)-powered privacy assistant embedded into a VR app store to support privacy-aware app selection. The assistant is realized in two interaction modes: a text-based chat interface and an embodied virtual avatar providing spoken explanations. We report on an exploratory within-subjects study (N = 21) in which participants browsed VR productivity applications under unassisted and assisted conditions. Our findings suggest that both interaction modes support more deliberate engagement with privacy information and decision-making, with privacy scores primarily functioning as a veto mechanism rather than a primary selection driver. The impact of embodied interaction varied between participants, while textual interaction supported reflective review.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Why Johnny Checks but Doesn’t Alert: Reporting as the Missing Step in Verifiable Internet Voting
Tobias Hilt (SECUSO, Karlsruhe Institute of Technology), Christian Mack (SECUSO, Karlsruhe Institute of Technology), Benjamin Maximilian Berens (SECUSO, Karlsruhe Institute of Technology), Melanie Volkamer (SECUSO, Karlsruhe Institute of Technology)
Abstract | Tags: Papers | Links:
@inproceedings{Hilt2026WhyJohnnyb,
title = {Why {Johnny} Checks but Doesn’t Alert: Reporting as the Missing Step in Verifiable Internet Voting},
author = {Hilt, Tobias and Mack, Christian and Berens, Benjamin Maximilian and Volkamer, Melanie},
affiliation = {SECUSO, Karlsruhe Institute of Technology (all authors)},
url = {https://secuso.aifb.kit.edu/, website
https://www.linkedin.com/company/secuso-research-group/posts/?feedView=all, lab's linkedin
https://www.linkedin.com/in/tobias-hilt-800b9924b/, author's linkedin},
doi = {10.1145/3772318.3791035},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {End-to-end verifiable Internet voting promises that voters can remotely check whether their ballot was recorded correctly and that all ballots were tallied as cast. However, in order to achieve an adequate level of security, voters actually need to perform the first check. Our research focuses on the cast-then-audit approach for this check. We use related work to improve this approach in particular by providing a step-by-step guide. We conducted a deceptive online user study (N=437) to compare our improved system with a baseline version from an actual election. We also measured the usability and participants confidence in using such systems. Our findings show that participants from the improved system perform significantly better than the baseline w.r.t. manipulation detecting and reporting capabilities. Furthermore, we show that it is important to distinguish between detection and reporting to understand how to further increase the overall security.},
keywords = {Papers},
pubstate = {published},
tppubtype = {inproceedings}
}

























