We’re in the process of curating a list of this year’s publications. If you would like your paper included, please submit it via our dedicated form.
Disclaimer: This list is not yet complete, and some DOIs may not resolve yet.
BotaXplore: Enhancing Visitor Engagement and Learning in Botanical Gardens Through Mobile Technology
Albin Zeqiri (Ulm University), Tobias Wagner (Ulm University), Johanna Grüneberg (LMU Munich), Enrico Rukzio (Ulm University)
Abstract | Tags: Posters | Links:
@inproceedings{Zeqiri2026Botaxplore,
title = {BotaXplore: Enhancing Visitor Engagement and Learning in Botanical Gardens Through Mobile Technology},
author = {Albin Zeqiri (Ulm University), Tobias Wagner (Ulm University), Johanna Grüneberg (LMU Munich), Enrico Rukzio (Ulm University)},
url = {https://www.uni-ulm.de/in/mi/hci/, website
https://az16.github.io/, author's social media
https://wgnrto.de/, author's social media},
doi = {10.1145/3772363.3799272},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Educational guided visits in botanical gardens offer valuable opportunities for learning and engagement that promote awareness of the importance of biological diversity, its conservation, and sustainable use. However, a focus group with five botanists identified challenges in designing tours for heterogeneous audiences that foster curiosity and interest, as well as in tailoring educational content. To address these aspects, this paper presents BotaXplore, a prototype mobile application that supports plant exploration and learning in botanical gardens through three modes: exploratory, semi-guided, and tour-based. Using photo-based identification, users access short facts and quizzes about plants, and discovered species are added to a personal collection. Building on this prototype, we plan to evaluate the app's impact on nature engagement and learning outcomes after improving learning paths, content generation, and support for collaborative exploration.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Enhancing Memory Recall Through AI-Assisted Method of Loci in Virtual Reality
Clemens Wulff (Universität Hamburg), Lucie Kruse (Universität Hamburg), Frank Steinicke (Universität Hamburg)
Abstract | Tags: Posters | Links:
@inproceedings{Wulff2026EnhancingMemory,
title = {Enhancing Memory Recall Through AI-Assisted Method of Loci in Virtual Reality},
author = {Clemens Wulff (Universität Hamburg), Lucie Kruse (Universität Hamburg), Frank Steinicke (Universität Hamburg)},
url = {https://www.inf.uni-hamburg.de/en/inst/ab/hci.html, website
https://www.linkedin.com/in/lucie-kruse-004740234/, author's linkedin},
doi = {10.1145/3772363.3798815},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {The Method of Loci is a well-established mnemonic technique that involves associating words with objects placed along a route. A key factor in its effectiveness is creating meaningful connections between the words to be remembered and the corresponding objects. In this study, we investigate how artificial intelligence (AI) can enhance this technique by i) selecting appropriate objects for each word and ii) generating coherent textual associations between the words and their objects. These AI-assisted approaches are compared to a control condition, where iii) object-word pairs are chosen randomly without assistance. Our findings demonstrate that the Object condition significantly improves word recall both immediately and after one week. In contrast, the Text condition did not lead to a significant enhancement in recall, and perceived workload showed no significant differences across conditions. These results offer valuable insights for advancing mnemonic techniques and suggest directions for future research to optimize memory strategies.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Eye Want It All! Investigating Eye Tracking as Implicit Support for Generative Inpainting
Niklas Pfützenreuter (University of Duisburg-Essen), Carina Liebers (University of Duisburg-Essen), David Goedicke (University of Duisburg-Essen), Donald Degraen (University of Canterbury), Uwe Gruenefeld (GENERIO), Stefan Schneegass (University of Duisburg-Essen)
Abstract | Tags: Posters | Links:
@inproceedings{Pfuetzenreuter2026EyeWant,
title = {Eye Want It All! Investigating Eye Tracking as Implicit Support for Generative Inpainting},
author = {Niklas Pfützenreuter (University of Duisburg-Essen), Carina Liebers (University of Duisburg-Essen), David Goedicke (University of Duisburg-Essen), Donald Degraen (University of Canterbury), Uwe Gruenefeld (GENERIO), Stefan Schneegass (University of Duisburg-Essen)},
url = {https://hci.informatik.uni-due.de/, website
https://de.linkedin.com/company/hci-group-essen, lab's linkedin
https://www.linkedin.com/in/niklas-pfützenreuter/, author's linkedin
https://www.facebook.com/HCIEssen, facebook},
doi = {10.1145/3772363.3799314},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Users often struggle to use Generative Artificial Intelligence (GenAI) models to generate a desired image, as controlling them solely with prompts is difficult. Current solutions to this problem, such as adding conditional controls, require users to provide explicit input, which can be tedious. To avoid depending on additional explicit input, this paper explores what implicit gaze behavior reveals about user intentions when viewing generated images. In our user study (N = 16), we evaluated the correlation between gaze behavior and user annotations, showing that users looked longer at areas they wanted to regenerate. While our research is a first step, we believe our work can pave the way for incorporating implicit user input into interactions with GenAI systems.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
MultiBand: Adding Multi-Touch to the Smartwatch Wristband for Extended Interaction
David Petersen (Technische Hochschule Köln), Marvin Reuter (Technische Hochschule Köln), Matthias Böhmer (Technische Hochschule Köln)
Abstract | Tags: Posters | Links:
@inproceedings{Petersen2026Multiband,
title = {MultiBand: Adding Multi-Touch to the Smartwatch Wristband for Extended Interaction},
author = {David Petersen (Technische Hochschule Köln), Marvin Reuter (Technische Hochschule Köln), Matthias Böhmer (Technische Hochschule Köln)},
url = {https://moxd.io/, website
https://www.instagram.com/moxdlab/, instagram},
doi = {10.1145/3772363.3799304},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {The small screen size of smartwatches presents input challenges due to the limited touch surface and screen occlusion. To expand the input space and mitigate the fat finger problem, extensive research has explored various strategies for improving smartwatch interaction design. While wristband-based input has also been studied, there is a lack of research on multi-touch interaction and gestures performed directly on the band. To address this gap, we present MultiBand, a functional prototype that expands smartwatch input capabilities by leveraging capacitive touch sensors around the wristband. Our prototype enables users to execute different functions on a smartwatch based on how they place their fingers on the wristband. Our implementation distinguishes between two types of finger interactions to trigger different scrolling techniques when navigating a contact list. We contribute the software and hardware of our prototype as well as first insights from preliminary user tests.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Open Challenges of Immersive AI-based Remembrance Systems using the Example of Interactive Digital Testimonies
Daniel Kolb (Leibniz Supercomputing Centre), Fabian Heindl (Ludwig-Maximilians-Universität München), Markus Gloe (Ludwig-Maximilians-Universität München), Dieter Kranzlmüller (Ludwig-Maximilians-Universität München)
Abstract | Tags: Posters | Links:
@inproceedings{Kolb2026OpenChallenges,
title = {Open Challenges of Immersive AI-based Remembrance Systems using the Example of Interactive Digital Testimonies},
author = {Daniel Kolb (Leibniz Supercomputing Centre), Fabian Heindl (Ludwig-Maximilians-Universität München), Markus Gloe (Ludwig-Maximilians-Universität München), Dieter Kranzlmüller (Ludwig-Maximilians-Universität München)},
url = {https://www.lrz.de/en/technologies/virtual-reality, website
https://www.linkedin.com/in/daniel-kolb-999628129/, author's linkedin},
doi = {10.1145/3772363.3799277},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Immersive AI-based remembrance systems offer users various ways to interact with recordings or recreations of living and deceased individuals. This includes simulated face-to-face conversations based on authentic recordings, such as in Interactive Digital Testimonies. Their potential application ranges from supporting family members during times of grief to educating learners on historical events. Using this example, we shed light on nine as-yet-unresolved fundamental challenges of immersive AI-based remembrance systems. Given the complexity and relativity of these challenges, we emphasize the need for concerted research across a broad range of scientific and humanities disciplines.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
TARDIS: Tabletop Augmented Reality for Dynamic Immersive Storytelling
Paul Preuschoff (RWTH Aachen University), René Schäfer (RWTH Aachen University), Phillip Ahlers (RWTH Aachen University), David Gilbert (RWTH Aachen University), Jan Borchers (RWTH Aachen University)
Abstract | Tags: Posters | Links:
@inproceedings{Preuschoff2026Tardis,
title = {TARDIS: Tabletop Augmented Reality for Dynamic Immersive Storytelling},
author = {Paul Preuschoff (RWTH Aachen University), René Schäfer (RWTH Aachen University), Phillip Ahlers (RWTH Aachen University), David Gilbert (RWTH Aachen University), Jan Borchers (RWTH Aachen University)},
url = {hci.ac, website
https://www.linkedin.com/in/paul-preuschoff/, author's linkedin},
doi = {10.1145/3772363.379929},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {In tabletop role-playing games, players experience a shared story, coordinated by a game master. This relies heavily on immersion, social interaction, and creative freedom. We explore how VR can increase immersion without undermining these other qualities. We placed players into a CAVE VR system to display virtual environments (VEs) on the walls and floor without requiring glasses that might impede social interaction. We varied how closely VEs matched the game setting described verbally, from reflecting its general atmosphere to being true to details, and investigated the impact on immersion, distraction, creativity, and role-play. Players felt more connected to their characters when seeing what their characters would see, but abstract, atmospheric VEs led to fewer problematic divergences and more creative freedom. Surprisingly, medium matching levels were often criticized because players could trust neither what they saw nor their "cinema of the mind". Our findings help integrate VR into shared collocated storytelling.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}
Who Explains Privacy Policies to Me? Embodied and Textual LLM-Powered Privacy Assistants in Virtual Reality
Vincent Freiberger (ScaDS.AI), Moritz Dresch (LMU Munich), Florian Alt (LMU Munich), Arthur Fleig (ScaDS.AI), Viktorija Paneva (LMU Munich)
Abstract | Tags: Posters | Links:
@inproceedings{Freiberger2026WhoExplains,
title = {Who Explains Privacy Policies to Me? Embodied and Textual LLM-Powered Privacy Assistants in Virtual Reality},
author = {Vincent Freiberger (ScaDS.AI), Moritz Dresch (LMU Munich), Florian Alt (LMU Munich), Arthur Fleig (ScaDS.AI), Viktorija Paneva (LMU Munich)},
url = {http://www.medien.ifi.lmu.de/, website
https://www.linkedin.com/company/lmu-media-informatics-group/, lab's linkedin
https://www.linkedin.com/in/viktorija-paneva-hci/, author's linkedin},
doi = {10.1145/3772363.3798567},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Virtual Reality (VR) systems collect fine-grained behavioral and biometric data, yet privacy policies are rarely read or understood due to their complex language, length, and poor integration into users’ interaction workflows. To lower the barrier to informed consent at the point of choice, we explore a Large Language Model (LLM)-powered privacy assistant embedded into a VR app store to support privacy-aware app selection. The assistant is realized in two interaction modes: a text-based chat interface and an embodied virtual avatar providing spoken explanations. We report on an exploratory within-subjects study (N = 21) in which participants browsed VR productivity applications under unassisted and assisted conditions. Our findings suggest that both interaction modes support more deliberate engagement with privacy information and decision-making, with privacy scores primarily functioning as a veto mechanism rather than a primary selection driver. The impact of embodied interaction varied between participants, while textual interaction supported reflective review.},
keywords = {Posters},
pubstate = {published},
tppubtype = {inproceedings}
}

