We’re in the process of curating a list of this year’s publications. If you would like your paper included, please submit it via our dedicated form.
Disclaimer: This list is not yet complete, and some DOIs may not resolve yet.
Augmented Body Parts: Bridging VR Embodiment and Wearable Robotics
HyeonBeom Yi (Electronics and Telecommunications Research Institute, Daejeon, Republic of Korea), Myung Jin (MJ) Kim (Electronics and Telecommunications Research Institute, Daejeon, Republic of Korea), Seungwoo Je (Southern University of Science and Technology, Shenzhen, China), Seungjae Oh (Kyung Hee University, Yongin, Republic of Korea), Shuto Takashita (University of Tokyo, Tokyo, Japan), Hongyu Zhou (University of Sydney, Sydney, Australia), Marie Muehlhaus (Saarland University, Saarbrücken, Germany), Dr. Eyal Ofek (University of Birmingham, Birmingham, United Kingdom), Andrea Bianchi (KAIST, Daejeon, Republic of Korea)
Abstract | Tags: Workshops | Links:
@inproceedings{Yi2026AugmentedBody,
title = {Augmented Body Parts: Bridging VR Embodiment and Wearable Robotics},
author = {HyeonBeom Yi (Electronics and Telecommunications Research Institute, Daejeon, Republic of Korea), Myung Jin (MJ) Kim (Electronics and Telecommunications Research Institute, Daejeon, Republic of Korea), Seungwoo Je (Southern University of Science and Technology, Shenzhen, China), Seungjae Oh (Kyung Hee University, Yongin, Republic of Korea), Shuto Takashita (University of Tokyo, Tokyo, Japan), Hongyu Zhou (University of Sydney, Sydney, Australia), Marie Muehlhaus (Saarland University, Saarbrücken, Germany), Dr. Eyal Ofek (University of Birmingham, Birmingham, United Kingdom), Andrea Bianchi (KAIST, Daejeon, Republic of Korea)},
url = {https://hci.cs.uni-saarland.de, website
https://www.linkedin.com/company/saarhcilab/, lab's linkedin},
doi = {10.1145/3772363.3778688},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Recent work across HCI/HRI and wearable robotics has investigated how people control and perceive extra body parts in both virtual and physical settings. Virtual embodiment in XR has shown that users can experience ownership and agency with non-anthropomorphic avatars, while wearable robotics has introduced supernumerary limbs such as third arms and robotic tails. Despite these shared goals, connections between findings remain limited because VR and hardware studies rely on different assumptions about sensory feedback, human perception, and physical constraints, making insights difficult to transfer across contexts. This workshop brings together researchers in XR, wearable robotics, haptics, and neuroscience to explore how to foster embodiment and adaptation with augmented body parts, and how to bridge virtual embodiment to effective use with wearable devices. Through a keynote, brief position shares, and two hands-on group activities, participants will examine control mappings and sensory-feedback strategies and identify which aspects of VR-based embodiment realistically transfer when accounting for hardware limits, sensor variability, and cognitive load. Ultimately, the workshop aims to articulate a focused research agenda connecting VR-based insights to feasible wearable robotics implementations, enabling future work on augmenting the human body with new parts and capabilities.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}
Human-AI-UI Interactions Across Modalities
Kewen Peng (University of Utah, United States), Jeffrey Nichols (Apple Inc., United States), Christof Lutteroth (University of Bath, United Kingdom), Tiffany Knearem (MBZUAI, United Arab Emirates), Felix Kretzer (Karlsruhe Institute of Technology (KIT), Germany), Jeffrey Bigham (Carnegie Mellon University & Apple Inc., United States), Alexander Maedche (Karlsruhe Institute of Technology (KIT), Germany), Yue Jiang (University of Utah, United States)
Abstract | Tags: Workshops | Links:
@inproceedings{Peng2026HumanaiuiInteractions,
title = {Human-AI-UI Interactions Across Modalities},
author = {Kewen Peng (University of Utah, United States), Jeffrey Nichols (Apple Inc., United States), Christof Lutteroth (University of Bath, United Kingdom), Tiffany Knearem (MBZUAI, United Arab Emirates), Felix Kretzer (Karlsruhe Institute of Technology (KIT), Germany). Jeffrey Bigham (Carnegie Mellon University & Apple Inc., United States), Alexander Maedche (Karlsruhe Institute of Technology (KIT), Germany), Yue Jiang (University of Utah, United States)},
url = {https://h-lab.win.kit.edu/, website
https://www.linkedin.com/company/68838007/, lab's linkedin},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Designing and developing user-friendly interfaces has long been a cornerstone of HCI research, yet today we are at a turning point where UIs are no longer designed solely for humans but also for intelligent agents that act on users’ behalf, while UIs are also expanding beyond 2D screens into extended reality environments with inherently multimodal characteristics, together challenging us to rethink the role of the UI as a mediator of human–AI interaction. This workshop will explore how UI agents bridge human intent and system behavior by interpreting multimodal inputs and generating adaptive outputs across surfaces from screens to extended reality (XR), and we will examine not only their technical capabilities but also their broader impact, including how agents reshape daily workflows, how bidirectional alignment between human and AI activity can be achieved, and how generative models may transform UI creation. XR provides a compelling testbed for these questions and highlights challenges around accuracy, efficiency, transparency, accessibility, and user agency, setting the stage for the next generation of intelligent and adaptive UIs.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}
Human-Centered Explainable AI (HCXAI): Re-examining XAI in the Era of Agentic AI
Upol Ehsan (Khoury College of Computer Sciences, Northeastern University, Boston, Massachusetts, United States), Amal Alabdulkarim (Georgia Institute of Technology, Atlanta, Georgia, United States), Kenneth Holstein (Human-Computer Interaction Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania, United States), Min Kyung Lee (School of Information, University of Texas at Austin, Austin, Texas, United States), Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt, Ingolstadt, Bavaria, Germany), Justin D. Weisz (IBM Research, Yorktown Heights, New York, United States)
Abstract | Tags: Workshops | Links:
@inproceedings{Ehsan2026HumancenteredExplainable,
title = {Human-Centered Explainable AI (HCXAI): Re-examining XAI in the Era of Agentic AI},
author = {Upol Ehsan (Khoury College of Computer Sciences, Northeastern University , Boston, Massachusetts, United States), Amal Alabdulkarim (Georgia Institute of Technology, Atlanta, Georgia, United States), Kenneth Holstein (Human-Computer Interaction Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania, United States), Min Kyung Lee (School of Information, University of Texas at Austin, Austin, Texas, United States), Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt, Ingolstadt, Bavaria, Germany), Justin D. Weisz (IBM Research, Yorktown Heights, New York, United States)},
url = {https://hcig.thi.de/, website
https://www.linkedin.com/in/andreas-riener-19233710/, author's linkedin},
doi = {10.1145/3772363.3778728},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {Making AI explainable requires more than algorithmic transparency: it demands understanding who needs explanations and why. In our sixth CHI workshop on Human-Centered XAI (HCXAI), we shift focus to agentic AI systems. LLM-based agents foundationally challenge existing explainability paradigms. Unlike traditional AI that produces single outputs, agents plan multi-step strategies, invoke tools with real-world consequences, and coordinate with other systems; yet current XAI approaches fail to address these complexities. Users need to understand not just what an agent might do, but the cascade of actions it could trigger, the risks involved, and why responses take time. Even our expanded HCXAI frameworks struggle with these new demands. Through our workshop series, we have built a strong community making important conceptual, methodological, and technical impact. This year, we re-examine what human-centered explainable AI means in the agentic era, bringing together researchers and practitioners to shape explainability for both users and developers of these systems.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}
The AI Accomplice: Exploring Generative Artificial Intelligence in Facilitating and Amplifying Deceptive Designs
Thomas Kosch (HU Berlin), Veronika Krauß (HS Ansbach), Christopher Katins (HU Berlin), Dominik Schön (TU Darmstadt), Mark McGill (University of Glasgow), Jan Gugenheimer (TU Darmstadt)
Abstract | Tags: Workshops | Links:
@inproceedings{Kosch2026AiAccomplice,
title = {The AI Accomplice: Exploring Generative Artificial Intelligence in Facilitating and Amplifying Deceptive Designs},
author = {Thomas Kosch (HU Berlin), Veronika Krauß (HS Ansbach), Christopher Katins (HU Berlin), Dominik Schön (TU Darmstadt), Mark McGill (University of Glasgow), Jan Gugenheimer (TU Darmstadt)},
url = {https://hcistudio.org, website},
doi = {10.1145/3772363.3778770},
year = {2026},
date = {2026-04-13},
urldate = {2026-04-13},
abstract = {As generative Artificial Intelligence (AI) becomes increasingly embedded and utilized for digital design, it presents both opportunities and risks. One major concern is its potential to facilitate and incorporate deceptive design patterns into computing technologies, which could manipulate or mislead users to their disadvantage. Similar to the concept of precedent-based design, a common approach in design theory that suggests reapplying previous design solutions to similar or identical problems, generative AI can integrate deceptive design patterns included in the training data a model has seen before. Our workshop explores how generative AI suggests and enacts deceptive design patterns in digital design. The goal of the workshop is to explore the ethical challenges of utilizing generative AI models and develop strategies to detect or prevent manipulative practices, thereby creating more transparent and equitable AI-generated experiences.},
keywords = {Workshops},
pubstate = {published},
tppubtype = {inproceedings}
}