Aydın H; Godin-Dubois K; Braz L G; den Hengst F; Baraka K; Çelikok M M; Sauter A; Wang S; Oliehoek F A
SHARPIE: A Modular Framework for Reinforcement Learning and Human-AI Interaction Experiments Proceedings Article
In: AAAI Bridge Program Workshop on Collaborative AI and Modeling of Humans (CAIHU), Philadelphia, Pennsylvania, USA, 2025.
@inproceedings{sharpiecaihu25,
title = {SHARPIE: A Modular Framework for Reinforcement Learning and Human-AI Interaction Experiments},
author = {Hüseyin Ayd{ı}n and Kevin Godin-Dubois and Libio Goncalvez Braz and Floris den Hengst and Kim Baraka and Mustafa Mert Çelikok and Andreas Sauter and Shihan Wang and Frans A. Oliehoek},
url = {https://arxiv.org/abs/2501.19245},
doi = {10.48550/arXiv.2501.19245},
year = {2025},
date = {2025-02-01},
booktitle = {AAAI Bridge Program Workshop on Collaborative AI and Modeling of Humans (CAIHU)},
address = {Philadelphia, Pennsylvania, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Apeiron A S; Dell’Anna D; Murukannaiah P K; Yolum P
Model and Mechanisms of Consent for Responsible Autonomy Proceedings Article
In: 24th International Conference on Autonomous Agents and Multiagent Systems, 2025.
@inproceedings{apeiron2025model,
title = {Model and Mechanisms of Consent for Responsible Autonomy},
author = {Apeiron, Anastasia S. and Dell’Anna, Davide and Murukannaiah, Pradeep K. and Yolum, P{ı}nar},
year = {2025},
date = {2025-01-01},
booktitle = {24th International Conference on Autonomous Agents and Multiagent Systems},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Onnes A; Dastani M; Dobbe R; Renooij S
Extending idioms for Bayesian network construction with qualitative constraints Proceedings Article
In: Proceedings of the Twentieth International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems (IPMU), pp. 415–426, Springer, 2025, (accepted for publication).
@inproceedings{onnes2024,
title = {Extending idioms for Bayesian network construction with qualitative
constraints},
author = {Onnes, Annet and Dastani, Mehdi and Dobbe, Roel and Renooij, Silja},
booktitle = {Proceedings of the Twentieth International Conference on Information
Processing and Management of Uncertainty in Knowledge-Based Systems (IPMU)},
url = {https://link.springer.com/chapter/10.1007/978-3-031-74003-9_33},
year = {2025},
date = {2025-01-01},
volume = {3},
pages = {415–426},
publisher = {Springer},
series = {Lecture Notes in Networks and Systems},
abstract = {Bayesian networks (BNs) are compact representations of probability
distributions that allow for supporting reasoning and decision making under uncertainty.
Their interpretable structure and probability parameters allow for integrating human
knowledge in their construction and explanation. For BN construction, reusable building
blocks, or idioms, exist that describe the dependencies and reasoning patterns among small
sets of variables. In this paper we formalise the concept of an idiom, explicitly including
qualitative constraints that capture the reasoning patterns among variables as stated in the
informal descriptions that accompany the idioms in literature. Our proposed formalisation
ensures that idioms can be applied more consistently and reliably, improving the BN’s
accountability.},
note = {accepted for publication},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ju J; Yang C; Fu S; Tsai M; Wang C
Relevance-aware Diverse Query Generation for Out-of-domain Text Ranking Proceedings Article
In: Zhao, Chen; Mosbach, Marius; Atanasova, Pepa; Goldfarb-Tarrant, Seraphina; Hase, Peter; Hosseini, Arian; Elbayad, Maha; Pezzelle, Sandro; Mozes, Maximilian (Ed.): Proceedings of the 9th Workshop on Representation Learning for NLP (RepL4NLP-2024), pp. 26–36, Association for Computational Linguistics, Bangkok, Thailand, 2024.
@inproceedings{ju2024relevance,
title = {Relevance-aware Diverse Query Generation for Out-of-domain Text Ranking},
author = {Ju, Jia-Huei and Yang, Chao-Han and Fu, Szu-Wei and Tsai, Ming-Feng and Wang, Chuan-Ju},
editor = {Zhao, Chen and Mosbach, Marius and Atanasova, Pepa and Goldfarb-Tarrant, Seraphina and Hase, Peter and Hosseini, Arian and Elbayad, Maha and Pezzelle, Sandro and Mozes, Maximilian},
url = {https://aclanthology.org/2024.repl4nlp-1.3},
year = {2024},
date = {2024-08-01},
booktitle = {Proceedings of the 9th Workshop on Representation Learning for NLP (RepL4NLP-2024)},
pages = {26–36},
publisher = {Association for Computational Linguistics},
address = {Bangkok, Thailand},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Godin-Dubois K; Weissl O; Miras K; Kononova A V
Interactive Embodied Evolution for Socially Adept Artificial General Creatures Proceedings Article
In: Evolution of Things Workshop at the ALife 2024 Conference, arXiv, 2024.
@inproceedings{GodinDubois2024a,
title = {Interactive Embodied Evolution for Socially Adept Artificial General Creatures},
author = {{Godin-Dubois}, Kevin and Weissl, Olivier and Miras, Karine and Kononova, Anna V.},
doi = {10.48550/arXiv.2407.21357},
year = {2024},
date = {2024-07-01},
booktitle = {Evolution of Things Workshop at the ALife 2024 Conference},
publisher = {arXiv},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dobbe R; Wolters A
Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context Journal Article
In: Minds and Machines, vol. 34, no. 2, pp. 12, 2024, ISSN: 1572-8641.
@article{dobbe_toward_2024,
title = {Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context},
author = {Dobbe, Roel and Wolters, Anouk},
url = {https://doi.org/10.1007/s11023-024-09668-y},
doi = {10.1007/s11023-024-09668-y},
issn = {1572-8641},
year = {2024},
date = {2024-05-01},
urldate = {2024-05-30},
journal = {Minds and Machines},
volume = {34},
number = {2},
pages = {12},
abstract = {This paper provides an empirical and conceptual account on seeing machine learning models as part of a sociotechnical system to identify relevant vulnerabilities emerging in the context of use. As ML is increasingly adopted in socially sensitive and safety-critical domains, many ML applications end up not delivering on their promises, and contributing to new forms of algorithmic harm. There is still a lack of empirical insights as well as conceptual tools and frameworks to properly understand and design for the impact of ML models in their sociotechnical context. In this paper, we follow a design science research approach to work towards such insights and tools. We center our study in the financial industry, where we first empirically map recently emerging MLOps practices to govern ML applications, and corroborate our insights with recent literature. We then perform an integrative literature research to identify a long list of vulnerabilities that emerge in the sociotechnical context of ML applications, and we theorize these along eight dimensions. We then perform semi-structured interviews in two real-world use cases and across a broad set of relevant actors and organizations, to validate the conceptual dimensions and identify challenges to address sociotechnical vulnerabilities in the design and governance of ML-based systems. The paper proposes a set of guidelines to proactively and integrally address both the dimensions of sociotechnical vulnerability, as well as the challenges identified in the empirical use case research, in the organization of MLOps practices.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reimann M M; Kunneman F A; Oertel C; Hindriks K V
A Survey on Dialogue Management in Human-Robot Interaction Journal Article
In: J. Hum.-Robot Interact., 2024, (Just Accepted).
@article{10.1145/3648605,
title = {A Survey on Dialogue Management in Human-Robot Interaction},
author = {Reimann, Merle M. and Kunneman, Florian A. and Oertel, Catharine and Hindriks, Koen V.},
url = {https://doi.org/10.1145/3648605},
doi = {10.1145/3648605},
year = {2024},
date = {2024-03-01},
journal = {J. Hum.-Robot Interact.},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {As social robots see increasing deployment within the general public, improving the interaction with those robots is essential. Spoken language offers an intuitive interface for the human-robot interaction (HRI), with dialogue management (DM) being a key component in those interactive systems. Yet, to overcome current challenges and manage smooth, informative and engaging interaction a more structural approach to combining HRI and DM is needed. In this systematic review, we analyse the current use of DM in HRI and focus on the type of dialogue manager used, its capabilities, evaluation methods and the challenges specific to DM in HRI. We identify the challenges and current scientific frontier related to the DM approach, interaction domain, robot appearance, physical situatedness and multimodality.},
note = {Just Accepted},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
den Hengst F; Otten M; Elbers P; van Harmelen F; François-Lavet V; Hoogendoorn M
Guideline-informed reinforcement learning for mechanical ventilation in critical care Journal Article
In: Artificial Intelligence in Medicine, vol. 147, pp. 102742, 2024, ISSN: 0933-3657.
@article{DENHENGST2024102742,
title = {Guideline-informed reinforcement learning for mechanical ventilation in critical care},
author = {Floris {den Hengst} and Martijn Otten and Paul Elbers and Frank {van Harmelen} and Vincent François-Lavet and Mark Hoogendoorn},
url = {https://www.sciencedirect.com/science/article/pii/S0933365723002567},
doi = {10.1016/j.artmed.2023.102742},
issn = {0933-3657},
year = {2024},
date = {2024-01-01},
journal = {Artificial Intelligence in Medicine},
volume = {147},
pages = {102742},
abstract = {Reinforcement Learning (RL) has recently found many applications in the healthcare domain thanks to its natural fit to clinical decision-making and ability to learn optimal decisions from observational data. A key challenge in adopting RL-based solution in clinical practice, however, is the inclusion of existing knowledge in learning a suitable solution. Existing knowledge from e.g. medical guidelines may improve the safety of solutions, produce a better balance between short- and long-term outcomes for patients and increase trust and adoption by clinicians. We present a framework for including knowledge available from medical guidelines in RL. The framework includes components for enforcing safety constraints and an approach that alters the learning signal to better balance short- and long-term outcomes based on these guidelines. We evaluate the framework by extending an existing RL-based mechanical ventilation (MV) approach with clinically established ventilation guidelines. Results from off-policy policy evaluation indicate that our approach has the potential to decrease 90-day mortality while ensuring lung protective ventilation. This framework provides an important stepping stone towards implementations of RL in clinical practice and opens up several avenues for further research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reimann M; van de Graaf J; van Gulik N; van de Sanden S; Verhagen T; Hindriks K
Social Robots in the Wild and the Novelty Effect Proceedings Article
In: Ali, Abdulaziz Al; Cabibihan, John-John; Meskin, Nader; Rossi, Silvia; Jiang, Wanyue; He, Hongsheng; Ge, Shuzhi Sam (Ed.): Social Robotics, pp. 38–48, Springer Nature Singapore, Singapore, 2024, ISBN: 978-981-99-8718-4.
@inproceedings{10.1007/978-981-99-8718-4_4,
title = {Social Robots in the Wild and the Novelty Effect},
author = {Reimann, Merle
and van de Graaf, Jesper
and van Gulik, Nina
and van de Sanden, Stephanie
and Verhagen, Tibert
and Hindriks, Koen},
editor = {Ali, Abdulaziz Al
and Cabibihan, John-John
and Meskin, Nader
and Rossi, Silvia
and Jiang, Wanyue
and He, Hongsheng
and Ge, Shuzhi Sam},
url = {https://rdcu.be/dtXE5},
isbn = {978-981-99-8718-4},
year = {2024},
date = {2024-01-01},
booktitle = {Social Robotics},
pages = {38–48},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {We designed a wine recommendation robot and deployed it in a small supermarket. In a study aimed to evaluate our design we found that people with no intent to buy wine were interacting with the robot rather than the intended audience of wine-buying customers. Behavioural data, moreover, suggests a very different evaluation of the robot than the surveys that were completed. We also found that groups were interacting more with the robot than individuals, a finding that has been reported more often in the literature. All of these findings taken together suggest that a novelty effect may have been at play. It also suggests that field studies should take this effect more seriously. The main contribution of our work is in identifying and proposing a set of indicators and thresholds that can be used to identify that a novelty effect is present. We argue that it is important to focus more on measuring attitudes towards robots that may explain behaviour due to novelty effects. Our findings also suggest research should focus more on verifying whether real user needs are met.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu C; Dastani M; Wang S
A survey of multi-agent deep reinforcement learning with communication Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 38, no. 1, pp. 4, 2024.
@article{zhu2024survey,
title = {A survey of multi-agent deep reinforcement learning with communication},
author = {Zhu, Changxi and Dastani, Mehdi and Wang, Shihan},
doi = {10.1007/s10458-023-09633-6},
year = {2024},
date = {2024-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {38},
number = {1},
pages = {4},
publisher = {Springer},
abstract = {Communication is an effective mechanism for coordinating the behaviors of multiple agents, broadening their views of the environment, and to support their collaborations. In the field of multi-agent deep reinforcement learning (MADRL), agents can improve the overall learning performance and achieve their objectives by communication. Agents can communicate various types of messages, either to all agents or to specific agent groups, or conditioned on specific constraints. With the growing body of research work in MADRL with communication (Comm-MADRL), there is a lack of a systematic and structural approach to distinguish and classify existing Comm-MADRL approaches. In this paper, we survey recent works in the Comm-MADRL field and consider various aspects of communication that can play a role in designing and developing multi-agent reinforcement learning systems. With these aspects in mind, we propose 9 dimensions along which Comm-MADRL approaches can be analyzed, developed, and compared. By projecting existing works into the multi-dimensional space, we discover interesting trends. We also propose some novel directions for designing future Comm-MADRL systems through exploring possible combinations of the dimensions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dudzik B J W; van der Waa J S; Chen P; Dobbe R; de Troya Í M D R; Bakker R M; de Boer M H T; Smit Q T S; Dell'Anna D; Erdogan E; Yolum P; Wang S; Baez Santamaria S; Krause L; Kamphorst B A
Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management Journal Article
In: Journal of Artificial Intelligence Research, vol. 80, pp. 919–929, 2024.
@article{dudzik2024,
title = {Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management},
author = {Dudzik, Bernd J. W. and van der Waa, Jasper S. and Chen, Pei-Yu and Dobbe, Roel and de Troya, Íñigo M.D.R. and Bakker, Roos M. and de Boer, Maaike H. T. and Smit, Quirine T.S. and Dell'Anna, Davide and Erdogan, Emre and Yolum, Pinar and Wang, Shihan and Baez Santamaria, Selene and Krause, Lea and Kamphorst, Bart A.},
year = {2024},
date = {2024-01-01},
journal = {Journal of Artificial Intelligence Research},
volume = {80},
pages = {919–929},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jorge C C; van Zoelen E M; Verhagen R; Mehrotra S; Jonker C M; Tielman M L
Appropriate context-dependent artificial trust in human-machine teamwork Book Chapter
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop, pp. 41-60, Academic Press, 2024, ISBN: 978-0-443-15988-6.
@incollection{CENTEIOJORGE202441,
title = {Appropriate context-dependent artificial trust in human-machine teamwork},
author = {Carolina {Centeio Jorge} and Emma M. {van Zoelen} and Ruben Verhagen and Siddharth Mehrotra and Catholijn M. Jonker and Myrthe L. Tielman},
editor = {Prithviraj Dasgupta and James Llinas and Tony Gillespie and Scott Fouse and William Lawless and Ranjeev Mittu and Donald Sofge},
url = {https://www.sciencedirect.com/science/article/pii/B9780443159886000078},
doi = {10.1016/B978-0-443-15988-6.00007-8},
isbn = {978-0-443-15988-6},
year = {2024},
date = {2024-01-01},
booktitle = {Putting AI in the Critical Loop},
pages = {41-60},
publisher = {Academic Press},
abstract = {As human-machine teams become a more common scenario, we need to ensure mutual trust between humans and machines. More important than having trust, we need all teammates to trust each other appropriately. This means that they should not overtrust or undertrust each other, avoiding risks and inefficiencies, respectively. We usually think of natural trust, that is, humans trusting machines, but we should also consider artificial trust, that is, artificial agents trusting humans. Appropriate artificial trust allows the agents to interpret human behavior and predict their behavior in a certain context. In this chapter, we explore how we can define this context in terms of task and team characteristics. We present a taxonomy that shows how trust is context-dependent. In fact, we propose that no trust model presented in the literature fits all contexts and argue that our taxonomy facilitates the choice of the trust model that better fits a certain context. The taxonomy helps to understand which internal characteristics of the teammate (krypta) are important to consider and how they will show in behavior cues (manifesta). This taxonomy can also be used to help human-machine teams’ researchers in the problem definition and process of experimental design as it allows a detailed characterization of the task and team configuration. Furthermore, we propose a formalization of the belief of trust as context-dependent trustworthiness, and show how beliefs of trust can be used to reach appropriate trust. Our work provides a starting point to implement mutual appropriate trust in human-machine teams.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Degachi C; Mehrotra S; Yurrita M; Niforatos E; Tielman M L
Practising Appropriate Trust in Human-Centred AI Design Proceedings Article
In: Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2024, ISBN: 9798400703317.
@inproceedings{10.1145/3613905.3650825,
title = {Practising Appropriate Trust in Human-Centred AI Design},
author = {Degachi, Chadha and Mehrotra, Siddharth and Yurrita, Mireia and Niforatos, Evangelos and Tielman, Myrthe Lotte},
url = {https://doi.org/10.1145/3613905.3650825},
doi = {10.1145/3613905.3650825},
isbn = {9798400703317},
year = {2024},
date = {2024-01-01},
booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI EA '24},
abstract = {Appropriate trust, trust which aligns with system trustworthiness, in Artificial Intelligence (AI) systems has become an important area of research. However, there remains debate in the community about how to design for appropriate trust. This debate is a result of the complex nature of trust in AI, which can be difficult to understand and evaluate, as well as the lack of holistic approaches to trust. In this paper, we aim to clarify some of this debate by operationalising appropriate trust within the context of the Human-Centred AI Design (HCD) process. To do so, we organised three workshops with 13 participants total from design and development backgrounds. We carried out design activities to stimulate discussion on appropriate trust in the HCD process. This paper aims to help researchers and practitioners understand appropriate trust in AI through a design lens by illustrating how it interacts with the HCD process.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mehrotra S; Jorge C C; Jonker C M; Tielman M L
Integrity-based Explanations for Fostering Appropriate Trust in AI Agents Journal Article
In: ACM Trans. Interact. Intell. Syst., vol. 14, no. 1, 2024, ISSN: 2160-6455.
@article{10.1145/3610578,
title = {Integrity-based Explanations for Fostering Appropriate Trust in AI Agents},
author = {Mehrotra, Siddharth and Jorge, Carolina Centeio and Jonker, Catholijn M. and Tielman, Myrthe L.},
url = {https://doi.org/10.1145/3610578},
doi = {10.1145/3610578},
issn = {2160-6455},
year = {2024},
date = {2024-01-01},
journal = {ACM Trans. Interact. Intell. Syst.},
volume = {14},
number = {1},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Appropriate trust is an important component of the interaction between people and AI systems, in that “inappropriate” trust can cause disuse, misuse, or abuse of AI. To foster appropriate trust in AI, we need to understand how AI systems can elicit appropriate levels of trust from their users. Out of the aspects that influence trust, this article focuses on the effect of showing integrity. In particular, this article presents a study of how different integrity-based explanations made by an AI agent affect the appropriateness of trust of a human in that agent. To explore this, (1) we provide a formal definition to measure appropriate trust, (2) present a between-subject user study with 160 participants who collaborated with an AI agent in such a task. In the study, the AI agent assisted its human partner in estimating calories on a food plate by expressing its integrity through explanations focusing on either honesty, transparency, or fairness. Our results show that (a) an agent who displays its integrity by being explicit about potential biases in data or algorithms achieved appropriate trust more often compared to being honest about capability or transparent about the decision-making process, and (b) subjective trust builds up and recovers better with honesty-like integrity explanations. Our results contribute to the design of agent-based AI systems that guide humans to appropriately trust them, a formal method to measure appropriate trust, and how to support humans in calibrating their trust in AI.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mehrotra S; Degachi C; Vereschak O; Jonker C M; Tielman M L
A Systematic Review on Fostering Appropriate Trust in Human-AI Interaction: Trends, Opportunities and Challenges Journal Article
In: ACM Journal on Responsible Computing, 2024.
@article{mehrotra2024systematic,
title = {A Systematic Review on Fostering Appropriate Trust in Human-AI Interaction: Trends, Opportunities and Challenges},
author = {Mehrotra, Siddharth and Degachi, Chadha and Vereschak, Oleksandra and Jonker, Catholijn M and Tielman, Myrthe L},
doi = {10.1145/3696449},
publisher = {ACM},
address = {New York, NY},
year = {2024},
date = {2024-01-01},
journal = {ACM Journal on Responsible Computing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Orzan N; Acar E; Grossi D; Rădulescu R
Emergent Cooperation under Uncertain Incentive Alignment Proceedings Article
In: Proceedings of the 2024 International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2024), 2024.
@inproceedings{orzan2024emergent,
title = {Emergent Cooperation under Uncertain Incentive Alignment},
author = {Orzan, Nicole and Acar, Erman and Grossi, Davide and R{ă}dulescu, Roxana},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2024)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han S; Dastani M; Wang S
Learning Reward Structure with Subtasks in Reinforcement Learning Proceedings Article
In: ECAI 2024 - 27th European Conference on Artificial Intelligence, 19-24 October 2024, Santiago de Compostela, Spain - Including 13th Conference on Prestigious Applications of Intelligent Systems (PAIS 2024), pp. 2282–2289, IOS Press, 2024.
@inproceedings{DBLP:conf/ecai/HanD024,
title = {Learning Reward Structure with Subtasks in Reinforcement Learning},
author = {Shuai Han and
Mehdi Dastani and
Shihan Wang},
url = {https://doi.org/10.3233/FAIA240751},
doi = {10.3233/FAIA240751},
year = {2024},
date = {2024-01-01},
booktitle = {ECAI 2024 - 27th European Conference on Artificial Intelligence,
19-24 October 2024, Santiago de Compostela, Spain - Including 13th
Conference on Prestigious Applications of Intelligent Systems (PAIS
2024)},
volume = {392},
pages = {2282–2289},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao Y; Niu B; Dastani M; Wang S
Bootstrapped Policy Learning for Task-oriented Dialogue through Goal Shaping Proceedings Article
In: Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 4566–4580, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/emnlp/ZhaoND024,
title = {Bootstrapped Policy Learning for Task-oriented Dialogue through Goal
Shaping},
author = {Yangyang Zhao and
Ben Niu and
Mehdi Dastani and
Shihan Wang},
url = {https://aclanthology.org/2024.emnlp-main.263},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural
Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16,
2024},
pages = {4566–4580},
publisher = {Association for Computational Linguistics},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao Y; Dastani M; Long J; Wang Z; Wang S
Rescue Conversations from Dead-ends: Efficient Exploration for Task-oriented Dialogue Policy Optimization Journal Article
In: Transactions of the Association for Computational Linguistics, vol. 12, pp. 1578–1596, 2024.
@article{zhao-etal-2024-rescue,
title = {Rescue Conversations from Dead-ends: Efficient Exploration for Task-oriented Dialogue Policy Optimization},
author = {Zhao, Yangyang and
Dastani, Mehdi and
Long, Jinchuan and
Wang, Zhenyu and
Wang, Shihan},
url = {https://aclanthology.org/2024.tacl-1.86/},
doi = {10.1162/tacl_a_00717},
year = {2024},
date = {2024-01-01},
journal = {Transactions of the Association for Computational Linguistics},
volume = {12},
pages = {1578–1596},
publisher = {MIT Press},
address = {Cambridge, MA},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chang W; Wang S; Kramer S; Oey M; Allouch S B
Human-Centered AI for Dementia Care: Using Reinforcement Learning for Personalized Interventions Support in Eating and Drinking Scenarios Proceedings Article
In: HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 84–93, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/Chang0KOA24,
title = {Human-Centered AI for Dementia Care: Using Reinforcement Learning
for Personalized Interventions Support in Eating and Drinking Scenarios},
author = {Wen{-}Tseng Chang and
Shihan Wang and
Stephanie Kramer and
Michel Oey and
Somaya Ben Allouch},
url = {https://doi.org/10.3233/FAIA240185},
doi = {10.3233/FAIA240185},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings
of the Third International Conference on Hybrid Human-Artificial Intelligence,
Malmö, Sweden, 10-14 June 2024},
volume = {386},
pages = {84–93},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Loftin* R; Çelikok* M M; Van Hoof H; Kaski S; Oliehoek F A
Uncoupled Learning of Differential Stackelberg Equilibria with Commitments Proceedings Article
In: AAMAS '24: Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, 2024, (The * denotes equal contribution).
@inproceedings{loftin2024uncoupled,
title = {Uncoupled Learning of Differential Stackelberg Equilibria with Commitments},
author = {Loftin*, Robert and {Ç}elikok*, Mustafa Mert and Van Hoof, Herke and Kaski, Samuel and Oliehoek, Frans A},
year = {2024},
date = {2024-01-01},
booktitle = {AAMAS '24: Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems},
note = {The * denotes equal contribution},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ho L; Schlobach S
A General Dialogue Framework for Logic-based Argumentation Proceedings Article
In: Kampik, Timotheus; Cyras, Kristijonas; Rago, Antonio; Cocarascu, Oana (Ed.): Proceedings of the 2nd International Workshop on Argumentation for eXplainable AI co-located with the 10th International Conference on Computational Models of Argument (COMMA 2024), Hagen, Germany, September 16, 2024, pp. 41–55, CEUR-WS.org, 2024.
@inproceedings{DBLP:conf/comma/HoS24,
title = {A General Dialogue Framework for Logic-based Argumentation},
author = {Loan Ho and
Stefan Schlobach},
editor = {Timotheus Kampik and
Kristijonas Cyras and
Antonio Rago and
Oana Cocarascu},
url = {https://ceur-ws.org/Vol-3768/paper7.pdf},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2nd International Workshop on Argumentation for
eXplainable AI co-located with the 10th International Conference
on Computational Models of Argument (COMMA 2024), Hagen, Germany,
September 16, 2024},
volume = {3768},
pages = {41–55},
publisher = {CEUR-WS.org},
series = {CEUR Workshop Proceedings},
abstract = {There is an extensive body of work in logic-based argumentation, which links logic and argumentation, which is a potential solution to address inconsistencies or conflicting information in knowledge bases (KBs) by offering dialogue games as proof procedures to determine and explain the acceptance of propositions. Most existing work, though, focuses on specific logics (such as description logics, existential rules, defeasible and propositional logics), has limitations of representational aspects, for selected semantics and binary conflicts. In this paper, we generalise this work by introducing G-SAF, which generalises the notions of arguments, dialogues and dialogue trees for more general logical reasoning with inconsistencies, including the most common semantics and to facilitate reasoning with non-binary conflicts using argumentation with collective attacks (SAFs).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Malnatsky E; Wang S; Hindriks K V; Ligthart M E U
Shaping Relatable Robots: A Child-Centered Approach to Social Personalization Proceedings Article
In: Grollman, Dan; Broadbent, Elizabeth; Ju, Wendy; Soh, Harold; Williams, Tom (Ed.): Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, HRI 2024, Boulder, CO, USA, March 11-15, 2024, pp. 127–129, ACM, 2024.
@inproceedings{DBLP:conf/hri/MalnatskyWHL24,
title = {Shaping Relatable Robots: A Child-Centered Approach to Social Personalization},
author = {Elena Malnatsky and
Shenghui Wang and
Koen V. Hindriks and
Mike E. U. Ligthart},
editor = {Dan Grollman and
Elizabeth Broadbent and
Wendy Ju and
Harold Soh and
Tom Williams},
url = {https://doi.org/10.1145/3610978.3638374},
doi = {10.1145/3610978.3638374},
year = {2024},
date = {2024-01-01},
booktitle = {Companion of the 2024 ACM/IEEE International Conference on Human-Robot
Interaction, HRI 2024, Boulder, CO, USA, March 11-15, 2024},
pages = {127–129},
publisher = {ACM},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; van der Waa J; Chen P; Dobbe R; de Troya Í M D R; Bakker R M; de Boer M H T; Smit Q T S; Dell'Anna D; Erdogan E; Yolum P; Wang S; Santamaría S B; Krause L; Kamphorst B A
Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management Journal Article
In: J. Artif. Intell. Res., vol. 80, pp. 919–929, 2024.
@article{DBLP:journals/jair/DudzikWCDTBBSDEYWSKK24,
title = {Viewpoint: Hybrid Intelligence Supports Application Development for
Diabetes Lifestyle Management},
author = {Bernd Dudzik and
Jasper van der Waa and
Pei{-}Yu Chen and
Roel Dobbe and
{Í}{ñ}igo M. D. R. de Troya and
Roos M. Bakker and
Maaike H. T. de Boer and
Quirine T. S. Smit and
Davide Dell'Anna and
Emre Erdogan and
Pinar Yolum and
Shihan Wang and
Selene Baez Santamar{í}a and
Lea Krause and
Bart A. Kamphorst},
url = {https://doi.org/10.1613/jair.1.15916},
doi = {10.1613/JAIR.1.15916},
year = {2024},
date = {2024-01-01},
journal = {J. Artif. Intell. Res.},
volume = {80},
pages = {919–929},
abstract = {Type II diabetes is a complex health condition requiring patients to closely and continuously collaborate with healthcare professionals and other caretakers on lifestyle changes. While intelligent products have tremendous potential to support such Diabetes Lifestyle Management (DLM), existing products are typically conceived from a technology-centered perspective that insufficiently acknowledges the degree to which collaboration and inclusion of stakeholders is required. In this article, we argue that the emergent design philosophy of Hybrid Intelligence (HI) forms a suitable alternative lens for research and development. In particular, we (1) highlight a series of pragmatic challenges for effective AI-based DLM support based on results from an expert focus group, and (2) argue for HI’s potential to address these by outlining relevant research trajectories.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Erdogan E; Verbrugge R; Yolum P
Computational Theory of Mind with Abstractions for Effective Human-Agent Collaboration Proceedings Article
In: Dastani, Mehdi; Sichman, Jaime Simão; Alechina, Natasha; Dignum, Virginia (Ed.): Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2024, Auckland, New Zealand, May 6-10, 2024, pp. 2249–2251, International Foundation for Autonomous Agents and Multiagent Systems / ACM, 2024.
@inproceedings{DBLP:conf/atal/ErdoganVY24,
title = {Computational Theory of Mind with Abstractions for Effective Human-Agent
Collaboration},
author = {Emre Erdogan and
Rineke Verbrugge and
Pinar Yolum},
editor = {Mehdi Dastani and
Jaime Sim{ã}o Sichman and
Natasha Alechina and
Virginia Dignum},
url = {https://dl.acm.org/doi/10.5555/3635637.3663123},
doi = {10.5555/3635637.3663123},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 23rd International Conference on Autonomous Agents
and Multiagent Systems, AAMAS 2024, Auckland, New Zealand, May 6-10,
2024},
pages = {2249–2251},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems
/ ACM},
abstract = {Empowering artificially intelligent agents with capabilities that humans use regularly is crucial to enable effective human-agent collaboration. One of these crucial capabilities is the modeling of Theory of Mind (ToM) reasoning: the human ability to reason about the mental content of others such as their beliefs, desires, and goals. However, it is generally impractical to track all individual mental attitudes of all other individuals and for many practical situations not even necessary. Hence, what is important is to capture enough information to create an approximate model that is effective and flexible. Accordingly, this paper proposes a computational ToM mechanism based on abstracting beliefs and knowledge into higher-level human concepts, called abstractions, similar to the ones that guide humans to effectively interact with each other (e.g., trust). We develop an agent architecture based on epistemic logic to formalize the computational dynamics of ToM reasoning. We identify important challenges regarding effective maintenance of abstractions and accurate use of ToM reasoning and demonstrate how our approach addresses these challenges over multiagent simulations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Erdogan E; Dignum F; Verbrugge R
Effective Maintenance of Computational Theory of Mind for Human-AI Collaboration Proceedings Article
In: Lorig, Fabian; Tucker, Jason; Lindström, Adam Dahlgren; Dignum, Frank; Murukannaiah, Pradeep K.; Theodorou, Andreas; Yolum, Pinar (Ed.): HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 114–123, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/ErdoganDV24,
title = {Effective Maintenance of Computational Theory of Mind for Human-AI
Collaboration},
author = {Emre Erdogan and
Frank Dignum and
Rineke Verbrugge},
editor = {Fabian Lorig and
Jason Tucker and
Adam Dahlgren Lindström and
Frank Dignum and
Pradeep K. Murukannaiah and
Andreas Theodorou and
Pinar Yolum},
url = {https://doi.org/10.3233/FAIA240188},
doi = {10.3233/FAIA240188},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings
of the Third International Conference on Hybrid Human-Artificial Intelligence,
Malmö, Sweden, 10-14 June 2024},
volume = {386},
pages = {114–123},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {In order to enhance collaboration between humans and artificially intelligent agents, it is crucial to equip the computational agents with capabilities commonly used by humans. One of these capabilities is called Theory of Mind (ToM) reasoning, which is the human ability to reason about the mental contents of others, such as their beliefs, desires, and goals. For an agent to efficiently benefit from having a functioning computational ToM of its human partner in a collaboration, it needs to be practical in computationally tracking their mental attitudes and it needs to create approximate ToM models that can be effectively maintained. In this paper, we propose a computational ToM mechanism based on abstracting beliefs and knowledge into higher-level human concepts, referred to as abstractions. These abstractions, similar to those guiding human interactions (e.g., trust), form the basis of our modular agent architecture. We address an important challenge related to maintaining abstractions effectively, namely abstraction consistency. We propose different approaches to study this challenge in the context of a scenario inspired by a medical domain and provide an experimental evaluation over agent simulations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Erdogan E; Dignum F; Verbrugge R; Yolum P
Resolving Multi-user Privacy Conflicts with Computational Theory of Mind Proceedings Article
In: Workshop on Citizen-Centric Multiagent Systems, pp. 22, 2024.
@inproceedings{erdogan2024resolving,
title = {Resolving Multi-user Privacy Conflicts with Computational Theory of Mind},
author = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, P{ı}nar},
url = {https://eprints.soton.ac.uk/489958/1/Proceedings_of_the_Second_International_Workshop_on_Citizen-Centric_Multiagent_Systems_2024_C-MAS_2024_.pdf},
year = {2024},
date = {2024-01-01},
booktitle = {Workshop on Citizen-Centric Multiagent Systems},
pages = {22},
abstract = {Online Social Networks (OSNs) serve as digital platforms for users to share information and build relationships. These networks facilitate the sharing of diverse content, which may disclose personal information about users. Some of these contents pertain to multiple users (such as group pictures), with different privacy expectations. Sharing of such content may lead to multi-user privacy conflicts. Decision-making mechanisms are crucial to managing conflicting privacy preferences among users, reducing their effort in conflict resolution. Various mechanisms are proposed in the literature, most of which demand significant computational resources. We propose a novel approach based on computational modeling of Theory of Mind (ToM), the human ability to understand others’ mental states (e.g., their beliefs, preferences, goals, etc.), to portray users’ privacy expectations. We argue that leveraging computational ToM modeling allows the design of agents capable of accurately capturing users’ behavior and reasoning about other agents’ privacy understanding, making them effective tools in multi-user privacy conflict management. To illustrate our ideas, we consider a content-sharing scenario and point out potential benefits of using our agent-based computational ToM approach in resolution of privacy conflicts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dobbe R; Wolters A
Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context Journal Article
In: Minds Mach., vol. 34, no. 2, pp. 12, 2024.
@article{DBLP:journals/mima/DobbeW24,
title = {Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning
in Context},
author = {Roel Dobbe and
Anouk Wolters},
url = {https://doi.org/10.1007/s11023-024-09668-y},
doi = {10.1007/S11023-024-09668-Y},
year = {2024},
date = {2024-01-01},
journal = {Minds Mach.},
volume = {34},
number = {2},
pages = {12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Amin S; Renes S; Dobbe R
Designing organizational control mechanisms for consequential AI systems: towards a situated methodology (poster) Proceedings Article
In: Ubacht, Jolien; Crompvoets, Joep; Csáki, Csaba; Danneels, Lieselot; Janssen, Marijn; Johannessen, Marius Rohde; Lampoltshammer, Thomas J.; Lee, Habin; Lindgren, Ida; Hofmann, Sara; Parycek, Peter; Pereira, Gabriela Viale; Schwabe, Gerhard; Susha, Iryna; Tambouris, Efthimios; Zuiderwijk, Anneke (Ed.): Proceedings of Ongoing Research, Practitioners, Posters, Workshops, and Projects of the International Conference EGOV-CeDEM-ePart 2024, Ghent University and KU Leuven, Ghent/Leuven, Belgium, September 1-5, 2024, CEUR-WS.org, 2024.
@inproceedings{DBLP:conf/egov/AminRD24,
title = {Designing organizational control mechanisms for consequential AI
systems: towards a situated methodology (poster)},
author = {Shan Amin and
Sander Renes and
Roel Dobbe},
editor = {Jolien Ubacht and
Joep Crompvoets and
Csaba Csáki and
Lieselot Danneels and
Marijn Janssen and
Marius Rohde Johannessen and
Thomas J. Lampoltshammer and
Habin Lee and
Ida Lindgren and
Sara Hofmann and
Peter Parycek and
Gabriela Viale Pereira and
Gerhard Schwabe and
Iryna Susha and
Efthimios Tambouris and
Anneke Zuiderwijk},
url = {https://ceur-ws.org/Vol-3737/paper52.pdf},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of Ongoing Research, Practitioners, Posters, Workshops,
and Projects of the International Conference EGOV-CeDEM-ePart 2024,
Ghent University and KU Leuven, Ghent/Leuven, Belgium, September
1-5, 2024},
volume = {3737},
publisher = {CEUR-WS.org},
series = {CEUR Workshop Proceedings},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lindström A D; Methnani L; Krause L; Ericson P; de Rituerto de Troya Í M; Mollo D C; Dobbe R
AI Alignment through Reinforcement Learning from Human Feedback? Contradictions and Limitations Journal Article
In: CoRR, vol. abs/2406.18346, 2024.
@article{DBLP:journals/corr/abs-2406-18346,
title = {AI Alignment through Reinforcement Learning from Human Feedback?
Contradictions and Limitations},
author = {Adam Dahlgren Lindström and
Leila Methnani and
Lea Krause and
Petter Ericson and
{Í}{ñ}igo Martinez de Rituerto de Troya and
Dimitri Coelho Mollo and
Roel Dobbe},
url = {https://doi.org/10.48550/arXiv.2406.18346},
doi = {10.48550/ARXIV.2406.18346},
year = {2024},
date = {2024-01-01},
journal = {CoRR},
volume = {abs/2406.18346},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Delfos J; Eijk A Z; van Cranenburgh S; Chorus C G; Dobbe R I J
Integral system safety for machine learning in the public sector: An empirical account Journal Article
In: Gov. Inf. Q., vol. 41, no. 3, pp. 101963, 2024.
@article{DBLP:journals/giq/DelfosECCD24,
title = {Integral system safety for machine learning in the public sector:
An empirical account},
author = {Jeroen Delfos and
Anneke Zuiderwijk{-}van Eijk and
Sander van Cranenburgh and
Caspar G. Chorus and
Roel I. J. Dobbe},
url = {https://doi.org/10.1016/j.giq.2024.101963},
doi = {10.1016/J.GIQ.2024.101963},
year = {2024},
date = {2024-01-01},
journal = {Gov. Inf. Q.},
volume = {41},
number = {3},
pages = {101963},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
van der Meer M
Facilitating Opinion Diversity through Hybrid NLP Approaches Proceedings Article
In: Cao, Yang (Trista); Papadimitriou, Isabel; Ovalle, Anaelia; Zampieri, Marcos; Ferraro, Francis; Swayamdipta, Swabha (Ed.): Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Student Research Workshop, NAACL 2024, Mexico City, Mexico, June 18, 2024, pp. 272–284, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/naacl/Meer24,
title = {Facilitating Opinion Diversity through Hybrid NLP Approaches},
author = {Michiel van der Meer},
editor = {Yang (Trista) Cao and
Isabel Papadimitriou and
Anaelia Ovalle and
Marcos Zampieri and
Francis Ferraro and
Swabha Swayamdipta},
url = {https://doi.org/10.18653/v1/2024.naacl-srw.29},
doi = {10.18653/V1/2024.NAACL-SRW.29},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 Conference of the North American Chapter of
the Association for Computational Linguistics: Human Language Technologies:
Student Research Workshop, NAACL 2024, Mexico City, Mexico, June
18, 2024},
pages = {272–284},
publisher = {Association for Computational Linguistics},
abstract = {Modern democracies face a critical issue of declining citizen participation in decision-making. Online discussion forums are an important avenue for enhancing citizen participation. This thesis proposal 1) identifies the challenges involved in facilitating large-scale online discussions with Natural Language Processing (NLP), 2) suggests solutions to these challenges by incorporating hybrid human-AI technologies, and 3) investigates what these technologies can reveal about individual perspectives in online discussions. We propose a three-layered hierarchy for representing perspectives that can be obtained by a mixture of human intelligence and large language models. We illustrate how these representations can draw insights into the diversity of perspectives and allow us to investigate interactions in online discussions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van der Meer M; Vossen P; Jonker C M; Murukannaiah P K
Value-Sensitive Disagreement Analysis for Online Deliberation Proceedings Article
In: Lorig, Fabian; Tucker, Jason; Lindström, Adam Dahlgren; Dignum, Frank; Murukannaiah, Pradeep K.; Theodorou, Andreas; Yolum, Pinar (Ed.): HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 481–484, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/MeerVJM24,
title = {Value-Sensitive Disagreement Analysis for Online Deliberation},
author = {Michiel van der Meer and
Piek Vossen and
Catholijn M. Jonker and
Pradeep K. Murukannaiah},
editor = {Fabian Lorig and
Jason Tucker and
Adam Dahlgren Lindström and
Frank Dignum and
Pradeep K. Murukannaiah and
Andreas Theodorou and
Pinar Yolum},
url = {https://doi.org/10.3233/FAIA240231},
doi = {10.3233/FAIA240231},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings
of the Third International Conference on Hybrid Human-Artificial Intelligence,
Malmö, Sweden, 10-14 June 2024},
volume = {386},
pages = {481–484},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {Disagreements are common in online societal deliberation and may be crucial for effective collaboration, for instance in helping users understand opposing viewpoints. Although there exist automated methods for recognizing disagreement, a deeper understanding of factors that influence disagreement is currently missing. We investigate a hypothesis that differences in personal values influence disagreement in online discussions. Using Large Language Models (LLMs) for estimating both profiles of personal values and disagreement, we conduct a large-scale experiment involving 11.4M user comments. We find that the dissimilarity of value profiles correlates with disagreement only in specific cases, but that incorporating self-reported value profiles changes these results to be more undecided.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van der Meer M; Vossen P; Jonker C M; Murukannaiah P K
An Empirical Analysis of Diversity in Argument Summarization Proceedings Article
In: Graham, Yvette; Purver, Matthew (Ed.): Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2024 - Volume 1: Long Papers, St. Julian's, Malta, March 17-22, 2024, pp. 2028–2045, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/eacl/MeerVJM24,
title = {An Empirical Analysis of Diversity in Argument Summarization},
author = {Michiel van der Meer and
Piek Vossen and
Catholijn M. Jonker and
Pradeep K. Murukannaiah},
editor = {Yvette Graham and
Matthew Purver},
url = {https://aclanthology.org/2024.eacl-long.123},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 18th Conference of the European Chapter of the
Association for Computational Linguistics, EACL 2024 - Volume 1:
Long Papers, St. Julian's, Malta, March 17-22, 2024},
pages = {2028–2045},
publisher = {Association for Computational Linguistics},
abstract = {Presenting high-level arguments is a crucial task for fostering participation in online societal discussions. Current argument summarization approaches miss an important facet of this task—capturing diversity—which is important for accommodating multiple perspectives. We introduce three aspects of diversity: those of opinions, annotators, and sources. We evaluate approaches to a popular argument summarization task called Key Point Analysis, which shows how these approaches struggle to (1) represent arguments shared by few people, (2) deal with data from various sources, and (3) align with subjectivity in human-provided annotations. We find that both general-purpose LLMs and dedicated KPA models exhibit this behavior, but have complementary strengths. Further, we observe that diversification of training data may ameliorate generalization in zero-shot cases. Addressing diversity in argument summarization requires a mix of strategies to deal with subjectivity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van der Meer M; Falk N; Murukannaiah P K; Liscio E
Annotator-Centric Active Learning for Subjective NLP Tasks Proceedings Article
In: Al-Onaizan, Yaser; Bansal, Mohit; Chen, Yun-Nung (Ed.): Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 18537–18555, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/emnlp/MeerFML24,
title = {Annotator-Centric Active Learning for Subjective NLP Tasks},
author = {Michiel van der Meer and
Neele Falk and
Pradeep K. Murukannaiah and
Enrico Liscio},
editor = {Yaser Al{-}Onaizan and
Mohit Bansal and
Yun{-}Nung Chen},
url = {https://aclanthology.org/2024.emnlp-main.1031},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural
Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16,
2024},
pages = {18537–18555},
publisher = {Association for Computational Linguistics},
abstract = {Active Learning (AL) addresses the high costs of collecting human annotations by strategically annotating the most informative samples. However, for subjective NLP tasks, incorporating a wide range of perspectives in the annotation process is crucial to capture the variability in human judgments. We introduce Annotator-Centric Active Learning (ACAL), which incorporates an annotator selection strategy following data sampling. Our objective is two-fold: (1) to efficiently approximate the full diversity of human judgments, and (2) to assess model performance using annotator-centric metrics, which value minority and majority perspectives equally. We experiment with multiple annotator selection strategies across seven subjective NLP tasks, employing both traditional and novel, human-centered evaluation metrics. Our findings indicate that ACAL improves data efficiency and excels in annotator-centric performance evaluations. However, its success depends on the availability of a sufficiently large and diverse pool of annotators to sample from.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van der Meer M; Liscio E; Jonker C M; Plaat A; Vossen P; Murukannaiah P K
A Hybrid Intelligence Method for Argument Mining Journal Article
In: J. Artif. Intell. Res., vol. 80, pp. 1187–1222, 2024.
@article{DBLP:journals/jair/MeerLJPVM24,
title = {A Hybrid Intelligence Method for Argument Mining},
author = {Michiel van der Meer and
Enrico Liscio and
Catholijn M. Jonker and
Aske Plaat and
Piek Vossen and
Pradeep K. Murukannaiah},
url = {https://doi.org/10.1613/jair.1.15135},
doi = {10.1613/JAIR.1.15135},
year = {2024},
date = {2024-01-01},
journal = {J. Artif. Intell. Res.},
volume = {80},
pages = {1187–1222},
abstract = {Large-scale survey tools enable the collection of citizen feedback in opinion corpora. Extracting the key arguments from a large and noisy set of opinions helps in understanding the opinions quickly and accurately. Fully automated methods can extract arguments but (1) require large labeled datasets that induce large annotation costs and (2) work well for known viewpoints, but not for novel points of view. We propose HyEnA, a hybrid (human + AI) method for extracting arguments from opinionated texts, combining the speed of automated processing with the understanding and reasoning capabilities of humans. We evaluate HyEnA on three citizen feedback corpora. We find that, on the one hand, HyEnA achieves higher coverage and precision than a state-of-the-art automated method when compared to a common set of diverse opinions, justifying the need for human insight. On the other hand, HyEnA requires less human effort and does not compromise quality compared to (fully manual) expert analysis, demonstrating the benefit of combining human and artificial intelligence.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Steging C; van Leeuwen L
A hybrid approach to legal textual entailment Proceedings Article
In: Eighteenth International Workshop on Juris-Informatics (JURISIN 2024), Hamamatsu, Japan, 2024.
@inproceedings{StegingJURISIN2024,
title = {A hybrid approach to legal textual entailment},
author = {Steging, C. and
{van Leeuwen}, L.},
url = {https://jurisinformaticscenter.github.io/jurisin2024/jurisin2024local_proceedings.pdf},
year = {2024},
date = {2024-01-01},
booktitle = {Eighteenth International Workshop on Juris-Informatics (JURISIN 2024)},
address = {Hamamatsu, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Top J D; Jonker C; Verbrugge R; de Weerd H
Predictive Theory of Mind Models Based on Public Announcement Logic Proceedings Article
In: Gierasimczuk, Nina; Velázquez-Quesada, Fernando R. (Ed.): Dynamic Logic. New Trends and Applications, pp. 85–103, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-51777-8.
@inproceedings{10.1007/978-3-031-51777-8_6,
title = {Predictive Theory of Mind Models Based on Public Announcement Logic},
author = {Top, Jakob Dirk
and Jonker, Catholijn
and Verbrugge, Rineke
and de Weerd, Harmen},
editor = {Gierasimczuk, Nina
and Velázquez-Quesada, Fernando R.},
isbn = {978-3-031-51777-8},
year = {2024},
date = {2024-01-01},
booktitle = {Dynamic Logic. New Trends and Applications},
pages = {85–103},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Epistemic logic can be used to reason about statements such as `I know that you know that I know that $\varphi$'. In this logic, and its extensions, it is commonly assumed that agents can reason about epistemic statements of arbitrary nesting depth. In contrast, empirical findings on Theory of Mind, the ability to (recursively) reason about mental states of others, show that human recursive reasoning capability has an upper bound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zafarghandi A K; Qi J; Hollink L; Sang E T K; Ceolin D
Investigating the Usefulness of Product Reviews Through Bipolar Argumentation Frameworks Proceedings Article
In: International Conference on Web Engineering (ICWE), Springer, 2024.
@inproceedings{DBLP:conf/icwe/ZafarghandiQHSC24,
title = {Investigating the Usefulness of Product Reviews Through Bipolar Argumentation Frameworks},
author = {Atefeh {Keshavarzi Zafarghandi} and Ji Qi and Laura Hollink and Erik Tjong Kim Sang and Davide Ceolin},
year = {2024},
date = {2024-01-01},
booktitle = {International Conference on Web Engineering (ICWE)},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zafarghandi A K; Wallner J P
Complexity of Semi-Stable Semantics in Abstract Dialectical Frameworks Proceedings Article
In: Proceedings of the 10th International Conference on Computational Models of Argument (COMMA 2024), 2024.
@inproceedings{ZafarghandiW24,
title = {Complexity of Semi-Stable Semantics in Abstract Dialectical Frameworks},
author = {Atefeh {Keshavarzi Zafarghandi} and Johannes Peter Wallner},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 10th International Conference on Computational Models of Argument (COMMA 2024)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zafarghandi A K; Koopmann P
Using ADFs for Inconsistency-Tolerant Query Answering with Existential Rules Proceedings Article
In: 22nd International Workshop on Non-Monotonic Reasoning (NMR), 2024.
@inproceedings{DBLP:NMR:ZafarghandiK24,
title = {Using ADFs for Inconsistency-Tolerant Query Answering with Existential Rules},
author = {Atefeh {Keshavarzi Zafarghandi} and Patrick Koopmann},
year = {2024},
date = {2024-01-01},
booktitle = {22nd International Workshop on Non-Monotonic Reasoning (NMR)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lera-Leri R X; Liscio E; Bistaffa F; Jonker C M; Lopez-Sanchez M; Murukannaiah P K; Rodriguez-Aguilar J A; Salas-Molina F
Aggregating value systems for decision support Journal Article
In: Knowledge-Based Systems, vol. 287, pp. 111453, 2024.
@article{LERALERI2024111453,
title = {Aggregating value systems for decision support},
author = {Roger X. Lera-Leri and Enrico Liscio and Filippo Bistaffa and Catholijn M. Jonker
and Maite Lopez-Sanchez and Pradeep K. Murukannaiah and Juan A. Rodriguez-Aguilar
and Francisco Salas-Molina},
url = {https://www.sciencedirect.com/science/article/pii/S0950705124000881},
year = {2024},
date = {2024-01-01},
journal = {Knowledge-Based Systems},
volume = {287},
pages = {111453},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Park J; Liscio E; Murukannaiah P
Morality is Non-Binary: Building a Pluralist Moral Sentence Embedding Space using Contrastive Learning Proceedings Article
In: Findings of the Association for Computational Linguistics: EACL 2024, pp. 654–673, Association for Computational Linguistics, St. Julian's, Malta, 2024.
@inproceedings{park-etal-2024-morality,
title = {Morality is Non-Binary: Building a Pluralist Moral Sentence Embedding Space using Contrastive Learning},
author = {Park, Jeongwoo and Liscio, Enrico and Murukannaiah, Pradeep},
url = {https://aclanthology.org/2024.findings-eacl.45},
year = {2024},
date = {2024-01-01},
booktitle = {Findings of the Association for Computational Linguistics: EACL 2024},
pages = {654–673},
publisher = {Association for Computational Linguistics},
address = {St. Julian's, Malta},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Javdani Rikhtehgar D; Tiddi I; Wang S; Schlobach S; Heylen D
Assessing the HI-ness of Virtual Heritage Applications with Knowledge Engineering Book Section
In: HHAI 2024: Hybrid Human AI Systems for the Social Good, pp. 173–187, IOS Press, 2024.
@incollection{javdani2024assessing,
title = {Assessing the HI-ness of Virtual Heritage Applications with Knowledge Engineering},
author = {Javdani Rikhtehgar, Delaram and Tiddi, Ilaria and Wang, Shenghui and Schlobach, Stefan and Heylen, Dirk},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good},
pages = {173–187},
publisher = {IOS Press},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Jorge C C; van Zoelen E M; Verhagen R; Mehrotra S; Jonker C M; Tielman M L
Appropriate context-dependent artificial trust in human-machine teamwork Book Section
In: Putting AI in the Critical Loop, pp. 41–60, Elsevier, 2024.
@incollection{jorge2024appropriate,
title = {Appropriate context-dependent artificial trust in human-machine teamwork},
author = {Jorge, Carolina Centeio and van Zoelen, Emma M and Verhagen, Ruben and Mehrotra, Siddharth and Jonker, Catholijn M and Tielman, Myrthe L},
year = {2024},
date = {2024-01-01},
booktitle = {Putting AI in the Critical Loop},
pages = {41–60},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Aydın H
Assisting Users in Privacy Conflicts with Partially Observable Multi-Agent Reinforcement Learning Book Section
In: HHAI 2024: Hybrid Human AI Systems for the Social Good, pp. 55–63, IOS Press, 2024.
@incollection{aydin2024assisting,
title = {Assisting Users in Privacy Conflicts with Partially Observable Multi-Agent Reinforcement Learning},
author = {Ayd{ı}n, Hüseyin},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good},
pages = {55–63},
publisher = {IOS Press},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Otten M; Jagesar A R; Dam T A; Biesheuvel L A; den Hengst F; Ziesemer K A; Thoral P J; de Grooth H; Girbes A R; François-Lavet V; others
Does reinforcement learning improve outcomes for critically ill patients? A systematic review and level-of-readiness assessment Journal Article
In: Critical Care Medicine, vol. 52, no. 2, pp. e79–e88, 2024.
@article{otten2024does,
title = {Does reinforcement learning improve outcomes for critically ill patients? A systematic review and level-of-readiness assessment},
author = {Otten, Martijn and Jagesar, Ameet R and Dam, Tariq A and Biesheuvel, Laurens A and den Hengst, Floris and Ziesemer, Kirsten A and Thoral, Patrick J and de Grooth, Harm-Jan and Girbes, Armand RJ and Fran{ç}ois-Lavet, Vincent and others},
year = {2024},
date = {2024-01-01},
journal = {Critical Care Medicine},
volume = {52},
number = {2},
pages = {e79–e88},
publisher = {LWW},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
den Hengst F; Wolter R; Altmeyer P; Kaygan A
Conformal Intent Classification and Clarification for Fast and Accurate Intent Recognition Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2024, pp. 2412–2432, 2024.
@inproceedings{hengst2024conformal,
title = {Conformal Intent Classification and Clarification for Fast and Accurate Intent Recognition},
author = {den Hengst, Floris and Wolter, Ralf and Altmeyer, Patrick and Kaygan, Arda},
year = {2024},
date = {2024-01-01},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2024},
pages = {2412–2432},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen P; Baez Santamaria S; de Boer M H; den Hengst F; Kamphorst B A; Smit Q; Wang S; Wolff J
Intelligent Support Systems for Lifestyle Change: Integrating Dialogue, Information Extraction, and Reasoning Book Section
In: HHAI 2024: Hybrid Human AI Systems for the Social Good, pp. 457–459, IOS Press, 2024.
@incollection{chen2024intelligent,
title = {Intelligent Support Systems for Lifestyle Change: Integrating Dialogue, Information Extraction, and Reasoning},
author = {Chen, Pei-Yu and Baez Santamaria, Selene and de Boer, Maaike HT and den Hengst, Floris and Kamphorst, Bart A and Smit, Quirine and Wang, Shihan and Wolff, Johanna},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good},
pages = {457–459},
publisher = {IOS Press},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Zhang J E; Hilpert B; Broekens J; Jokinen J P
Simulating Emotions With an Integrated Computational Model of Appraisal and Reinforcement Learning Proceedings Article
In: Proceedings of the CHI Conference on Human Factors in Computing Systems, pp. 1–12, 2024.
@inproceedings{zhang2024simulating,
title = {Simulating Emotions With an Integrated Computational Model of Appraisal and
Reinforcement Learning},
author = {Zhang, Jiayi Eurus and Hilpert, Bernhard and Broekens, Joost and Jokinen, Jussi P. P.},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems},
pages = {1–12},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}