Apeiron A S; Dell’Anna D; Murukannaiah P K; Yolum P
Model and Mechanisms of Consent for Responsible Autonomy Proceedings Article
In: 24th International Conference on Autonomous Agents and Multiagent Systems, 2025.
@inproceedings{apeiron2025model,
title = {Model and Mechanisms of Consent for Responsible Autonomy},
author = {Apeiron, Anastasia S. and Dell'Anna, Davide and Murukannaiah, Pradeep K. and Yolum, P{\i}nar},
year = {2025},
date = {2025-01-01},
booktitle = {24th International Conference on Autonomous Agents and Multiagent Systems},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dobbe R; Wolters A
Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context Journal Article
In: Minds and Machines, vol. 34, no. 2, pp. 12, 2024, ISSN: 1572-8641.
@article{dobbe_toward_2024,
  author    = {Dobbe, Roel and Wolters, Anouk},
  title     = {Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context},
  journal   = {Minds and Machines},
  volume    = {34},
  number    = {2},
  pages     = {12},
  year      = {2024},
  date      = {2024-05-01},
  issn      = {1572-8641},
  doi       = {10.1007/s11023-024-09668-y},
  url       = {https://doi.org/10.1007/s11023-024-09668-y},
  urldate   = {2024-05-30},
  abstract  = {This paper provides an empirical and conceptual account on seeing machine learning models as part of a sociotechnical system to identify relevant vulnerabilities emerging in the context of use. As ML is increasingly adopted in socially sensitive and safety-critical domains, many ML applications end up not delivering on their promises, and contributing to new forms of algorithmic harm. There is still a lack of empirical insights as well as conceptual tools and frameworks to properly understand and design for the impact of ML models in their sociotechnical context. In this paper, we follow a design science research approach to work towards such insights and tools. We center our study in the financial industry, where we first empirically map recently emerging MLOps practices to govern ML applications, and corroborate our insights with recent literature. We then perform an integrative literature research to identify a long list of vulnerabilities that emerge in the sociotechnical context of ML applications, and we theorize these along eight dimensions. We then perform semi-structured interviews in two real-world use cases and across a broad set of relevant actors and organizations, to validate the conceptual dimensions and identify challenges to address sociotechnical vulnerabilities in the design and governance of ML-based systems. The paper proposes a set of guidelines to proactively and integrally address both the dimensions of sociotechnical vulnerability, as well as the challenges identified in the empirical use case research, in the organization of MLOps practices.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Reimann M M; Kunneman F A; Oertel C; Hindriks K V
A Survey on Dialogue Management in Human-Robot Interaction Journal Article
In: J. Hum.-Robot Interact., 2024, (Just Accepted).
@article{10.1145/3648605,
title = {A Survey on Dialogue Management in Human-Robot Interaction},
author = {Reimann, Merle M. and Kunneman, Florian A. and Oertel, Catharine and Hindriks, Koen V.},
url = {https://doi.org/10.1145/3648605},
doi = {10.1145/3648605},
year = {2024},
date = {2024-03-01},
journal = {ACM Transactions on Human-Robot Interaction},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {As social robots see increasing deployment within the general public, improving the interaction with those robots is essential. Spoken language offers an intuitive interface for the human-robot interaction (HRI), with dialogue management (DM) being a key component in those interactive systems. Yet, to overcome current challenges and manage smooth, informative and engaging interaction a more structural approach to combining HRI and DM is needed. In this systematic review, we analyse the current use of DM in HRI and focus on the type of dialogue manager used, its capabilities, evaluation methods and the challenges specific to DM in HRI. We identify the challenges and current scientific frontier related to the DM approach, interaction domain, robot appearance, physical situatedness and multimodality.},
note = {Just Accepted},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
den Hengst F; Otten M; Elbers P; van Harmelen F; François-Lavet V; Hoogendoorn M
Guideline-informed reinforcement learning for mechanical ventilation in critical care Journal Article
In: Artificial Intelligence in Medicine, vol. 147, pp. 102742, 2024, ISSN: 0933-3657.
@article{DENHENGST2024102742,
title = {Guideline-informed reinforcement learning for mechanical ventilation in critical care},
author = {Floris {den Hengst} and Martijn Otten and Paul Elbers and Frank {van Harmelen} and Vincent François-Lavet and Mark Hoogendoorn},
url = {https://www.sciencedirect.com/science/article/pii/S0933365723002567},
doi = {10.1016/j.artmed.2023.102742},
issn = {0933-3657},
year = {2024},
date = {2024-01-01},
journal = {Artificial Intelligence in Medicine},
volume = {147},
pages = {102742},
abstract = {Reinforcement Learning (RL) has recently found many applications in the healthcare domain thanks to its natural fit to clinical decision-making and ability to learn optimal decisions from observational data. A key challenge in adopting RL-based solution in clinical practice, however, is the inclusion of existing knowledge in learning a suitable solution. Existing knowledge from e.g. medical guidelines may improve the safety of solutions, produce a better balance between short- and long-term outcomes for patients and increase trust and adoption by clinicians. We present a framework for including knowledge available from medical guidelines in RL. The framework includes components for enforcing safety constraints and an approach that alters the learning signal to better balance short- and long-term outcomes based on these guidelines. We evaluate the framework by extending an existing RL-based mechanical ventilation (MV) approach with clinically established ventilation guidelines. Results from off-policy policy evaluation indicate that our approach has the potential to decrease 90-day mortality while ensuring lung protective ventilation. This framework provides an important stepping stone towards implementations of RL in clinical practice and opens up several avenues for further research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reimann M; van de Graaf J; van Gulik N; van de Sanden S; Verhagen T; Hindriks K
Social Robots in the Wild and the Novelty Effect Proceedings Article
In: Ali, Abdulaziz Al; Cabibihan, John-John; Meskin, Nader; Rossi, Silvia; Jiang, Wanyue; He, Hongsheng; Ge, Shuzhi Sam (Ed.): Social Robotics, pp. 38–48, Springer Nature Singapore, Singapore, 2024, ISBN: 978-981-99-8718-4.
@inproceedings{10.1007/978-981-99-8718-4_4,
title = {Social Robots in the Wild and the Novelty Effect},
author = {Reimann, Merle and van de Graaf, Jesper and van Gulik, Nina and van de Sanden, Stephanie and Verhagen, Tibert and Hindriks, Koen},
editor = {Ali, Abdulaziz Al and Cabibihan, John-John and Meskin, Nader and Rossi, Silvia and Jiang, Wanyue and He, Hongsheng and Ge, Shuzhi Sam},
url = {https://rdcu.be/dtXE5},
isbn = {978-981-99-8718-4},
year = {2024},
date = {2024-01-01},
booktitle = {Social Robotics},
pages = {38--48},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {We designed a wine recommendation robot and deployed it in a small supermarket. In a study aimed to evaluate our design we found that people with no intent to buy wine were interacting with the robot rather than the intended audience of wine-buying customers. Behavioural data, moreover, suggests a very different evaluation of the robot than the surveys that were completed. We also found that groups were interacting more with the robot than individuals, a finding that has been reported more often in the literature. All of these findings taken together suggest that a novelty effect may have been at play. It also suggests that field studies should take this effect more seriously. The main contribution of our work is in identifying and proposing a set of indicators and thresholds that can be used to identify that a novelty effect is present. We argue that it is important to focus more on measuring attitudes towards robots that may explain behaviour due to novelty effects. Our findings also suggest research should focus more on verifying whether real user needs are met.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu C; Dastani M; Wang S
A survey of multi-agent deep reinforcement learning with communication Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 38, no. 1, pp. 4, 2024.
@article{zhu2024survey,
title = {A survey of multi-agent deep reinforcement learning with communication},
author = {Zhu, Changxi and Dastani, Mehdi and Wang, Shihan},
doi = {10.1007/s10458-023-09633-6},
year = {2024},
date = {2024-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {38},
number = {1},
pages = {4},
publisher = {Springer},
abstract = {Communication is an effective mechanism for coordinating the behaviors of multiple agents, broadening their views of the environment, and to support their collaborations. In the field of multi-agent deep reinforcement learning (MADRL), agents can improve the overall learning performance and achieve their objectives by communication. Agents can communicate various types of messages, either to all agents or to specific agent groups, or conditioned on specific constraints. With the growing body of research work in MADRL with communication (Comm-MADRL), there is a lack of a systematic and structural approach to distinguish and classify existing Comm-MADRL approaches. In this paper, we survey recent works in the Comm-MADRL field and consider various aspects of communication that can play a role in designing and developing multi-agent reinforcement learning systems. With these aspects in mind, we propose 9 dimensions along which Comm-MADRL approaches can be analyzed, developed, and compared. By projecting existing works into the multi-dimensional space, we discover interesting trends. We also propose some novel directions for designing future Comm-MADRL systems through exploring possible combinations of the dimensions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dudzik B J W; van der Waa J S; Chen P; Dobbe R; de Troya Í M D R; Bakker R M; de Boer M H T; Smit Q T S; Dell'Anna D; Erdogan E; Yolum P; Wang S; Baez Santamaria S; Krause L; Kamphorst B A
Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management Journal Article
In: Journal of Artificial Intelligence Research, vol. 80, pp. 919–929, 2024.
@article{dudzik2024,
title = {Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management},
author = {Dudzik, Bernd J. W. and van der Waa, Jasper S. and Chen, Pei-Yu and Dobbe, Roel and de Troya, Íñigo M.D.R. and Bakker, Roos M. and de Boer, Maaike H. T. and Smit, Quirine T.S. and Dell'Anna, Davide and Erdogan, Emre and Yolum, Pinar and Wang, Shihan and Baez Santamaria, Selene and Krause, Lea and Kamphorst, Bart A.},
year = {2024},
date = {2024-01-01},
journal = {Journal of Artificial Intelligence Research},
volume = {80},
pages = {919--929},
internal-note = {Possible duplicate of entry DBLP:journals/jair/DudzikWCDTBBSDEYWSKK24 (same work, different key) -- consolidate},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jorge C C; van Zoelen E M; Verhagen R; Mehrotra S; Jonker C M; Tielman M L
Appropriate Context-Dependent Artificial Trust in Human-Machine Teamwork Book Chapter
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop, pp. 41-60, Academic Press, 2024, ISBN: 978-0-443-15988-6.
@incollection{CENTEIOJORGE202441,
title = {Appropriate Context-Dependent Artificial Trust in Human-Machine Teamwork},
author = {Carolina {Centeio Jorge} and Emma M. {van Zoelen} and Ruben Verhagen and Siddharth Mehrotra and Catholijn M. Jonker and Myrthe L. Tielman},
editor = {Prithviraj Dasgupta and James Llinas and Tony Gillespie and Scott Fouse and William Lawless and Ranjeev Mittu and Donald Sofge},
url = {https://www.sciencedirect.com/science/article/pii/B9780443159886000078},
doi = {10.1016/B978-0-443-15988-6.00007-8},
isbn = {978-0-443-15988-6},
year = {2024},
date = {2024-01-01},
booktitle = {Putting AI in the Critical Loop},
chapter = {4},
pages = {41--60},
publisher = {Academic Press},
note = {This document is the result of the research project funded by AI*MAN lab from TU Delft AI Initiative},
abstract = {As human-machine teams become a more common scenario, we need to ensure mutual trust between humans and machines. More important than having trust, we need all teammates to trust each other appropriately. This means that they should not overtrust or undertrust each other, avoiding risks and inefficiencies, respectively. We usually think of natural trust, that is, humans trusting machines, but we should also consider artificial trust, that is, artificial agents trusting humans. Appropriate artificial trust allows the agents to interpret human behavior and predict their behavior in a certain context. In this chapter, we explore how we can define this context in terms of task and team characteristics. We present a taxonomy that shows how trust is context-dependent. In fact, we propose that no trust model presented in the literature fits all contexts and argue that our taxonomy facilitates the choice of the trust model that better fits a certain context. The taxonomy helps to understand which internal characteristics of the teammate (krypta) are important to consider and how they will show in behavior cues (manifesta). This taxonomy can also be used to help human-machine teams’ researchers in the problem definition and process of experimental design as it allows a detailed characterization of the task and team configuration. Furthermore, we propose a formalization of the belief of trust as context-dependent trustworthiness, and show how beliefs of trust can be used to reach appropriate trust. Our work provides a starting point to implement mutual appropriate trust in human-machine teams.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Degachi C; Mehrotra S; Yurrita M; Niforatos E; Tielman M L
Practising Appropriate Trust in Human-Centred AI Design Proceedings Article
In: Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2024, ISBN: 9798400703317.
@inproceedings{10.1145/3613905.3650825,
  author    = {Degachi, Chadha and Mehrotra, Siddharth and Yurrita, Mireia and Niforatos, Evangelos and Tielman, Myrthe Lotte},
  title     = {Practising Appropriate Trust in Human-Centred AI Design},
  booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI EA '24},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2024},
  date      = {2024-01-01},
  isbn      = {9798400703317},
  doi       = {10.1145/3613905.3650825},
  url       = {https://doi.org/10.1145/3613905.3650825},
  abstract  = {Appropriate trust, trust which aligns with system trustworthiness, in Artificial Intelligence (AI) systems has become an important area of research. However, there remains debate in the community about how to design for appropriate trust. This debate is a result of the complex nature of trust in AI, which can be difficult to understand and evaluate, as well as the lack of holistic approaches to trust. In this paper, we aim to clarify some of this debate by operationalising appropriate trust within the context of the Human-Centred AI Design (HCD) process. To do so, we organised three workshops with 13 participants total from design and development backgrounds. We carried out design activities to stimulate discussion on appropriate trust in the HCD process. This paper aims to help researchers and practitioners understand appropriate trust in AI through a design lens by illustrating how it interacts with the HCD process.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mehrotra S; Jorge C C; Jonker C M; Tielman M L
Integrity-based Explanations for Fostering Appropriate Trust in AI Agents Journal Article
In: ACM Trans. Interact. Intell. Syst., vol. 14, no. 1, 2024, ISSN: 2160-6455.
@article{10.1145/3610578,
  author    = {Mehrotra, Siddharth and Jorge, Carolina Centeio and Jonker, Catholijn M. and Tielman, Myrthe L.},
  title     = {Integrity-based Explanations for Fostering Appropriate Trust in AI Agents},
  journal   = {ACM Trans. Interact. Intell. Syst.},
  volume    = {14},
  number    = {1},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2024},
  date      = {2024-01-01},
  issn      = {2160-6455},
  doi       = {10.1145/3610578},
  url       = {https://doi.org/10.1145/3610578},
  abstract  = {Appropriate trust is an important component of the interaction between people and AI systems, in that “inappropriate” trust can cause disuse, misuse, or abuse of AI. To foster appropriate trust in AI, we need to understand how AI systems can elicit appropriate levels of trust from their users. Out of the aspects that influence trust, this article focuses on the effect of showing integrity. In particular, this article presents a study of how different integrity-based explanations made by an AI agent affect the appropriateness of trust of a human in that agent. To explore this, (1) we provide a formal definition to measure appropriate trust, (2) present a between-subject user study with 160 participants who collaborated with an AI agent in such a task. In the study, the AI agent assisted its human partner in estimating calories on a food plate by expressing its integrity through explanations focusing on either honesty, transparency, or fairness. Our results show that (a) an agent who displays its integrity by being explicit about potential biases in data or algorithms achieved appropriate trust more often compared to being honest about capability or transparent about the decision-making process, and (b) subjective trust builds up and recovers better with honesty-like integrity explanations. Our results contribute to the design of agent-based AI systems that guide humans to appropriately trust them, a formal method to measure appropriate trust, and how to support humans in calibrating their trust in AI.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mehrotra S; Degachi C; Vereschak O; Jonker C M; Tielman M L
A Systematic Review on Fostering Appropriate Trust in Human-AI Interaction: Trends, Opportunities and Challenges Journal Article
In: ACM Journal on Responsible Computing, 2024.
@article{mehrotra2024systematic,
title = {A Systematic Review on Fostering Appropriate Trust in Human-AI Interaction: Trends, Opportunities and Challenges},
author = {Mehrotra, Siddharth and Degachi, Chadha and Vereschak, Oleksandra and Jonker, Catholijn M and Tielman, Myrthe L},
doi = {10.1145/3696449},
year = {2024},
date = {2024-01-01},
journal = {ACM Journal on Responsible Computing},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Orzan N; Acar E; Grossi D; Rădulescu R
Emergent Cooperation under Uncertain Incentive Alignment Proceedings Article
In: Proceedings of the 2024 International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2024), 2024.
@inproceedings{orzan2024emergent,
title = {Emergent Cooperation under Uncertain Incentive Alignment},
author = {Orzan, Nicole and Acar, Erman and Grossi, Davide and R{\u{a}}dulescu, Roxana},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2024)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Orzan N; Acar E; Grossi D; Mannion P; Rădulescu R
Learning in Multi-Objective Public Goods Games with Non-Linear Utilities Proceedings Article
In: Proceedings of the 27th European Conference on Artificial Intelligence (ECAI 2024), 2024, ((in press)).
@inproceedings{orzan2024mopgg,
title = {Learning in Multi-Objective Public Goods Games with Non-Linear Utilities},
author = {Orzan, Nicole and Acar, Erman and Grossi, Davide and Mannion, Patrick and R{\u{a}}dulescu, Roxana},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 27th European Conference on Artificial Intelligence (ECAI 2024)},
note = {in press},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han S; Dastani M; Wang S
Learning Reward Structure with Subtasks in Reinforcement Learning Proceedings Article
In: ECAI 2024 - 27th European Conference on Artificial Intelligence, 19-24 October 2024, Santiago de Compostela, Spain - Including 13th Conference on Prestigious Applications of Intelligent Systems (PAIS 2024), pp. 2282–2289, IOS Press, 2024.
@inproceedings{DBLP:conf/ecai/HanD024,
title = {Learning Reward Structure with Subtasks in Reinforcement Learning},
author = {Shuai Han and Mehdi Dastani and Shihan Wang},
url = {https://doi.org/10.3233/FAIA240751},
doi = {10.3233/FAIA240751},
year = {2024},
date = {2024-01-01},
booktitle = {ECAI 2024 - 27th European Conference on Artificial Intelligence, 19-24 October 2024, Santiago de Compostela, Spain - Including 13th Conference on Prestigious Applications of Intelligent Systems (PAIS 2024)},
volume = {392},
pages = {2282--2289},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao Y; Niu B; Dastani M; Wang S
Bootstrapped Policy Learning for Task-oriented Dialogue through Goal Shaping Proceedings Article
In: Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 4566–4580, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/emnlp/ZhaoND024,
title = {Bootstrapped Policy Learning for Task-oriented Dialogue through Goal Shaping},
author = {Yangyang Zhao and Ben Niu and Mehdi Dastani and Shihan Wang},
url = {https://aclanthology.org/2024.emnlp-main.263},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024},
pages = {4566--4580},
publisher = {Association for Computational Linguistics},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao Y; Dastani M; Long J; Wang Z; Wang S
Rescue Conversations from Dead-ends: Efficient Exploration for Task-oriented Dialogue Policy Optimization Journal Article
In: Transactions of the Association for Computational Linguistics, vol. 12, pp. 1578–1596, 2024.
@article{zhao-etal-2024-rescue,
title = {Rescue Conversations from Dead-ends: Efficient Exploration for Task-oriented Dialogue Policy Optimization},
author = {Zhao, Yangyang and Dastani, Mehdi and Long, Jinchuan and Wang, Zhenyu and Wang, Shihan},
url = {https://aclanthology.org/2024.tacl-1.86/},
doi = {10.1162/tacl_a_00717},
year = {2024},
date = {2024-01-01},
journal = {Transactions of the Association for Computational Linguistics},
volume = {12},
pages = {1578--1596},
publisher = {MIT Press},
address = {Cambridge, MA},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chang W; Wang S; Kramer S; Oey M; Allouch S B
Human-Centered AI for Dementia Care: Using Reinforcement Learning for Personalized Interventions Support in Eating and Drinking Scenarios Proceedings Article
In: HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 84–93, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/Chang0KOA24,
title = {Human-Centered AI for Dementia Care: Using Reinforcement Learning for Personalized Interventions Support in Eating and Drinking Scenarios},
author = {Wen{-}Tseng Chang and Shihan Wang and Stephanie Kramer and Michel Oey and Somaya Ben Allouch},
url = {https://doi.org/10.3233/FAIA240185},
doi = {10.3233/FAIA240185},
year = {2024},
date = {2024-01-01},
booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024},
volume = {386},
pages = {84--93},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Loftin* R; Çelikok* M M; Van Hoof H; Kaski S; Oliehoek F A
Uncoupled Learning of Differential Stackelberg Equilibria with Commitments Proceedings Article
In: AAMAS '24: Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, 2024, (The * denotes equal contribution).
@inproceedings{loftin2024uncoupled,
title = {Uncoupled Learning of Differential Stackelberg Equilibria with Commitments},
author = {Loftin, Robert and {\c{C}}elikok, Mustafa Mert and Van Hoof, Herke and Kaski, Samuel and Oliehoek, Frans A},
year = {2024},
date = {2024-01-01},
booktitle = {AAMAS '24: Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems},
note = {Robert Loftin and Mustafa Mert {\c{C}}elikok contributed equally},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ho L; Schlobach S
A General Dialogue Framework for Logic-based Argumentation Proceedings Article
In: Kampik, Timotheus; Cyras, Kristijonas; Rago, Antonio; Cocarascu, Oana (Ed.): Proceedings of the 2nd International Workshop on Argumentation for eXplainable AI co-located with the 10th International Conference on Computational Models of Argument (COMMA 2024), Hagen, Germany, September 16, 2024, pp. 41–55, CEUR-WS.org, 2024.
@inproceedings{DBLP:conf/comma/HoS24,
title = {A General Dialogue Framework for Logic-based Argumentation},
author = {Loan Ho and Stefan Schlobach},
editor = {Timotheus Kampik and Kristijonas Cyras and Antonio Rago and Oana Cocarascu},
url = {https://ceur-ws.org/Vol-3768/paper7.pdf},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 2nd International Workshop on Argumentation for eXplainable AI co-located with the 10th International Conference on Computational Models of Argument (COMMA 2024), Hagen, Germany, September 16, 2024},
volume = {3768},
pages = {41--55},
publisher = {CEUR-WS.org},
series = {CEUR Workshop Proceedings},
abstract = {There is an extensive body of work in logic-based argumentation, which links logic and argumentation, which is a potential solution to address inconsistencies or conflicting information in knowledge bases (KBs) by offering dialogue games as proof procedures to determine and explain the acceptance of propositions. Most existing work, though, focuses on specific logics (such as description logics, existential rules, defeasible and propositional logics), has limitations of representational aspects, for selected semantics and binary conflicts. In this paper, we generalise this work by introducing G-SAF, which generalises the notions of arguments, dialogues and dialogue trees for more general logical reasoning with inconsistencies, including the most common semantics and to facilitate reasoning with non-binary conflicts using argumentation with collective attacks (SAFs).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Malnatsky E; Wang S; Hindriks K V; Ligthart M E U
Shaping Relatable Robots: A Child-Centered Approach to Social Personalization Proceedings Article
In: Grollman, Dan; Broadbent, Elizabeth; Ju, Wendy; Soh, Harold; Williams, Tom (Ed.): Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, HRI 2024, Boulder, CO, USA, March 11-15, 2024, pp. 127–129, ACM, 2024.
@inproceedings{DBLP:conf/hri/MalnatskyWHL24,
title = {Shaping Relatable Robots: A Child-Centered Approach to Social Personalization},
author = {Elena Malnatsky and Shenghui Wang and Koen V. Hindriks and Mike E. U. Ligthart},
editor = {Dan Grollman and Elizabeth Broadbent and Wendy Ju and Harold Soh and Tom Williams},
url = {https://doi.org/10.1145/3610978.3638374},
doi = {10.1145/3610978.3638374},
year = {2024},
date = {2024-01-01},
booktitle = {Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, HRI 2024, Boulder, CO, USA, March 11-15, 2024},
pages = {127--129},
publisher = {ACM},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; van der Waa J; Chen P; Dobbe R; de Troya Í M D R; Bakker R M; de Boer M H T; Smit Q T S; Dell'Anna D; Erdogan E; Yolum P; Wang S; Baez Santamaría S; Krause L; Kamphorst B A
Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management Journal Article
In: J. Artif. Intell. Res., vol. 80, pp. 919–929, 2024.
@article{DBLP:journals/jair/DudzikWCDTBBSDEYWSKK24,
title = {Viewpoint: Hybrid Intelligence Supports Application Development for Diabetes Lifestyle Management},
author = {Bernd Dudzik and Jasper van der Waa and Pei{-}Yu Chen and Roel Dobbe and {\'I}{\~n}igo M. D. R. de Troya and Roos M. Bakker and Maaike H. T. de Boer and Quirine T. S. Smit and Davide Dell'Anna and Emre Erdogan and Pinar Yolum and Shihan Wang and Selene Baez Santamar{\'\i}a and Lea Krause and Bart A. Kamphorst},
url = {https://doi.org/10.1613/jair.1.15916},
doi = {10.1613/jair.1.15916},
year = {2024},
date = {2024-01-01},
journal = {J. Artif. Intell. Res.},
volume = {80},
pages = {919--929},
abstract = {Type II diabetes is a complex health condition requiring patients to closely and continuously collaborate with healthcare professionals and other caretakers on lifestyle changes. While intelligent products have tremendous potential to support such Diabetes Lifestyle Management (DLM), existing products are typically conceived from a technology-centered perspective that insufficiently acknowledges the degree to which collaboration and inclusion of stakeholders is required. In this article, we argue that the emergent design philosophy of Hybrid Intelligence (HI) forms a suitable alternative lens for research and development. In particular, we (1) highlight a series of pragmatic challenges for effective AI-based DLM support based on results from an expert focus group, and (2) argue for HI’s potential to address these by outlining relevant research trajectories.},
internal-note = {Possible duplicate of entry dudzik2024 (same work, different key) -- consolidate},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Erdogan E; Verbrugge R; Yolum P
Computational Theory of Mind with Abstractions for Effective Human-Agent Collaboration Proceedings Article
In: Dastani, Mehdi; Sichman, Jaime Simão; Alechina, Natasha; Dignum, Virginia (Ed.): Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2024, Auckland, New Zealand, May 6-10, 2024, pp. 2249–2251, International Foundation for Autonomous Agents and Multiagent Systems / ACM, 2024.
@inproceedings{DBLP:conf/atal/ErdoganVY24,
title = {Computational Theory of Mind with Abstractions for Effective Human-Agent Collaboration},
author = {Emre Erdogan and Rineke Verbrugge and Pinar Yolum},
editor = {Mehdi Dastani and Jaime Sim{\~a}o Sichman and Natasha Alechina and Virginia Dignum},
url = {https://dl.acm.org/doi/10.5555/3635637.3663123},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2024, Auckland, New Zealand, May 6-10, 2024},
pages = {2249--2251},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems / ACM},
abstract = {Empowering artificially intelligent agents with capabilities that humans use regularly is crucial to enable effective human-agent collaboration. One of these crucial capabilities is the modeling of Theory of Mind (ToM) reasoning: the human ability to reason about the mental content of others such as their beliefs, desires, and goals. However, it is generally impractical to track all individual mental attitudes of all other individuals and for many practical situations not even necessary. Hence, what is important is to capture enough information to create an approximate model that is effective and flexible. Accordingly, this paper proposes a computational ToM mechanism based on abstracting beliefs and knowledge into higher-level human concepts, called abstractions, similar to the ones that guide humans to effectively interact with each other (e.g., trust). We develop an agent architecture based on epistemic logic to formalize the computational dynamics of ToM reasoning. We identify important challenges regarding effective maintenance of abstractions and accurate use of ToM reasoning and demonstrate how our approach addresses these challenges over multiagent simulations.},
internal-note = {10.5555 is the ACM DL placeholder prefix, not a registered DOI; doi field removed, URL kept},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Erdogan E; Dignum F; Verbrugge R
Effective Maintenance of Computational Theory of Mind for Human-AI Collaboration Proceedings Article
In: Lorig, Fabian; Tucker, Jason; Lindström, Adam Dahlgren; Dignum, Frank; Murukannaiah, Pradeep K.; Theodorou, Andreas; Yolum, Pinar (Ed.): HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 114–123, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/ErdoganDV24,
  title     = {Effective Maintenance of Computational Theory of Mind for Human-AI Collaboration},
  author    = {Emre Erdogan and Frank Dignum and Rineke Verbrugge},
  editor    = {Fabian Lorig and Jason Tucker and Adam Dahlgren Lindström and Frank Dignum and Pradeep K. Murukannaiah and Andreas Theodorou and Pinar Yolum},
  url       = {https://doi.org/10.3233/FAIA240188},
  doi       = {10.3233/FAIA240188},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024},
  volume    = {386},
  pages     = {114–123},
  publisher = {IOS Press},
  series    = {Frontiers in Artificial Intelligence and Applications},
  abstract  = {In order to enhance collaboration between humans and artificially intelligent agents, it is crucial to equip the computational agents with capabilities commonly used by humans. One of these capabilities is called Theory of Mind (ToM) reasoning, which is the human ability to reason about the mental contents of others, such as their beliefs, desires, and goals. For an agent to efficiently benefit from having a functioning computational ToM of its human partner in a collaboration, it needs to be practical in computationally tracking their mental attitudes and it needs to create approximate ToM models that can be effectively maintained. In this paper, we propose a computational ToM mechanism based on abstracting beliefs and knowledge into higher-level human concepts, referred to as abstractions. These abstractions, similar to those guiding human interactions (e.g., trust), form the basis of our modular agent architecture. We address an important challenge related to maintaining abstractions effectively, namely abstraction consistency. We propose different approaches to study this challenge in the context of a scenario inspired by a medical domain and provide an experimental evaluation over agent simulations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Erdogan E; Dignum F; Verbrugge R; Yolum P
Resolving Multi-user Privacy Conflicts with Computational Theory of Mind Proceedings Article
In: Workshop on Citizen-Centric Multiagent Systems, pp. 22, 2024.
@inproceedings{erdogan2024resolving,
  title     = {Resolving Multi-user Privacy Conflicts with Computational Theory of Mind},
  author    = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, P{\i}nar},
  url       = {https://eprints.soton.ac.uk/489958/1/Proceedings_of_the_Second_International_Workshop_on_Citizen-Centric_Multiagent_Systems_2024_C-MAS_2024_.pdf},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Workshop on Citizen-Centric Multiagent Systems},
  pages     = {22},
  abstract  = {Online Social Networks (OSNs) serve as digital platforms for users to share information and build relationships. These networks facilitate the sharing of diverse content, which may disclose personal information about users. Some of these contents pertain to multiple users (such as group pictures), with different privacy expectations. Sharing of such content may lead to multi-user privacy conflicts. Decision-making mechanisms are crucial to managing conflicting privacy preferences among users, reducing their effort in conflict resolution. Various mechanisms are proposed in the literature, most of which demand significant computational resources. We propose a novel approach based on computational modeling of Theory of Mind (ToM), the human ability to understand others’ mental states (e.g., their beliefs, preferences, goals, etc.), to portray users’ privacy expectations. We argue that leveraging computational ToM modeling allows the design of agents capable of accurately capturing users’ behavior and reasoning about other agents’ privacy understanding, making them effective tools in multi-user privacy conflict management. To illustrate our ideas, we consider a content-sharing scenario and point out potential benefits of using our agent-based computational ToM approach in resolution of privacy conflicts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dobbe R; Wolters A
Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context Journal Article
In: Minds Mach., vol. 34, no. 2, pp. 12, 2024.
@article{DBLP:journals/mima/DobbeW24,
  title         = {Toward Sociotechnical AI: Mapping Vulnerabilities for Machine Learning in Context},
  author        = {Roel Dobbe and Anouk Wolters},
  url           = {https://doi.org/10.1007/s11023-024-09668-y},
  doi           = {10.1007/s11023-024-09668-y},
  year          = {2024},
  date          = {2024-01-01},
  journal       = {Minds Mach.},
  volume        = {34},
  number        = {2},
  pages         = {12},
  internal-note = {Review: duplicate of entry dobbe_toward_2024 (identical DOI); keep only one of the two keys},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {article}
}
Amin S; Renes S; Dobbe R
Designing organizational control mechanisms for consequential AI systems: towards a situated methodology (poster) Proceedings Article
In: Ubacht, Jolien; Crompvoets, Joep; Csáki, Csaba; Danneels, Lieselot; Janssen, Marijn; Johannessen, Marius Rohde; Lampoltshammer, Thomas J.; Lee, Habin; Lindgren, Ida; Hofmann, Sara; Parycek, Peter; Pereira, Gabriela Viale; Schwabe, Gerhard; Susha, Iryna; Tambouris, Efthimios; Zuiderwijk, Anneke (Ed.): Proceedings of Ongoing Research, Practitioners, Posters, Workshops, and Projects of the International Conference EGOV-CeDEM-ePart 2024, Ghent University and KU Leuven, Ghent/Leuven, Belgium, September 1-5, 2024, CEUR-WS.org, 2024.
@inproceedings{DBLP:conf/egov/AminRD24,
  title     = {Designing organizational control mechanisms for consequential AI systems: towards a situated methodology (poster)},
  author    = {Shan Amin and Sander Renes and Roel Dobbe},
  editor    = {Jolien Ubacht and Joep Crompvoets and Csaba Csáki and Lieselot Danneels and Marijn Janssen and Marius Rohde Johannessen and Thomas J. Lampoltshammer and Habin Lee and Ida Lindgren and Sara Hofmann and Peter Parycek and Gabriela Viale Pereira and Gerhard Schwabe and Iryna Susha and Efthimios Tambouris and Anneke Zuiderwijk},
  url       = {https://ceur-ws.org/Vol-3737/paper52.pdf},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of Ongoing Research, Practitioners, Posters, Workshops, and Projects of the International Conference EGOV-CeDEM-ePart 2024, Ghent University and KU Leuven, Ghent/Leuven, Belgium, September 1-5, 2024},
  volume    = {3737},
  publisher = {CEUR-WS.org},
  series    = {CEUR Workshop Proceedings},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lindström A D; Methnani L; Krause L; Ericson P; de Rituerto de Troya Í M; Mollo D C; Dobbe R
AI Alignment through Reinforcement Learning from Human Feedback? Contradictions and Limitations Journal Article
In: CoRR, vol. abs/2406.18346, 2024.
@article{DBLP:journals/corr/abs-2406-18346,
  title      = {AI Alignment through Reinforcement Learning from Human Feedback? Contradictions and Limitations},
  author     = {Adam Dahlgren Lindström and Leila Methnani and Lea Krause and Petter Ericson and {Í}{ñ}igo Martinez de Rituerto de Troya and Dimitri Coelho Mollo and Roel Dobbe},
  url        = {https://doi.org/10.48550/arXiv.2406.18346},
  doi        = {10.48550/arXiv.2406.18346},
  eprint     = {2406.18346},
  eprinttype = {arXiv},
  year       = {2024},
  date       = {2024-01-01},
  journal    = {CoRR},
  volume     = {abs/2406.18346},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {article}
}
Delfos J; Eijk A Z; van Cranenburgh S; Chorus C G; Dobbe R I J
Integral system safety for machine learning in the public sector: An empirical account Journal Article
In: Gov. Inf. Q., vol. 41, no. 3, pp. 101963, 2024.
@article{DBLP:journals/giq/DelfosECCD24,
  title     = {Integral system safety for machine learning in the public sector: An empirical account},
  author    = {Jeroen Delfos and Anneke Zuiderwijk{-}van Eijk and Sander van Cranenburgh and Caspar G. Chorus and Roel I. J. Dobbe},
  url       = {https://doi.org/10.1016/j.giq.2024.101963},
  doi       = {10.1016/J.GIQ.2024.101963},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Gov. Inf. Q.},
  volume    = {41},
  number    = {3},
  pages     = {101963},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
van der Meer M
Facilitating Opinion Diversity through Hybrid NLP Approaches Proceedings Article
In: Cao, Yang (Trista); Papadimitriou, Isabel; Ovalle, Anaelia; Zampieri, Marcos; Ferraro, Francis; Swayamdipta, Swabha (Ed.): Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Student Research Workshop, NAACL 2024, Mexico City, Mexico, June 18, 2024, pp. 272–284, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/naacl/Meer24,
  title     = {Facilitating Opinion Diversity through Hybrid NLP Approaches},
  author    = {Michiel van der Meer},
  editor    = {Yang (Trista) Cao and Isabel Papadimitriou and Anaelia Ovalle and Marcos Zampieri and Francis Ferraro and Swabha Swayamdipta},
  url       = {https://doi.org/10.18653/v1/2024.naacl-srw.29},
  doi       = {10.18653/V1/2024.NAACL-SRW.29},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Student Research Workshop, NAACL 2024, Mexico City, Mexico, June 18, 2024},
  pages     = {272–284},
  publisher = {Association for Computational Linguistics},
  abstract  = {Modern democracies face a critical issue of declining citizen participation in decision-making. Online discussion forums are an important avenue for enhancing citizen participation. This thesis proposal 1) identifies the challenges involved in facilitating large-scale online discussions with Natural Language Processing (NLP), 2) suggests solutions to these challenges by incorporating hybrid human-AI technologies, and 3) investigates what these technologies can reveal about individual perspectives in online discussions. We propose a three-layered hierarchy for representing perspectives that can be obtained by a mixture of human intelligence and large language models. We illustrate how these representations can draw insights into the diversity of perspectives and allow us to investigate interactions in online discussions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van der Meer M; Vossen P; Jonker C M; Murukannaiah P K
Value-Sensitive Disagreement Analysis for Online Deliberation Proceedings Article
In: Lorig, Fabian; Tucker, Jason; Lindström, Adam Dahlgren; Dignum, Frank; Murukannaiah, Pradeep K.; Theodorou, Andreas; Yolum, Pinar (Ed.): HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024, pp. 481–484, IOS Press, 2024.
@inproceedings{DBLP:conf/hhai/MeerVJM24,
  title     = {Value-Sensitive Disagreement Analysis for Online Deliberation},
  author    = {Michiel van der Meer and Piek Vossen and Catholijn M. Jonker and Pradeep K. Murukannaiah},
  editor    = {Fabian Lorig and Jason Tucker and Adam Dahlgren Lindström and Frank Dignum and Pradeep K. Murukannaiah and Andreas Theodorou and Pinar Yolum},
  url       = {https://doi.org/10.3233/FAIA240231},
  doi       = {10.3233/FAIA240231},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {HHAI 2024: Hybrid Human AI Systems for the Social Good - Proceedings of the Third International Conference on Hybrid Human-Artificial Intelligence, Malmö, Sweden, 10-14 June 2024},
  volume    = {386},
  pages     = {481–484},
  publisher = {IOS Press},
  series    = {Frontiers in Artificial Intelligence and Applications},
  abstract  = {Disagreements are common in online societal deliberation and may be crucial for effective collaboration, for instance in helping users understand opposing viewpoints. Although there exist automated methods for recognizing disagreement, a deeper understanding of factors that influence disagreement is currently missing. We investigate a hypothesis that differences in personal values influence disagreement in online discussions. Using Large Language Models (LLMs) for estimating both profiles of personal values and disagreement, we conduct a large-scale experiment involving 11.4M user comments. We find that the dissimilarity of value profiles correlates with disagreement only in specific cases, but that incorporating self-reported value profiles changes these results to be more undecided.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van der Meer M; Vossen P; Jonker C M; Murukannaiah P K
An Empirical Analysis of Diversity in Argument Summarization Proceedings Article
In: Graham, Yvette; Purver, Matthew (Ed.): Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2024 - Volume 1: Long Papers, St. Julian's, Malta, March 17-22, 2024, pp. 2028–2045, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/eacl/MeerVJM24,
  title     = {An Empirical Analysis of Diversity in Argument Summarization},
  author    = {Michiel van der Meer and Piek Vossen and Catholijn M. Jonker and Pradeep K. Murukannaiah},
  editor    = {Yvette Graham and Matthew Purver},
  url       = {https://aclanthology.org/2024.eacl-long.123},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2024 - Volume 1: Long Papers, St. Julian's, Malta, March 17-22, 2024},
  pages     = {2028–2045},
  publisher = {Association for Computational Linguistics},
  abstract  = {Presenting high-level arguments is a crucial task for fostering participation in online societal discussions. Current argument summarization approaches miss an important facet of this task—capturing \textit{diversity}—which is important for accommodating multiple perspectives. We introduce three aspects of diversity: those of opinions, annotators, and sources. We evaluate approaches to a popular argument summarization task called Key Point Analysis, which shows how these approaches struggle to (1) represent arguments shared by few people, (2) deal with data from various sources, and (3) align with subjectivity in human-provided annotations. We find that both general-purpose LLMs and dedicated KPA models exhibit this behavior, but have complementary strengths. Further, we observe that diversification of training data may ameliorate generalization in zero-shot cases. Addressing diversity in argument summarization requires a mix of strategies to deal with subjectivity.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van der Meer M; Falk N; Murukannaiah P K; Liscio E
Annotator-Centric Active Learning for Subjective NLP Tasks Proceedings Article
In: Al-Onaizan, Yaser; Bansal, Mohit; Chen, Yun-Nung (Ed.): Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 18537–18555, Association for Computational Linguistics, 2024.
@inproceedings{DBLP:conf/emnlp/MeerFML24,
  title     = {Annotator-Centric Active Learning for Subjective NLP Tasks},
  author    = {Michiel van der Meer and Neele Falk and Pradeep K. Murukannaiah and Enrico Liscio},
  editor    = {Yaser Al{-}Onaizan and Mohit Bansal and Yun{-}Nung Chen},
  url       = {https://aclanthology.org/2024.emnlp-main.1031},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024},
  pages     = {18537–18555},
  publisher = {Association for Computational Linguistics},
  abstract  = {Active Learning (AL) addresses the high costs of collecting human annotations by strategically annotating the most informative samples. However, for subjective NLP tasks, incorporating a wide range of perspectives in the annotation process is crucial to capture the variability in human judgments. We introduce Annotator-Centric Active Learning (ACAL), which incorporates an annotator selection strategy following data sampling. Our objective is two-fold: (1) to efficiently approximate the full diversity of human judgments, and (2) to assess model performance using annotator-centric metrics, which value minority and majority perspectives equally. We experiment with multiple annotator selection strategies across seven subjective NLP tasks, employing both traditional and novel, human-centered evaluation metrics. Our findings indicate that ACAL improves data efficiency and excels in annotator-centric performance evaluations. However, its success depends on the availability of a sufficiently large and diverse pool of annotators to sample from.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van der Meer M; Liscio E; Jonker C M; Plaat A; Vossen P; Murukannaiah P K
A Hybrid Intelligence Method for Argument Mining Journal Article
In: J. Artif. Intell. Res., vol. 80, pp. 1187–1222, 2024.
@article{DBLP:journals/jair/MeerLJPVM24,
  title     = {A Hybrid Intelligence Method for Argument Mining},
  author    = {Michiel van der Meer and Enrico Liscio and Catholijn M. Jonker and Aske Plaat and Piek Vossen and Pradeep K. Murukannaiah},
  url       = {https://doi.org/10.1613/jair.1.15135},
  doi       = {10.1613/JAIR.1.15135},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {J. Artif. Intell. Res.},
  volume    = {80},
  pages     = {1187–1222},
  abstract  = {Large-scale survey tools enable the collection of citizen feedback in opinion corpora. Extracting the key arguments from a large and noisy set of opinions helps in understanding the opinions quickly and accurately. Fully automated methods can extract arguments but (1) require large labeled datasets that induce large annotation costs and (2) work well for known viewpoints, but not for novel points of view. We propose HyEnA, a hybrid (human + AI) method for extracting arguments from opinionated texts, combining the speed of automated processing with the understanding and reasoning capabilities of humans. We evaluate HyEnA on three citizen feedback corpora. We find that, on the one hand, HyEnA achieves higher coverage and precision than a state-of-the-art automated method when compared to a common set of diverse opinions, justifying the need for human insight. On the other hand, HyEnA requires less human effort and does not compromise quality compared to (fully manual) expert analysis, demonstrating the benefit of combining human and artificial intelligence.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
van Dijk B; van Duijn M; Verberne S; Spruit M
ChiSCor: A Corpus of Freely-Told Fantasy Stories by Dutch Children for Computational Linguistics and Cognitive Science Proceedings Article
In: Jiang, Jing; Reitter, David; Deng, Shumin (Ed.): Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pp. 352–363, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-dijk-etal-2023-chiscor,
  title     = {ChiSCor: A Corpus of Freely-Told Fantasy Stories by Dutch Children for Computational Linguistics and Cognitive Science},
  author    = {van Dijk, Bram and van Duijn, Max and Verberne, Suzan and Spruit, Marco},
  editor    = {Jiang, Jing and Reitter, David and Deng, Shumin},
  url       = {https://aclanthology.org/2023.conll-1.23},
  doi       = {10.18653/v1/2023.conll-1.23},
  year      = {2023},
  date      = {2023-12-01},
  booktitle = {Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
  pages     = {352–363},
  publisher = {Association for Computational Linguistics},
  address   = {Singapore},
  abstract  = {In this resource paper we release ChiSCor, a new corpus containing 619 fantasy stories, told freely by 442 Dutch children aged 4-12. ChiSCor was compiled for studying how children render character perspectives, and unravelling language and cognition in development, with computational tools. Unlike existing resources, ChiSCor's stories were produced in natural contexts, in line with recent calls for more ecologically valid datasets. ChiSCor hosts text, audio, and annotations for character complexity and linguistic complexity. Additional metadata (e.g. education of caregivers) is available for one third of the Dutch children. ChiSCor also includes a small set of 62 English stories. This paper details how ChiSCor was compiled and shows its potential for future work with three brief case studies: i) we show that the syntactic complexity of stories is strikingly stable across children's ages; ii) we extend work on Zipfian distributions in free speech and show that ChiSCor obeys Zipf's law closely, reflecting its social context; iii) we show that even though ChiSCor is relatively small, the corpus is rich enough to train informative lemma vectors that allow us to analyse children's language use. We end with a reflection on the value of narrative datasets in computational linguistics.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Duijn M; van Dijk B; Kouwenhoven T; de Valk W; Spruit M; van der Putten P
Theory of Mind in Large Language Models: Examining Performance of 11 State-of-the-Art models vs. Children Aged 7-10 on Advanced Tests Proceedings Article
In: Jiang, Jing; Reitter, David; Deng, Shumin (Ed.): Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pp. 389–402, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-duijn-etal-2023-theory,
  title     = {Theory of Mind in Large Language Models: Examining Performance of 11 State-of-the-Art models vs. Children Aged 7-10 on Advanced Tests},
  author    = {van Duijn, Max and van Dijk, Bram and Kouwenhoven, Tom and de Valk, Werner and Spruit, Marco and van der Putten, Peter},
  editor    = {Jiang, Jing and Reitter, David and Deng, Shumin},
  url       = {https://aclanthology.org/2023.conll-1.25},
  doi       = {10.18653/v1/2023.conll-1.25},
  year      = {2023},
  date      = {2023-12-01},
  booktitle = {Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
  pages     = {389–402},
  publisher = {Association for Computational Linguistics},
  address   = {Singapore},
  abstract  = {To what degree should we ascribe cognitive capacities to Large Language Models (LLMs), such as the ability to reason about intentions and beliefs known as Theory of Mind (ToM)? Here we add to this emerging debate by (i) testing 11 base- and instruction-tuned LLMs on capabilities relevant to ToM beyond the dominant false-belief paradigm, including non-literal language usage and recursive intentionality; (ii) using newly rewritten versions of standardized tests to gauge LLMs' robustness; (iii) prompting and scoring for open besides closed questions; and (iv) benchmarking LLM performance against that of children aged 7-10 on the same tasks. We find that instruction-tuned LLMs from the GPT family outperform other models, and often also children. Base-LLMs are mostly unable to solve ToM tasks, even with specialized prompting. We suggest that the interlinked evolution and development of language and ToM may help explain what instruction-tuning adds: rewarding cooperative communication that takes into account interlocutor and context. We conclude by arguing for a nuanced perspective on ToM in LLMs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Dijk B; Kouwenhoven T; Spruit M; van Duijn M J
Large Language Models: The Need for Nuance in Current Debates and a Pragmatic Perspective on Understanding Proceedings Article
In: Bouamor, Houda; Pino, Juan; Bali, Kalika (Ed.): Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 12641–12654, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-dijk-etal-2023-large,
  title     = {Large Language Models: The Need for Nuance in Current Debates and a Pragmatic Perspective on Understanding},
  author    = {van Dijk, Bram and Kouwenhoven, Tom and Spruit, Marco and van Duijn, Max Johannes},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  url       = {https://aclanthology.org/2023.emnlp-main.779},
  doi       = {10.18653/v1/2023.emnlp-main.779},
  year      = {2023},
  date      = {2023-12-01},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
  pages     = {12641–12654},
  publisher = {Association for Computational Linguistics},
  address   = {Singapore},
  abstract  = {Current Large Language Models (LLMs) are unparalleled in their ability to generate grammatically correct, fluent text. LLMs are appearing rapidly, and debates on LLM capacities have taken off, but reflection is lagging behind. Thus, in this position paper, we first zoom in on the debate and critically assess three points recurring in critiques of LLM capacities: i) that LLMs only parrot statistical patterns in the training data; ii) that LLMs master formal but not functional language competence; and iii) that language learning in LLMs cannot inform human language learning. Drawing on empirical and theoretical arguments, we show that these points need more nuance. Second, we outline a pragmatic perspective on the issue of `real' understanding and intentionality in LLMs. Understanding and intentionality pertain to unobservable mental states we attribute to other humans because they have pragmatic value: they allow us to abstract away from complex underlying mechanics and predict behaviour effectively. We reflect on the circumstances under which it would make sense for humans to similarly attribute mental states to LLMs, thereby outlining a pragmatic philosophical context for LLMs as an increasingly prominent technology in society.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Heuss M; Cohen D; Mansoury M; de Rijke M; Eickhoff C
Predictive Uncertainty-based Bias Mitigation in Ranking Proceedings Article
In: CIKM 2023: 32nd ACM International Conference on Information and Knowledge Management, pp. 762–772, ACM, 2023.
@inproceedings{heuss-2023-predictive,
  title     = {Predictive Uncertainty-based Bias Mitigation in Ranking},
  author    = {Heuss, Maria and Cohen, Daniel and Mansoury, Masoud and de Rijke, Maarten and Eickhoff, Carsten},
  url       = {https://arxiv.org/abs/2309.09833},
  year      = {2023},
  date      = {2023-10-01},
  booktitle = {CIKM 2023: 32nd ACM International Conference on Information and Knowledge Management},
  pages     = {762–772},
  publisher = {ACM},
  abstract  = {Societal biases that are contained in retrieved documents have received increased interest. Such biases, which are often prevalent in the training data and learned by the model, can cause societal harms, by misrepresenting certain groups, and by enforcing stereotypes. Mitigating such biases demands algorithms that balance the trade-off between maximized utility for the user with fairness objectives, which incentivize unbiased rankings. Prior work on bias mitigation often assumes that ranking scores, which correspond to the utility that a document holds for a user, can be accurately determined. In reality, there is always a degree of uncertainty in the estimate of expected document utility. This uncertainty can be approximated by viewing ranking models through a Bayesian perspective, where the standard deterministic score becomes a distribution. In this work, we investigate whether uncertainty estimates can be used to decrease the amount of bias in the ranked results, while minimizing loss in measured utility. We introduce a simple method that uses the uncertainty of the ranking scores for an uncertainty-aware, post hoc approach to bias mitigation. We compare our proposed method with existing baselines for bias mitigation with respect to the utility-fairness trade-off, the controllability of methods, and computational costs. We show that an uncertainty-based approach can provide an intuitive and flexible trade-off that outperforms all baselines without additional training requirements, allowing for the post hoc use of this approach on top of arbitrary retrieval models.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Leeuwen L; Verheij B; Verbrugge R; Renooij S
Using Agent-Based Simulations to Evaluate Bayesian Networks for Criminal Scenarios Proceedings Article
In: The Nineteenth International Conference on Artificial Intelligence and Law (ICAIL 2023). Proceedings of the Conference, ACM, New York, NY, USA, Braga, Portugal, 2023.
@inproceedings{vanLeeuwen2023,
  title     = {Using Agent-Based Simulations to Evaluate Bayesian Networks for Criminal Scenarios},
  author    = {Ludi van Leeuwen and Bart Verheij and Rineke Verbrugge and Silja Renooij},
  doi       = {10.1145/3594536.3595125},
  year      = {2023},
  date      = {2023-06-01},
  booktitle = {The Nineteenth International Conference on Artificial Intelligence and Law (ICAIL 2023). Proceedings of the Conference},
  publisher = {ACM},
  address   = {Braga, Portugal},
  abstract  = {Scenario-based Bayesian networks (BNs) have been proposed as a tool for the rational handling of evidence. The proper evaluation of existing methods requires access to a ground truth that can be used to test the quality and usefulness of a BN model of a crime. However, that would require a full probability distribution over all relevant variables used in the model, which is in practice not available. In this paper, we use an agent-based simulation as a proxy for the ground truth for the evaluation of BN models as tools for the rational handling of evidence. We use fictional crime scenarios as a background. First, we design manually constructed BNs using existing design methods in order to model example crime scenarios. Second, we build an agent-based simulation covering the scenarios of criminal and non-criminal behavior. Third, we algorithmically determine BNs using statistics collected experimentally from the agent-based simulation that represents the ground truth. Finally, we compare the manual, scenario-based BNs to the algorithmic BNs by comparing the posterior probability distribution over outcomes of the network to the ground-truth frequency distribution over those outcomes in the simulation, across all evidence valuations. We find that both manual BNs and algorithmic BNs perform similarly well: they are good reflections of the ground truth in most of the evidence valuations. Using ABMs as a ground truth can be a tool to investigate Bayesian Networks and their design methods, especially under circumstances that are implausible in real-life criminal cases, such as full probabilistic information.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mehrotra S; Jorge C C; Jonker C M; Tielman M L
Building Appropriate Trust in AI: The Significance of Integrity-Centered Explanations Proceedings Article
In: HHAI 2023: Augmenting Human Intellect, pp. 436 - 439, IOS Press Ebooks, 2023.
@inproceedings{siddharth2023a,
  title     = {Building Appropriate Trust in AI: The Significance of Integrity-Centered Explanations},
  author    = {Mehrotra, Siddharth and Jorge, Carolina Centeio and Jonker, Catholijn M. and Tielman, Myrthe L.},
  doi       = {10.3233/FAIA230121},
  year      = {2023},
  date      = {2023-06-01},
  booktitle = {HHAI 2023: Augmenting Human Intellect},
  volume    = {368},
  pages     = {436–439},
  publisher = {IOS Press Ebooks},
  series    = {Frontiers in Artificial Intelligence and Applications},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ligthart M E U; Neerincx M A; Hindriks K V
It Takes Two: Using Co-Creation to Facilitate Child-Robot Co-Regulation Journal Article
In: Transactions on Human-Robot Interaction, 2023.
@article{10.1145/3593812,
  title     = {It Takes Two: Using Co-Creation to Facilitate Child-Robot Co-Regulation},
  author    = {Ligthart, Mike E.U. and Neerincx, Mark A. and Hindriks, Koen V.},
  url       = {https://doi.org/10.1145/3593812},
  doi       = {10.1145/3593812},
  year      = {2023},
  date      = {2023-05-01},
  journal   = {Transactions on Human-Robot Interaction},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {While interacting with a social robot, children have a need to express themselves and have their expressions acknowledged by the robot. A need that is often unaddressed by the robot, due to its limitations in understanding the expressions of children. To keep the child-robot interaction manageable the robot takes control, undermining children’s ability to co-regulate the interaction. Co-regulation is important for having a fulfilling social interaction. We developed a co-creation activity that aims to facilitate more co-regulation. Children are enabled to create sound effects, gestures, and light animations for the robot to use during their conversation. A crucial additional feature is that children are able to coordinate their involvement of the co-creation process. Results from a user study (N = 59 school children, 7-11 y.o.) showed that the co-creation activity successfully facilitated co-regulation by improving children’s agency. It also positively affected the acceptance of the robot. We furthermore identified five distinct profiles detailing the different needs and motivations children have for the level of involvement they chose during the co-creation process.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Orzan N; Acar E; Grossi D; Radulescu R
Emergent Cooperation and Deception in Public Good Games Conference
2023, (2023 Adaptive and Learning Agents Workshop at AAMAS, ALA 2023 ; Conference date: 29-05-2023 Through 30-05-2023).
@conference{42fc313afdcf4ea0a011c0fdb462ef1a,
title = {Emergent Cooperation and Deception in Public Good Games},
author = {Orzan, Nicole and Acar, Erman and Grossi, Davide and Radulescu, Roxana},
url = {https://alaworkshop2023.github.io},
year = {2023},
date = {2023-05-01},
abstract = {Communication is a widely used mechanism to promote cooperation in multi-agent systems. In the field of emergent communication agents are usually trained on a particular type of environment: cooperative, competitive, or mixed-motive. Motivated by the idea that real-world settings are characterised by incomplete information and that humans face daily interactions under a wide spectrum of incentives, we hypothesise that emergent communication could be simultaneously exploited in the totality of these scenarios. In this work we pursue this line of research by focusing on social dilemmas, and develop an extended version of the Public Goods Game which allows us to train independent reinforcement learning agents simultaneously on different scenarios where incentives are aligned (or misaligned) to various extents. Additionally, we introduce uncertainty regarding the alignment of incentives, and we equip agents with the ability to learn a communication policy, to study the potential of emergent communication for overcoming uncertainty. We show that in settings where all agents have the same level of uncertainty, communication can help improve the cooperation level of the system, while, when uncertainty is asymmetric, certain agents learn to use communication to deceive and exploit their uncertain peers.},
note = {2023 Adaptive and Learning Agents Workshop at AAMAS, ALA 2023 ; Conference date: 29-05-2023 Through 30-05-2023},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Kim T; Cochez M; François-Lavet V; Neerincx M; Vossen P
A Machine with Short-Term, Episodic, and Semantic Memory Systems Proceedings Article
In: Proceedings of the AAAI Conference on Artificial Intelligence, 2023.
@inproceedings{Taewoo-AAAI-2023,
title = {A Machine with Short-Term, Episodic, and Semantic Memory Systems},
author = {Kim, Taewoon and Cochez, Michael and François-Lavet, Vincent and Neerincx, Mark and Vossen, Piek},
url = {https://arxiv.org/abs/2212.02098},
doi = {10.48550/ARXIV.2212.02098},
eprint = {2212.02098},
eprinttype = {arXiv},
year = {2023},
date = {2023-02-01},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
abstract = {Inspired by the cognitive science theory of the explicit human memory systems, we have modeled an agent with short-term, episodic, and semantic memory systems, each of which is modeled with a knowledge graph. To evaluate this system and analyze the behavior of this agent, we designed and released our own reinforcement learning agent environment, "the Room", where an agent has to learn how to encode, store, and retrieve memories to maximize its return by answering questions. We show that our deep Q-learning based agent successfully learns whether a short-term memory should be forgotten, or rather be stored in the episodic or semantic memory systems. Our experiments indicate that an agent with human-like memory systems can outperform an agent without this memory structure in the environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sauter A W M; Acar E; Francois-Lavet V
A Meta-Reinforcement Learning Algorithm for Causal Discovery Proceedings Article
In: 2nd Conference on Causal Learning and Reasoning, 2023.
@inproceedings{sauter2023a,
title = {A Meta-Reinforcement Learning Algorithm for Causal Discovery},
author = {Sauter, Andreas W. M. and Acar, Erman and François-Lavet, Vincent},
url = {https://openreview.net/forum?id=p6NnDqJM_jL},
year = {2023},
date = {2023-01-01},
booktitle = {2nd Conference on Causal Learning and Reasoning},
abstract = {Uncovering the underlying causal structure of a phenomenon, domain or environment is of great scientific interest, not least because of the inferences that can be derived from such structures. Unfortunately though, given an environment, identifying its causal structure poses significant challenges. Amongst those are the need for costly interventions and the size of the space of possible structures that has to be searched. In this work, we propose a meta-reinforcement learning setup that addresses these challenges by learning a causal discovery algorithm, called Meta-Causal Discovery, or MCD. We model this algorithm as a policy that is trained on a set of environments with known causal structures to perform budgeted interventions. Simultaneously, the policy learns to maintain an estimate of the environment's causal structure. The learned policy can then be used as a causal discovery algorithm to estimate the structure of environments in a matter of milliseconds. At test time, our algorithm performs well even in environments that induce previously unseen causal structures. We empirically show that MCD estimates good graphs compared to SOTA approaches on toy environments and thus constitutes a proof-of-concept of learning causal discovery algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han S; Dastani M; Wang S
Model-based Sparse Communication in Multi-agent Reinforcement Learning Proceedings Article
In: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 439–447, 2023.
@inproceedings{han2023model,
title = {Model-based Sparse Communication in Multi-agent Reinforcement Learning},
author = {Han, Shuai and Dastani, Mehdi and Wang, Shihan},
url = {https://www.southampton.ac.uk/~eg/AAMAS2023/pdfs/p439.pdf},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {439--447},
abstract = {Learning to communicate efficiently is central to multi-agent reinforcement learning (MARL). Existing methods often require agents to exchange messages intensively, which abuses communication channels and leads to high communication overhead. Only a few methods target on learning sparse communication, but they allow limited information to be shared, which affects the efficiency of policy learning. In this work, we propose model-based communication (MBC), a learning framework with a decentralized communication scheduling process. The MBC framework enables multiple agents to make decisions with sparse communication. In particular, the MBC framework introduces a model-based message estimator to estimate the up-to-date global messages using past local data. A decentralized message scheduling mechanism is also proposed to determine whether a message shall be sent based on the estimation. We evaluated our method in a variety of mixed cooperative-competitive environments. The experiment results show that the MBC method shows better performance and lower channel overhead than the state-of-art baselines.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ulfert A; Georganta E; Jorge C C; Mehrotra S; Tielman M
Shaping a multidisciplinary understanding of team trust in human-AI teams: a theoretical framework Journal Article
In: European Journal of Work and Organizational Psychology, pp. 1–14, 2023.
@article{doi:10.1080/1359432X.2023.2200172,
title = {Shaping a multidisciplinary understanding of team trust in {human-AI} teams: a theoretical framework},
author = {Anna-Sophie Ulfert and Eleni Georganta and Carolina Centeio Jorge and Siddharth Mehrotra and Myrthe Tielman},
url = {https://www.tandfonline.com/doi/full/10.1080/1359432X.2023.2200172},
doi = {10.1080/1359432X.2023.2200172},
year = {2023},
date = {2023-01-01},
journal = {European Journal of Work and Organizational Psychology},
pages = {1--14},
publisher = {Routledge},
abstract = {Intelligent systems are increasingly entering the workplace, gradually moving away from technologies supporting work processes to artificially intelligent (AI) agents becoming team members. Therefore, a deep understanding of effective human-AI collaboration within the team context is required. Both psychology and computer science literature emphasize the importance of trust when humans interact either with human team members or AI agents. However, empirical work and theoretical models that combine these research fields and define team trust in human-AI teams are scarce. Furthermore, they often lack to integrate central aspects, such as the multilevel nature of team trust and the role of AI agents as team members. Building on an integration of current literature on trust in human-AI teaming across different research fields, we propose a multidisciplinary framework of team trust in human-AI teams. The framework highlights different trust relationships that exist within human-AI teams and acknowledges the multilevel nature of team trust. We discuss the framework’s potential for human-AI teaming research and for the design and implementation of trustworthy AI team members.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Onnes A; Dastani M; Renooij S
Bayesian Network Conflict Detection for Normative Monitoring of Black-Box Systems Proceedings Article
In: Proceedings of the Thirty-Sixth FLAIRS Conference, Florida Online Journals, 2023.
@inproceedings{onnes2023,
title = {{Bayesian} Network Conflict Detection for Normative Monitoring of Black-Box Systems},
author = {Onnes, Annet and Dastani, Mehdi and Renooij, Silja},
url = {https://journals.flvc.org/FLAIRS/article/view/133240/137859},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the Thirty-Sixth FLAIRS Conference},
volume = {36},
publisher = {Florida Online Journals},
abstract = {Bayesian networks are interpretable probabilistic models that can be constructed from both data and domain knowledge. They are applied in various domains and for different tasks, including that of anomaly detection, for which an easy to compute measure of data conflict exists. In this paper we consider the use of Bayesian networks to monitor input-output pairs of a black-box AI system, to establish whether the output is acceptable in the current context in which the AI system operates. A Bayesian network-based prescriptive, or normative, model is assumed that includes context variables relevant for deciding what is or is not acceptable. We analyse and adjust the conflict measure to make it applicable to our new type of monitoring setting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li M; Ariannezhad M; Yates A; de Rijke M
Who Will Purchase This Item Next? Reverse Next Period Recommendation in Grocery Shopping Journal Article
In: ACM Trans. Recomm. Syst., 2023.
@article{10.1145/3595384,
title = {Who Will Purchase This Item Next? Reverse Next Period Recommendation in Grocery Shopping},
author = {Li, Ming and Ariannezhad, Mozhdeh and Yates, Andrew and de Rijke, Maarten},
url = {https://dl.acm.org/doi/pdf/10.1145/3595384},
doi = {10.1145/3595384},
year = {2023},
date = {2023-01-01},
journal = {ACM Transactions on Recommender Systems},
publisher = {Association for Computing Machinery},
abstract = {Recommender systems have become an essential instrument to connect people to the items that they need. Online grocery shopping is one scenario where this is very clear. So-called user-centered recommendations take a user as input and suggest items based on the user’s preferences. Such user-centered recommendations have received significant attention and uptake. Instead, we focus on an item-centered recommendation task, again in the grocery shopping scenario. In the reverse next-period recommendation (RNPR) task, we are given an item and have to identify potential users who would like to consume it in the next period. We consider three sub-tasks of the overall RNPR task, (i) Expl-RNPR, (ii) Rep-RNPR, and (iii) Mixed-RNPR, where we consider different types of target users, i.e., (i) explore users, who are new to a given item, (ii) repeat users, who previously purchased a given item, and (iii) both explore users and repeat users. To address the Expl-RNPR task, we propose a habit-interest fusion model that employs frequency information to capture the repetition-exploration habits of users and that uses pre-trained item embeddings to model the user’s interests. For the Mixed-RNPR task, we propose a repetition-exploration user ranking algorithm to decouple the repetition and exploration task, and investigate the trade-off between targeting different types of users for a given item. Furthermore, to reduce the computational cost at inference, we analyze the repetition behavior from both user and item perspectives and then introduce a repetition-based candidate filtering method for each sub-task. We conduct experiments on two public grocery shopping datasets. Our experimental results not only demonstrate the difference between repetition and exploration, but also the effectiveness of the proposed methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bleeker M; Yates A; de Rijke M
Reducing Predictive Feature Suppression in Resource-Constrained Contrastive Image-Caption Retrieval Journal Article
In: Transactions on Machine Learning Research, 2023, ISSN: 2835-8856.
@article{bleeker-2023-reducing,
title = {Reducing Predictive Feature Suppression in Resource-Constrained Contrastive Image-Caption Retrieval},
author = {Maurits Bleeker and Andrew Yates and Maarten de Rijke},
url = {https://openreview.net/forum?id=T1XtOqrVKn},
doi = {10.48550/arXiv.2204.13382},
issn = {2835-8856},
year = {2023},
date = {2023-01-01},
journal = {Transactions on Machine Learning Research},
abstract = {To train image-caption retrieval (ICR) methods, contrastive loss functions are a common choice for optimization functions. Unfortunately, contrastive ICR methods are vulnerable to predictive feature suppression. Predictive features are features that correctly indicate the similarity between a query and a candidate item. However, in the presence of multiple predictive features during training, encoder models tend to suppress redundant predictive features, since these features are not needed to learn to discriminate between positive and negative pairs. While some predictive features are redundant during training, these features might be relevant during evaluation. We introduce an approach to reduce predictive feature suppression for resource-constrained ICR methods: latent target decoding (LTD). We add an additional decoder to the contrastive ICR framework, to reconstruct the input caption in a latent space of a general-purpose sentence encoder, which prevents the image and caption encoder from suppressing predictive features. We implement the LTD objective as an optimization constraint, to ensure that the reconstruction loss is below a bound value while primarily optimizing for the contrastive loss. Importantly, LTD does not depend on additional training data or expensive (hard) negative mining strategies. Our experiments show that, unlike reconstructing the input caption in the input space, LTD reduces predictive feature suppression, measured by obtaining higher recall@k, r-precision, and nDCG scores than a contrastive ICR baseline. Moreover, we show that LTD should be implemented as an optimization constraint instead of a dual optimization objective. Finally, we show that LTD can be used with different contrastive learning losses and a wide variety of resource-constrained ICR methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Steging C; Renooij S; Verheij B; Bench-Capon T
Arguments, rules and cases in law: Resources for aligning learning and reasoning in structured domains Journal Article
In: Argument & Computation, vol. 14, pp. 235–243, 2023, ISSN: 1946-2174, (2).
@article{StegingAC2023,
title = {Arguments, rules and cases in law: Resources for aligning learning and reasoning in structured domains},
author = {Steging, Cor and Renooij, Silja and Verheij, Bart and Bench-Capon, Trevor},
url = {https://doi.org/10.3233/AAC-220017},
doi = {10.3233/AAC-220017},
issn = {1946-2174},
year = {2023},
date = {2023-01-01},
journal = {Argument & Computation},
volume = {14},
pages = {235--243},
publisher = {IOS Press},
abstract = {This paper provides a formal description of two legal domains. In addition, we describe the generation of various artificial datasets from these domains and explain the use of these datasets in previous experiments aligning learning and reasoning. These resources are made available for the further investigation of connections between arguments, cases and rules. The datasets are publicly available at https://github.com/CorSteging/LegalResources .},
note = {2},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Javdani Rikhtehgar D; Wang S; Huitema H; Alvares J; Schlobach S; Rieffe C; Heylen D
Personalizing Cultural Heritage Access in a Virtual Reality Exhibition: A User Study on Viewing Behavior and Content Preferences Proceedings Article
In: Adjunct Proceedings of the 31st ACM Conference on User Modeling, Adaptation and Personalization, pp. 379–387, 2023.
@inproceedings{javdani2023personalizing,
title = {Personalizing Cultural Heritage Access in a Virtual Reality Exhibition: A User Study on Viewing Behavior and Content Preferences},
author = {Javdani Rikhtehgar, Delaram and Wang, Shenghui and Huitema, Hester and Alvares, Julia and Schlobach, Stefan and Rieffe, Carolien and Heylen, Dirk},
url = {https://dl.acm.org/doi/pdf/10.1145/3563359.3596666},
year = {2023},
date = {2023-01-01},
booktitle = {Adjunct Proceedings of the 31st ACM Conference on User Modeling, Adaptation and Personalization},
pages = {379--387},
abstract = {Leveraging digital technologies, museums now have the opportunity to embrace innovative approaches such as knowledge graphs, virtual reality, and virtual assistants to enhance the preservation and interactive presentation of cultural information. However, despite these advancements, personalizing the museum experience remains a significant challenge. Thus, this paper aims to investigate the necessary elements for offering personalized access to cultural heritage within a VR exhibition. To accomplish this, a user study was conducted to identify user preferences for tailored content descriptions, track user viewing behavior to gauge their interest in a VR exhibition, and determine preferred methods of information gathering. The study involved 31 participants, and the findings are expected to provide valuable insights for designing effective and engaging VR exhibitions that cater to diverse visitor interests.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}