Reimann M M; Kunneman F A; Oertel C; Hindriks K V
A Survey on Dialogue Management in Human-Robot Interaction Journal Article
In: J. Hum.-Robot Interact., 2024, (Just Accepted).
@article{10.1145/3648605,
title = {A Survey on Dialogue Management in {Human-Robot Interaction}},
author = {Reimann, Merle M. and Kunneman, Florian A. and Oertel, Catharine and Hindriks, Koen V.},
doi = {10.1145/3648605},
year = {2024},
date = {2024-03-01},
journal = {ACM Transactions on Human-Robot Interaction},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {As social robots see increasing deployment within the general public, improving the interaction with those robots is essential. Spoken language offers an intuitive interface for the human-robot interaction (HRI), with dialogue management (DM) being a key component in those interactive systems. Yet, to overcome current challenges and manage smooth, informative and engaging interaction a more structural approach to combining HRI and DM is needed. In this systematic review, we analyse the current use of DM in HRI and focus on the type of dialogue manager used, its capabilities, evaluation methods and the challenges specific to DM in HRI. We identify the challenges and current scientific frontier related to the DM approach, interaction domain, robot appearance, physical situatedness and multimodality.},
note = {Just Accepted},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
den Hengst F; Otten M; Elbers P; van Harmelen F; François-Lavet V; Hoogendoorn M
Guideline-informed reinforcement learning for mechanical ventilation in critical care Journal Article
In: Artificial Intelligence in Medicine, vol. 147, pp. 102742, 2024, ISSN: 0933-3657.
@article{DENHENGST2024102742,
title = {Guideline-informed reinforcement learning for mechanical ventilation in critical care},
author = {den Hengst, Floris and Otten, Martijn and Elbers, Paul and van Harmelen, Frank and François-Lavet, Vincent and Hoogendoorn, Mark},
url = {https://www.sciencedirect.com/science/article/pii/S0933365723002567},
doi = {10.1016/j.artmed.2023.102742},
issn = {0933-3657},
year = {2024},
date = {2024-01-01},
journal = {Artificial Intelligence in Medicine},
volume = {147},
pages = {102742},
abstract = {Reinforcement Learning (RL) has recently found many applications in the healthcare domain thanks to its natural fit to clinical decision-making and ability to learn optimal decisions from observational data. A key challenge in adopting RL-based solution in clinical practice, however, is the inclusion of existing knowledge in learning a suitable solution. Existing knowledge from e.g. medical guidelines may improve the safety of solutions, produce a better balance between short- and long-term outcomes for patients and increase trust and adoption by clinicians. We present a framework for including knowledge available from medical guidelines in RL. The framework includes components for enforcing safety constraints and an approach that alters the learning signal to better balance short- and long-term outcomes based on these guidelines. We evaluate the framework by extending an existing RL-based mechanical ventilation (MV) approach with clinically established ventilation guidelines. Results from off-policy policy evaluation indicate that our approach has the potential to decrease 90-day mortality while ensuring lung protective ventilation. This framework provides an important stepping stone towards implementations of RL in clinical practice and opens up several avenues for further research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reimann M; van de Graaf J; van Gulik N; van de Sanden S; Verhagen T; Hindriks K
Social Robots in the Wild and the Novelty Effect Proceedings Article
In: Ali, Abdulaziz Al; Cabibihan, John-John; Meskin, Nader; Rossi, Silvia; Jiang, Wanyue; He, Hongsheng; Ge, Shuzhi Sam (Ed.): Social Robotics, pp. 38–48, Springer Nature Singapore, Singapore, 2024, ISBN: 978-981-99-8718-4.
@inproceedings{10.1007/978-981-99-8718-4_4,
title = {Social Robots in the Wild and the Novelty Effect},
author = {Reimann, Merle and van de Graaf, Jesper and van Gulik, Nina and van de Sanden, Stephanie and Verhagen, Tibert and Hindriks, Koen},
editor = {Ali, Abdulaziz Al and Cabibihan, John-John and Meskin, Nader and Rossi, Silvia and Jiang, Wanyue and He, Hongsheng and Ge, Shuzhi Sam},
url = {https://rdcu.be/dtXE5},
doi = {10.1007/978-981-99-8718-4_4},
isbn = {978-981-99-8718-4},
year = {2024},
date = {2024-01-01},
booktitle = {Social Robotics},
pages = {38--48},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {We designed a wine recommendation robot and deployed it in a small supermarket. In a study aimed to evaluate our design we found that people with no intent to buy wine were interacting with the robot rather than the intended audience of wine-buying customers. Behavioural data, moreover, suggests a very different evaluation of the robot than the surveys that were completed. We also found that groups were interacting more with the robot than individuals, a finding that has been reported more often in the literature. All of these findings taken together suggest that a novelty effect may have been at play. It also suggests that field studies should take this effect more seriously. The main contribution of our work is in identifying and proposing a set of indicators and thresholds that can be used to identify that a novelty effect is present. We argue that it is important to focus more on measuring attitudes towards robots that may explain behaviour due to novelty effects. Our findings also suggest research should focus more on verifying whether real user needs are met.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu C; Dastani M; Wang S
A survey of multi-agent deep reinforcement learning with communication Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 38, no. 1, pp. 4, 2024.
@article{zhu2024survey,
title = {A survey of multi-agent deep reinforcement learning with communication},
author = {Zhu, Changxi and Dastani, Mehdi and Wang, Shihan},
doi = {10.1007/s10458-023-09633-6},
year = {2024},
date = {2024-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {38},
number = {1},
pages = {4},
publisher = {Springer},
abstract = {Communication is an effective mechanism for coordinating the behaviors of multiple agents, broadening their views of the environment, and to support their collaborations. In the field of multi-agent deep reinforcement learning (MADRL), agents can improve the overall learning performance and achieve their objectives by communication. Agents can communicate various types of messages, either to all agents or to specific agent groups, or conditioned on specific constraints. With the growing body of research work in MADRL with communication (Comm-MADRL), there is a lack of a systematic and structural approach to distinguish and classify existing Comm-MADRL approaches. In this paper, we survey recent works in the Comm-MADRL field and consider various aspects of communication that can play a role in designing and developing multi-agent reinforcement learning systems. With these aspects in mind, we propose 9 dimensions along which Comm-MADRL approaches can be analyzed, developed, and compared. By projecting existing works into the multi-dimensional space, we discover interesting trends. We also propose some novel directions for designing future Comm-MADRL systems through exploring possible combinations of the dimensions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
van Dijk B; van Duijn M; Verberne S; Spruit M
ChiSCor: A Corpus of Freely-Told Fantasy Stories by Dutch Children for Computational Linguistics and Cognitive Science Proceedings Article
In: Jiang, Jing; Reitter, David; Deng, Shumin (Ed.): Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pp. 352–363, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-dijk-etal-2023-chiscor,
title = {{ChiSCor}: A Corpus of Freely-Told Fantasy Stories by {Dutch} Children for Computational Linguistics and Cognitive Science},
author = {van Dijk, Bram and van Duijn, Max and Verberne, Suzan and Spruit, Marco},
editor = {Jiang, Jing and Reitter, David and Deng, Shumin},
url = {https://aclanthology.org/2023.conll-1.23},
doi = {10.18653/v1/2023.conll-1.23},
year = {2023},
date = {2023-12-01},
booktitle = {Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
pages = {352--363},
publisher = {Association for Computational Linguistics},
address = {Singapore},
abstract = {In this resource paper we release ChiSCor, a new corpus containing 619 fantasy stories, told freely by 442 Dutch children aged 4-12. ChiSCor was compiled for studying how children render character perspectives, and unravelling language and cognition in development, with computational tools. Unlike existing resources, ChiSCor's stories were produced in natural contexts, in line with recent calls for more ecologically valid datasets. ChiSCor hosts text, audio, and annotations for character complexity and linguistic complexity. Additional metadata (e.g. education of caregivers) is available for one third of the Dutch children. ChiSCor also includes a small set of 62 English stories. This paper details how ChiSCor was compiled and shows its potential for future work with three brief case studies: i) we show that the syntactic complexity of stories is strikingly stable across children's ages; ii) we extend work on Zipfian distributions in free speech and show that ChiSCor obeys Zipf's law closely, reflecting its social context; iii) we show that even though ChiSCor is relatively small, the corpus is rich enough to train informative lemma vectors that allow us to analyse children's language use. We end with a reflection on the value of narrative datasets in computational linguistics.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Duijn M; van Dijk B; Kouwenhoven T; de Valk W; Spruit M; van der Putten P
Theory of Mind in Large Language Models: Examining Performance of 11 State-of-the-Art models vs. Children Aged 7-10 on Advanced Tests Proceedings Article
In: Jiang, Jing; Reitter, David; Deng, Shumin (Ed.): Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL), pp. 389–402, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-duijn-etal-2023-theory,
title = {Theory of Mind in Large Language Models: Examining Performance of 11 State-of-the-Art models vs. Children Aged 7-10 on Advanced Tests},
author = {van Duijn, Max and van Dijk, Bram and Kouwenhoven, Tom and de Valk, Werner and Spruit, Marco and van der Putten, Peter},
editor = {Jiang, Jing and Reitter, David and Deng, Shumin},
url = {https://aclanthology.org/2023.conll-1.25},
doi = {10.18653/v1/2023.conll-1.25},
year = {2023},
date = {2023-12-01},
booktitle = {Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
pages = {389--402},
publisher = {Association for Computational Linguistics},
address = {Singapore},
abstract = {To what degree should we ascribe cognitive capacities to Large Language Models (LLMs), such as the ability to reason about intentions and beliefs known as Theory of Mind (ToM)? Here we add to this emerging debate by (i) testing 11 base- and instruction-tuned LLMs on capabilities relevant to ToM beyond the dominant false-belief paradigm, including non-literal language usage and recursive intentionality; (ii) using newly rewritten versions of standardized tests to gauge LLMs' robustness; (iii) prompting and scoring for open besides closed questions; and (iv) benchmarking LLM performance against that of children aged 7-10 on the same tasks. We find that instruction-tuned LLMs from the GPT family outperform other models, and often also children. Base-LLMs are mostly unable to solve ToM tasks, even with specialized prompting. We suggest that the interlinked evolution and development of language and ToM may help explain what instruction-tuning adds: rewarding cooperative communication that takes into account interlocutor and context. We conclude by arguing for a nuanced perspective on ToM in LLMs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Dijk B; Kouwenhoven T; Spruit M; van Duijn M J
Large Language Models: The Need for Nuance in Current Debates and a Pragmatic Perspective on Understanding Proceedings Article
In: Bouamor, Houda; Pino, Juan; Bali, Kalika (Ed.): Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 12641–12654, Association for Computational Linguistics, Singapore, 2023.
@inproceedings{van-dijk-etal-2023-large,
title = {Large Language Models: The Need for Nuance in Current Debates and a Pragmatic Perspective on Understanding},
author = {van Dijk, Bram and Kouwenhoven, Tom and Spruit, Marco and van Duijn, Max Johannes},
editor = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
url = {https://aclanthology.org/2023.emnlp-main.779},
doi = {10.18653/v1/2023.emnlp-main.779},
year = {2023},
date = {2023-12-01},
booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
pages = {12641--12654},
publisher = {Association for Computational Linguistics},
address = {Singapore},
abstract = {Current Large Language Models (LLMs) are unparalleled in their ability to generate grammatically correct, fluent text. LLMs are appearing rapidly, and debates on LLM capacities have taken off, but reflection is lagging behind. Thus, in this position paper, we first zoom in on the debate and critically assess three points recurring in critiques of LLM capacities: i) that LLMs only parrot statistical patterns in the training data; ii) that LLMs master formal but not functional language competence; and iii) that language learning in LLMs cannot inform human language learning. Drawing on empirical and theoretical arguments, we show that these points need more nuance. Second, we outline a pragmatic perspective on the issue of `real' understanding and intentionality in LLMs. Understanding and intentionality pertain to unobservable mental states we attribute to other humans because they have pragmatic value: they allow us to abstract away from complex underlying mechanics and predict behaviour effectively. We reflect on the circumstances under which it would make sense for humans to similarly attribute mental states to LLMs, thereby outlining a pragmatic philosophical context for LLMs as an increasingly prominent technology in society.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Heuss M; Cohen D; Mansoury M; de Rijke M; Eickhoff C
Predictive Uncertainty-based Bias Mitigation in Ranking Proceedings Article
In: CIKM 2023: 32nd ACM International Conference on Information and Knowledge Management, pp. 762–772, ACM, 2023.
@inproceedings{heuss-2023-predictive,
title = {Predictive Uncertainty-based Bias Mitigation in Ranking},
author = {Heuss, Maria and Cohen, Daniel and Mansoury, Masoud and de Rijke, Maarten and Eickhoff, Carsten},
url = {https://arxiv.org/abs/2309.09833},
eprint = {2309.09833},
eprinttype = {arXiv},
year = {2023},
date = {2023-10-01},
booktitle = {CIKM 2023: 32nd ACM International Conference on Information and Knowledge Management},
pages = {762--772},
publisher = {ACM},
abstract = {Societal biases that are contained in retrieved documents have received increased interest. Such biases, which are often prevalent in the training data and learned by the model, can cause societal harms, by misrepresenting certain groups, and by enforcing stereotypes. Mitigating such biases demands algorithms that balance the trade-off between maximized utility for the user with fairness objectives, which incentivize unbiased rankings. Prior work on bias mitigation often assumes that ranking scores, which correspond to the utility that a document holds for a user, can be accurately determined. In reality, there is always a degree of uncertainty in the estimate of expected document utility. This uncertainty can be approximated by viewing ranking models through a Bayesian perspective, where the standard deterministic score becomes a distribution. In this work, we investigate whether uncertainty estimates can be used to decrease the amount of bias in the ranked results, while minimizing loss in measured utility. We introduce a simple method that uses the uncertainty of the ranking scores for an uncertainty-aware, post hoc approach to bias mitigation. We compare our proposed method with existing baselines for bias mitigation with respect to the utility-fairness trade-off, the controllability of methods, and computational costs. We show that an uncertainty-based approach can provide an intuitive and flexible trade-off that outperforms all baselines without additional training requirements, allowing for the post hoc use of this approach on top of arbitrary retrieval models.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Leeuwen L; Verheij B; Verbrugge R; Renooij S
Using Agent-Based Simulations to Evaluate Bayesian Networks for Criminal Scenarios Proceedings Article
In: The Nineteenth International Conference on Artificial Intelligence and Law (ICAIL 2023). Proceedings of the Conference, ACM, New York, NY, USA, Braga, Portugal, 2023.
@inproceedings{vanLeeuwen2023,
title = {Using Agent-Based Simulations to Evaluate Bayesian Networks for Criminal Scenarios},
author = {van Leeuwen, Ludi and Verheij, Bart and Verbrugge, Rineke and Renooij, Silja},
doi = {10.1145/3594536.3595125},
year = {2023},
date = {2023-06-01},
booktitle = {The Nineteenth International Conference on Artificial Intelligence and Law (ICAIL 2023). Proceedings of the Conference},
publisher = {ACM},
address = {New York, NY, USA},
venue = {Braga, Portugal},
abstract = {Scenario-based Bayesian networks (BNs) have been proposed as a tool for the rational handling of evidence. The proper evaluation of existing methods requires access to a ground truth that can be used to test the quality and usefulness of a BN model of a crime. However, that would require a full probability distribution over all relevant variables used in the model, which is in practice not available. In this paper, we use an agent-based simulation as a proxy for the ground truth for the evaluation of BN models as tools for the rational handling of evidence. We use fictional crime scenarios as a background. First, we design manually constructed BNs using existing design methods in order to model example crime scenarios. Second, we build an agent-based simulation covering the scenarios of criminal and non-criminal behavior. Third, we algorithmically determine BNs using statistics collected experimentally from the agent-based simulation that represents the ground truth. Finally, we compare the manual, scenario-based BNs to the algorithmic BNs by comparing the posterior probability distribution over outcomes of the network to the ground-truth frequency distribution over those outcomes in the simulation, across all evidence valuations. We find that both manual BNs and algorithmic BNs perform similarly well: they are good reflections of the ground truth in most of the evidence valuations. Using ABMs as a ground truth can be a tool to investigate Bayesian Networks and their design methods, especially under circumstances that are implausible in real-life criminal cases, such as full probabilistic information.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligthart M E U; Neerincx M A; Hindriks K V
It Takes Two: Using Co-Creation to Facilitate Child-Robot Co-Regulation Journal Article
In: Transactions on Human-Robot Interaction, 2023.
@article{10.1145/3593812,
title = {It Takes Two: Using Co-Creation to Facilitate Child-Robot Co-Regulation},
author = {Ligthart, Mike E.U. and Neerincx, Mark A. and Hindriks, Koen V.},
url = {https://doi.org/10.1145/3593812},
doi = {10.1145/3593812},
year = {2023},
date = {2023-05-01},
journal = {ACM Transactions on Human-Robot Interaction},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {While interacting with a social robot, children have a need to express themselves and have their expressions acknowledged by the robot. A need that is often unaddressed by the robot, due to its limitations in understanding the expressions of children. To keep the child-robot interaction manageable the robot takes control, undermining children’s ability to co-regulate the interaction. Co-regulation is important for having a fulfilling social interaction. We developed a co-creation activity that aims to facilitate more co-regulation. Children are enabled to create sound effects, gestures, and light animations for the robot to use during their conversation. A crucial additional feature is that children are able to coordinate their involvement of the co-creation process. Results from a user study (N = 59 school children, 7-11 y.o.) showed that the co-creation activity successfully facilitated co-regulation by improving children’s agency. It also positively affected the acceptance of the robot. We furthermore identified five distinct profiles detailing the different needs and motivations children have for the level of involvement they chose during the co-creation process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Orzan N; Acar E; Grossi D; Radulescu R
Emergent Cooperation and Deception in Public Good Games Conference
2023, (2023 Adaptive and Learning Agents Workshop at AAMAS, ALA 2023 ; Conference date: 29-05-2023 Through 30-05-2023).
@conference{42fc313afdcf4ea0a011c0fdb462ef1a,
title = {Emergent Cooperation and Deception in Public Good Games},
author = {Nicole Orzan and Erman Acar and Davide Grossi and Roxana Radulescu},
url = {https://alaworkshop2023.github.io},
year = {2023},
date = {2023-05-01},
booktitle = {Adaptive and Learning Agents Workshop at AAMAS (ALA 2023)},
abstract = {Communication is a widely used mechanism to promote cooperation in multi-agent systems. In the field of emergent communication agents are usually trained on a particular type of environment: cooperative, competitive, or mixed-motive. Motivated by the idea that real-world settings are characterised by incomplete information and that humans face daily interactions under a wide spectrum of incentives, we hypothesise that emergent communication could be simultaneously exploited in the totality of these scenarios. In this work we pursue this line of research by focusing on social dilemmas, and develop an extended version of the Public Goods Game which allows us to train independent reinforcement learning agents simultaneously on different scenarios where incentives are aligned (or misaligned) to various extents. Additionally, we introduce uncertainty regarding the alignment of incentives, and we equip agents with the ability to learn a communication policy, to study the potential of emergent communication for overcoming uncertainty. We show that in settings where all agents have the same level of uncertainty, communication can help improve the cooperation level of the system, while, when uncertainty is asymmetric, certain agents learn to use communication to deceive and exploit their uncertain peers.},
note = {2023 Adaptive and Learning Agents Workshop at AAMAS, ALA 2023 ; Conference date: 29-05-2023 Through 30-05-2023},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Kim T; Cochez M; François-Lavet V; Neerincx M; Vossen P
A Machine with Short-Term, Episodic, and Semantic Memory Systems Proceedings Article
In: Proceedings of the AAAI Conference on Artificial Intelligence, 2023.
@inproceedings{Taewoo-AAAI-2023,
title = {A Machine with Short-Term, Episodic, and Semantic Memory Systems},
author = {Kim, Taewoon and Cochez, Michael and François-Lavet, Vincent and Neerincx, Mark and Vossen, Piek},
url = {https://arxiv.org/abs/2212.02098},
doi = {10.48550/arXiv.2212.02098},
eprint = {2212.02098},
eprinttype = {arXiv},
year = {2023},
date = {2023-02-01},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
abstract = {Inspired by the cognitive science theory of the explicit human memory systems, we have modeled an agent with short-term, episodic, and semantic memory systems, each of which is modeled with a knowledge graph. To evaluate this system and analyze the behavior of this agent, we designed and released our own reinforcement learning agent environment, "the Room", where an agent has to learn how to encode, store, and retrieve memories to maximize its return by answering questions. We show that our deep Q-learning based agent successfully learns whether a short-term memory should be forgotten, or rather be stored in the episodic or semantic memory systems. Our experiments indicate that an agent with human-like memory systems can outperform an agent without this memory structure in the environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sauter A W M; Acar E; Francois-Lavet V
A Meta-Reinforcement Learning Algorithm for Causal Discovery Proceedings Article
In: 2nd Conference on Causal Learning and Reasoning, 2023.
@inproceedings{sauter2023a,
title = {A Meta-Reinforcement Learning Algorithm for Causal Discovery},
author = {Sauter, Andreas W. M. and Acar, Erman and François-Lavet, Vincent},
url = {https://openreview.net/forum?id=p6NnDqJM_jL},
year = {2023},
date = {2023-01-01},
booktitle = {2nd Conference on Causal Learning and Reasoning},
abstract = {Uncovering the underlying causal structure of a phenomenon, domain or environment is of great scientific interest, not least because of the inferences that can be derived from such structures. Unfortunately though, given an environment, identifying its causal structure poses significant challenges. Amongst those are the need for costly interventions and the size of the space of possible structures that has to be searched. In this work, we propose a meta-reinforcement learning setup that addresses these challenges by learning a causal discovery algorithm, called Meta-Causal Discovery, or MCD. We model this algorithm as a policy that is trained on a set of environments with known causal structures to perform budgeted interventions. Simultaneously, the policy learns to maintain an estimate of the environment's causal structure. The learned policy can then be used as a causal discovery algorithm to estimate the structure of environments in a matter of milliseconds. At test time, our algorithm performs well even in environments that induce previously unseen causal structures. We empirically show that MCD estimates good graphs compared to SOTA approaches on toy environments and thus constitutes a proof-of-concept of learning causal discovery algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han S; Dastani M; Wang S
Model-based Sparse Communication in Multi-agent Reinforcement Learning Proceedings Article
In: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 439–447, 2023.
@inproceedings{han2023model,
title = {Model-based Sparse Communication in Multi-agent Reinforcement Learning},
author = {Han, Shuai and Dastani, Mehdi and Wang, Shihan},
url = {https://www.southampton.ac.uk/~eg/AAMAS2023/pdfs/p439.pdf},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {439--447},
abstract = {Learning to communicate efficiently is central to multi-agent reinforcement learning (MARL). Existing methods often require agents to exchange messages intensively, which abuses communication channels and leads to high communication overhead. Only a few methods target on learning sparse communication, but they allow limited information to be shared, which affects the efficiency of policy learning. In this work, we propose model-based communication (MBC), a learning framework with a decentralized communication scheduling process. The MBC framework enables multiple agents to make decisions with sparse communication. In particular, the MBC framework introduces a model-based message estimator to estimate the up-to-date global messages using past local data. A decentralized message scheduling mechanism is also proposed to determine whether a message shall be sent based on the estimation. We evaluated our method in a variety of mixed cooperative-competitive environments. The experiment results show that the MBC method shows better performance and lower channel overhead than the state-of-art baselines.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ulfert A; Georganta E; Jorge C C; Mehrotra S; Tielman M
Shaping a multidisciplinary understanding of team trust in human-AI teams: a theoretical framework Journal Article
In: European Journal of Work and Organizational Psychology, pp. 1–14, 2023.
@article{doi:10.1080/1359432X.2023.2200172,
title = {Shaping a multidisciplinary understanding of team trust in human-AI teams: a theoretical framework},
author = {Ulfert, Anna-Sophie and Georganta, Eleni and Centeio Jorge, Carolina and Mehrotra, Siddharth and Tielman, Myrthe},
url = {https://www.tandfonline.com/doi/full/10.1080/1359432X.2023.2200172},
doi = {10.1080/1359432X.2023.2200172},
year = {2023},
date = {2023-01-01},
journal = {European Journal of Work and Organizational Psychology},
pages = {1--14},
publisher = {Routledge},
abstract = {Intelligent systems are increasingly entering the workplace, gradually moving away from technologies supporting work processes to artificially intelligent (AI) agents becoming team members. Therefore, a deep understanding of effective human-AI collaboration within the team context is required. Both psychology and computer science literature emphasize the importance of trust when humans interact either with human team members or AI agents. However, empirical work and theoretical models that combine these research fields and define team trust in human-AI teams are scarce. Furthermore, they often lack to integrate central aspects, such as the multilevel nature of team trust and the role of AI agents as team members. Building on an integration of current literature on trust in human-AI teaming across different research fields, we propose a multidisciplinary framework of team trust in human-AI teams. The framework highlights different trust relationships that exist within human-AI teams and acknowledges the multilevel nature of team trust. We discuss the framework’s potential for human-AI teaming research and for the design and implementation of trustworthy AI team members.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Onnes A; Dastani M; Renooij S
Bayesian Network Conflict Detection for Normative Monitoring of Black-Box Systems Proceedings Article
In: Proceedings of the Thirty-Sixth FLAIRS Conference, Florida Online Journals, 2023.
@inproceedings{onnes2023,
  title     = {Bayesian Network Conflict Detection for Normative Monitoring of Black-Box Systems},
  author    = {Onnes, Annet and Dastani, Mehdi and Renooij, Silja},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133240/137859},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {Proceedings of the Thirty-Sixth FLAIRS Conference},
  volume    = {36},
  publisher = {Florida Online Journals},
  abstract  = {Bayesian networks are interpretable probabilistic models that can be constructed from both data and domain knowledge. They are applied in various domains and for different tasks, including that of anomaly detection, for which an easy to compute measure of data conflict exists. In this paper we consider the use of Bayesian networks to monitor input-output pairs of a black-box AI system, to establish whether the output is acceptable in the current context in which the AI system operates. A Bayesian network-based prescriptive, or normative, model is assumed that includes context variables relevant for deciding what is or is not acceptable. We analyse and adjust the conflict measure to make it applicable to our new type of monitoring setting.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li M; Ariannezhad M; Yates A; de Rijke M
Who Will Purchase This Item Next? Reverse Next Period Recommendation in Grocery Shopping Journal Article
In: ACM Trans. Recomm. Syst., 2023.
@article{10.1145/3595384,
title = {Who Will Purchase This Item Next? Reverse Next Period Recommendation in Grocery Shopping},
author = {Li, Ming and Ariannezhad, Mozhdeh and Yates, Andrew and de Rijke, Maarten},
url = {https://dl.acm.org/doi/pdf/10.1145/3595384},
doi = {10.1145/3595384},
year = {2023},
date = {2023-01-01},
journal = {ACM Trans. Recomm. Syst.},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Recommender systems have become an essential instrument to connect people to the items that they need. Online grocery shopping is one scenario where this is very clear. So-called user-centered recommendations take a user as input and suggest items based on the user’s preferences. Such user-centered recommendations have received significant attention and uptake. Instead, we focus on an item-centered recommendation task, again in the grocery shopping scenario. In the reverse next-period recommendation (RNPR) task, we are given an item and have to identify potential users who would like to consume it in the next period. We consider three sub-tasks of the overall RNPR task, (i) Expl-RNPR, (ii) Rep-RNPR, and (iii) Mixed-RNPR, where we consider different types of target users, i.e., (i) explore users, who are new to a given item, (ii) repeat users, who previously purchased a given item, and (iii) both explore users and repeat users. To address the Expl-RNPR task, we propose a habit-interest fusion model that employs frequency information to capture the repetition-exploration habits of users and that uses pre-trained item embeddings to model the user’s interests. For the Mixed-RNPR task, we propose a repetition-exploration user ranking algorithm to decouple the repetition and exploration task, and investigate the trade-off between targeting different types of users for a given item. Furthermore, to reduce the computational cost at inference, we analyze the repetition behavior from both user and item perspectives and then introduce a repetition-based candidate filtering method for each sub-task. We conduct experiments on two public grocery shopping datasets. Our experimental results not only demonstrate the difference between repetition and exploration, but also the effectiveness of the proposed methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bleeker M; Yates A; de Rijke M
Reducing Predictive Feature Suppression in Resource-Constrained Contrastive Image-Caption Retrieval Journal Article
In: Transactions on Machine Learning Research, 2023, ISSN: 2835-8856.
@article{bleeker-2023-reducing,
title = {Reducing Predictive Feature Suppression in Resource-Constrained Contrastive Image-Caption Retrieval},
author = {Maurits Bleeker and Andrew Yates and Maarten de Rijke},
url = {https://openreview.net/forum?id=T1XtOqrVKn},
doi = {10.48550/arXiv.2204.13382},
issn = {2835-8856},
year = {2023},
date = {2023-01-01},
journal = {Transactions on Machine Learning Research},
abstract = {To train image-caption retrieval (ICR) methods, contrastive loss functions are a common choice for optimization functions. Unfortunately, contrastive ICR methods are vulnerable to predictive feature suppression. Predictive features are features that correctly indicate the similarity between a query and a candidate item. However, in the presence of multiple predictive features during training, encoder models tend to suppress redundant predictive features, since these features are not needed to learn to discriminate between positive and negative pairs. While some predictive features are redundant during training, these features might be relevant during evaluation. We introduce an approach to reduce predictive feature suppression for resource-constrained ICR methods: latent target decoding (LTD). We add an additional decoder to the contrastive ICR framework, to reconstruct the input caption in a latent space of a general-purpose sentence encoder, which prevents the image and caption encoder from suppressing predictive features. We implement the LTD objective as an optimization constraint, to ensure that the reconstruction loss is below a bound value while primarily optimizing for the contrastive loss. Importantly, LTD does not depend on additional training data or expensive (hard) negative mining strategies. Our experiments show that, unlike reconstructing the input caption in the input space, LTD reduces predictive feature suppression, measured by obtaining higher recall@k, r-precision, and nDCG scores than a contrastive ICR baseline. Moreover, we show that LTD should be implemented as an optimization constraint instead of a dual optimization objective. Finally, we show that LTD can be used with different contrastive learning losses and a wide variety of resource-constrained ICR methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Steging C; Renooij S; Verheij B; Bench-Capon T
Arguments, rules and cases in law: Resources for aligning learning and reasoning in structured domains Journal Article
In: Argument & Computation, vol. 14, no. 2, pp. 235–243, 2023, ISSN: 1946-2174.
@article{StegingAC2023,
title = {Arguments, rules and cases in law: Resources for aligning learning and reasoning in structured domains},
author = {Steging, Cor and Renooij, Silja and Verheij, Bart and Bench-Capon, Trevor},
url = {https://doi.org/10.3233/AAC-220017},
doi = {10.3233/AAC-220017},
issn = {1946-2174},
year = {2023},
date = {2023-01-01},
journal = {Argument \& Computation},
volume = {14},
number = {2},
pages = {235--243},
publisher = {IOS Press},
abstract = {This paper provides a formal description of two legal domains. In addition, we describe the generation of various artificial datasets from these domains and explain the use of these datasets in previous experiments aligning learning and reasoning. These resources are made available for the further investigation of connections between arguments, cases and rules. The datasets are publicly available at https://github.com/CorSteging/LegalResources .},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Javdani Rikhtehgar D; Wang S; Huitema H; Alvares J; Schlobach S; Rieffe C; Heylen D
Personalizing Cultural Heritage Access in a Virtual Reality Exhibition: A User Study on Viewing Behavior and Content Preferences Proceedings Article
In: Adjunct Proceedings of the 31st ACM Conference on User Modeling, Adaptation and Personalization, pp. 379–387, 2023.
@inproceedings{javdani2023personalizing,
title = {Personalizing Cultural Heritage Access in a Virtual Reality Exhibition: A User Study on Viewing Behavior and Content Preferences},
author = {Javdani Rikhtehgar, Delaram and Wang, Shenghui and Huitema, Hester and Alvares, Julia and Schlobach, Stefan and Rieffe, Carolien and Heylen, Dirk},
url = {https://dl.acm.org/doi/pdf/10.1145/3563359.3596666},
doi = {10.1145/3563359.3596666},
year = {2023},
date = {2023-01-01},
booktitle = {Adjunct Proceedings of the 31st ACM Conference on User Modeling, Adaptation and Personalization},
pages = {379--387},
abstract = {Leveraging digital technologies, museums now have the opportunity to embrace innovative approaches such as knowledge graphs, virtual reality, and virtual assistants to enhance the preservation and interactive presentation of cultural information. However, despite these advancements, personalizing the museum experience remains a significant challenge. Thus, this paper aims to investigate the necessary elements for offering personalized access to cultural heritage within a VR exhibition. To accomplish this, a user study was conducted to identify user preferences for tailored content descriptions, track user viewing behavior to gauge their interest in a VR exhibition, and determine preferred methods of information gathering. The study involved 31 participants, and the findings are expected to provide valuable insights for designing effective and engaging VR exhibitions that cater to diverse visitor interests.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Valero-Leal E; Bielza C; Larrañaga P; Renooij S
Efficient search for relevance explanations using MAP-independence in Bayesian networks Journal Article
In: International Journal of Approximate Reasoning, vol. 160, pp. 108965, 2023, ISSN: 0888-613X.
@article{VALEROLEAL2023108965,
title = {Efficient search for relevance explanations using {MAP}-independence in {Bayesian} networks},
author = {Enrique Valero-Leal and Concha Bielza and Pedro Larrañaga and Silja Renooij},
url = {https://www.sciencedirect.com/science/article/pii/S0888613X23000968},
doi = {10.1016/j.ijar.2023.108965},
issn = {0888-613X},
year = {2023},
date = {2023-01-01},
journal = {International Journal of Approximate Reasoning},
volume = {160},
pages = {108965},
abstract = {MAP-independence is a novel concept concerned with explaining the (ir)relevance of intermediate nodes for maximum a posteriori (MAP) computations in Bayesian networks. Building upon properties of MAP-independence, we introduce and experiment with methods for finding sets of relevant nodes using both an exhaustive and a heuristic approach. Our experiments show that these properties significantly speed up run time for both approaches. In addition, we link MAP-independence to defeasible reasoning, a type of reasoning that analyses how new evidence may invalidate an already established conclusion. Ways to present users with an explanation using MAP-independence are also suggested.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lukowicz P; others
“Who’s a Good Robot?!” Designing Human-Robot Teaching Interactions Inspired by Dog Training Proceedings Article
In: HHAI 2023: Augmenting Human Intellect: Proceedings of the Second International Conference on Hybrid Human-Artificial Intelligence, pp. 310, IOS Press 2023.
@inproceedings{lukowicz2023sa,
title = {“Who’s a Good Robot?!” Designing Human-Robot Teaching Interactions Inspired by Dog Training},
author = {Lukowicz, P. and others},
url = {https://doi.org/10.3233/faia230094},
year = {2023},
date = {2023-01-01},
booktitle = {HHAI 2023: Augmenting Human Intellect: Proceedings of the Second International Conference on Hybrid Human-Artificial Intelligence},
volume = {368},
pages = {310},
organization = {IOS Press},
internal-note = {NOTE(review): appears to duplicate entry paci2023sa (same title and abstract, full author list there) -- verify and merge},
abstract = {Recent work in Human-Robot Interaction (HRI) investigates the role of human users as teachers from which robots can flexibly learn new personalised skills through interaction. However, existing human-robot teaching methods remain largely unintuitive for the end user and require significant effort to adapt to the way the robot learns. This paper envisions the use of dog training methods as a starting point for HRI researchers to develop more intuitive interactions between human teachers and robot learners. We provide a design framework (called FETCH-R) aimed at guiding the conception of interactions between human teachers and robot learners inspired by dog training. This work paves the way towards the use of animal training as an inspiration to create human-robot teaching protocols that promote engagement, ease-of-use, and fosters human-robot relationships.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
K-CAP '23: Proceedings of the 12th Knowledge Capture Conference 2023 Proceedings
Association for Computing Machinery, Pensacola, FL, USA, 2023, ISBN: 9798400701412.
@proceedings{10.1145/3587259,
title = {{K-CAP} '23: Proceedings of the 12th Knowledge Capture Conference 2023},
url = {https://dl.acm.org/doi/proceedings/10.1145/3587259},
isbn = {9798400701412},
year = {2023},
date = {2023-01-01},
publisher = {Association for Computing Machinery},
address = {Pensacola, FL, USA},
abstract = {It is our great pleasure to welcome you to the 12th ACM International Conference on Knowledge Capture: K-CAP 2023, held in person on December 5th - 7th in Pensacola, Florida, US. Driven by the increasing demands for knowledge-based applications and the unprecedented availability of information from heterogeneous data sources, the study of knowledge capture is of crucial importance. Knowledge capture involves the extraction of useful knowledge from vast and diverse data sources as well as its acquisition directly from human experts. Nowadays knowledge is derived from an increasingly diverse set of data resources that differ with regard to their domain, format, quality, coverage, specificity, viewpoint, bias, and most importantly, consumers and producers of data. The heterogeneity, amount and complexity of data allow us to answer complex questions that could not be answered in isolation, requiring the interaction of different scientific fields and technologies. A goal of K-CAP is to develop such synergies using systematic and rigorous methodologies. The call for papers attracted 105 submissions from all over the world, covering a diverse range of topics spanning knowledge mining, large language models for information extraction, neuro-symbolic approaches for knowledge capture, knowledge engineering, question-answering, knowledge graphs, natural language processing, reasoning, entity linking, querying and knowledge-based applications. From a competitive set of high-quality submissions, we accepted 27 long research papers, 5 short papers, and 1 vision paper. The high-quality program is divided into 7 research sessions, in addition to 3 tutorials reflecting novel topics of interest in Knowledge Capture. We encourage everyone to attend the keynote talks that we have planned for K-CAP 2023. The highly anticipated talks by Dr. Robert R. Hoffman (Florida Institute for Human and Machine Cognition) and Dr. Jane Pinelis (Johns Hopkins University Applied Physics Laboratory) will guide us to a better understanding of the future of knowledge capture and explainable, resilient AI ecosystems, as they become commonplace in real world applications.},
keywords = {},
pubstate = {published},
tppubtype = {proceedings}
}
Visbeek S; Acar E; den Hengst F
Explainable Fraud Detection with Deep Symbolic Classification Proceedings Article
In: Dutta, Sanghamitra; Joseph, Andreas; Li, Jundong; Mishra, Saumitra; Toni, Francesca; Weller, Adrian (Ed.): Proceedings of the Workshop on Explainable AI in Finance, XAIFIN-23, New York, 2023.
@inproceedings{xaifin23,
title = {Explainable Fraud Detection with {Deep Symbolic Classification}},
author = {Visbeek, Samantha and Acar, Erman and den Hengst, Floris},
editor = {Dutta, Sanghamitra and Joseph, Andreas and Li, Jundong and Mishra, Saumitra and Toni, Francesca and Weller, Adrian},
url = {https://arxiv.org/pdf/2312.00586},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the Workshop on Explainable AI in Finance, XAIFIN-23},
address = {New York},
abstract = {There is a growing demand for explainable, transparent, and data-driven models within the domain of fraud detection. Decisions made by the fraud detection model need to be explainable in the event of a customer dispute. Additionally, the decision-making process in the model must be transparent to win the trust of regulators, analysts, and business stakeholders. At the same time, fraud detection solutions can benefit from data due to the noisy and dynamic nature of fraud detection and the availability of large historical data sets. Finally, fraud detection is notorious for its class imbalance: there are typically several orders of magnitude more legitimate transactions than fraudulent ones. In this paper, we present Deep Symbolic Classification (DSC), an extension of the Deep Symbolic Regression framework to classification problems. DSC casts classification as a search problem in the space of all analytic functions composed of a vocabulary of variables, constants, and operations and optimizes for an arbitrary evaluation metric directly. The search is guided by a deep neural network trained with reinforcement learning. Because the functions are mathematical expressions that are in closed-form and concise, the model is inherently explainable both at the level of a single classification decision and at the model's decision process level. Furthermore, the class imbalance problem is successfully addressed by optimizing for metrics that are robust to class imbalance such as the F1 score. This eliminates the need for problematic oversampling and undersampling techniques that plague traditional approaches. Finally, the model allows to explicitly balance between the prediction accuracy and the explainability. An evaluation on the PaySim data set demonstrates competitive predictive performance with state-of-the-art models, while surpassing them in terms of explainability. This establishes DSC as a promising model for fraud detection systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Troshin S; Niculae V
Wrapped β-Gaussians with compact support for exact probabilistic modeling on manifolds Journal Article
In: Transactions on Machine Learning Research, 2023, ISSN: 2835-8856.
@article{troshin2023wrapped,
title = {Wrapped β-{Gaussians} with compact support for exact probabilistic modeling on manifolds},
author = {Sergey Troshin and Vlad Niculae},
url = {https://openreview.net/forum?id=KrequDpWzt},
issn = {2835-8856},
year = {2023},
date = {2023-01-01},
journal = {Transactions on Machine Learning Research},
abstract = {We introduce wrapped β-Gaussians, a family of wrapped distributions on Riemannian manifolds, supporting efficient reparametrized sampling, as well as exact density estimation, effortlessly supporting high dimensions and anisotropic scale parameters. We extend Fenchel-Young losses for geometry-aware learning with wrapped β-Gaussians, and demonstrate the efficacy of our proposed family in a suite of experiments on hypersphere and rotation manifolds: data fitting, hierarchy encoding, generative modeling with variational autoencoders, and multilingual word embedding alignment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tiddi I; de Boer V; Schlobach S; Meyer-Vitali A
Knowledge Engineering for Hybrid Intelligence Proceedings Article
In: Proceedings of the 12th Knowledge Capture Conference 2023, pp. 75–82, Association for Computing Machinery, Pensacola, FL, USA, 2023, ISBN: 9798400701412.
@inproceedings{10.1145/3587259.3627541,
title = {Knowledge Engineering for Hybrid Intelligence},
author = {Tiddi, Ilaria and de Boer, Victor and Schlobach, Stefan and Meyer-Vitali, André},
url = {https://doi.org/10.1145/3587259.3627541},
doi = {10.1145/3587259.3627541},
isbn = {9798400701412},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 12th Knowledge Capture Conference 2023},
pages = {75--82},
publisher = {Association for Computing Machinery},
address = {Pensacola, FL, USA},
series = {K-CAP '23},
abstract = {Hybrid Intelligence (HI) is a rapidly growing field aiming at creating collaborative systems where humans and intelligent machines cooperate in mixed teams towards shared goals. A clear characterization of the tasks and knowledge exchanged by the agents in HI applications is still missing, hampering both standardization and reuse when designing new HI systems. Knowledge Engineering (KE) methods have been used to solve such issue through the formalization of tasks and roles in knowledge-intensive processes. We investigate whether KE methods can be applied to HI scenarios, and specifically whether common, reusable elements such as knowledge roles, tasks and subtasks can be identified in contexts where symbolic, subsymbolic and human-in-the-loop components are involved. We first adapt the well-known CommonKADS methodology to HI, and then use it to analyze several HI projects and identify common tasks. The results are (i) a high-level ontology of HI knowledge roles, (ii) a set of novel, HI-specific tasks and (iii) an open repository to store scenarios – allowing reuse, validation and design of existing and new HI applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wolff J; de Boer V; Heylen D; van Riemsdijk B
Using Non-Monotonic Reasoning for Understandable and Flexible User-Models Proceedings Article
In: Sauerwald, Kai; Thimm, Matthias (Ed.): Proceedings of the 21st International Workshop on Non-Monotonic Reasoning co-located with the 20th International Conference on Principles of Knowledge Representation and Reasoning (KR 2023) and co-located with the 36th International Workshop on Description Logics (DL 2023), Rhodes, Greece, September 2-4, 2023, pp. 133-136, CEUR-WS.org, 2023.
@inproceedings{WolffBHR23,
title = {Using Non-Monotonic Reasoning for Understandable and Flexible User-Models},
author = {Johanna Wolff and Victor de Boer and Dirk Heylen and Birna van Riemsdijk},
editor = {Kai Sauerwald and Matthias Thimm},
url = {https://ceur-ws.org/Vol-3464/short5.pdf},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 21st International Workshop on Non-Monotonic Reasoning co-located with the 20th International Conference on Principles of Knowledge Representation and Reasoning (KR 2023) and co-located with the 36th International Workshop on Description Logics (DL 2023), Rhodes, Greece, September 2-4, 2023},
volume = {3464},
pages = {133--136},
publisher = {CEUR-WS.org},
series = {CEUR Workshop Proceedings},
abstract = {Behavior support agents can assist humans in accomplishing a variety of goals by suggesting actions that promote the desired outcome while being in line with the user’s needs and preferences. In order to make these agents more effective, flexible and responsible, this research aims to create a framework which allows for more interaction between the agent and the user. By using techniques from non-monotonic reasoning, this work aims to model the knowledge base of the agent so that it aligns with the user’s mental model and is able to be modified by the user through new input. In order for the agent to be able to explain its output to the user, the reasoning process needs to be explicit and traceable, which this work intends to incorporate into a logical framework.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Woerkom W; Grossi D; Verheij B; Prakken H
Hierarchical Precedential Constraint Proceedings Article
In: Nineteenth International Conference on Artificial Intelligence and Law, pp. 333–342, 2023.
@inproceedings{vanwoerkom2023hierarchical,
title = {Hierarchical Precedential Constraint},
author = {van Woerkom, Wijnand and Grossi, Davide and Verheij, Bart and Prakken, Henry},
url = {https://dl.acm.org/doi/10.1145/3594536.3595154},
doi = {10.1145/3594536.3595154},
year = {2023},
date = {2023-01-01},
booktitle = {Nineteenth International Conference on Artificial Intelligence and Law},
pages = {333--342},
series = {ICAIL'23},
abstract = {In recent work, theories of case-based legal reasoning have been applied to the development of explainable artificial intelligence methods, through the analogy of training examples as previously decided cases. One such theory is that of precedential constraint. A downside of this theory with respect to this application is that it performs single-step reasoning, moving directly from the case base to an outcome. For this reason we propose a generalization of the theory of precedential constraint which allows multi-step reasoning, moving from the case base through a series of intermediate legal concepts before arriving at an outcome. Our generalization revolves around the notion of factor hierarchy, so we call this hierarchical precedential constraint. We present the theory, demonstrate its applicability to case-based legal reasoning, and perform a preliminary analysis of its theoretical properties.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liscio E; Lera-Leri R; Bistaffa F; Dobbe R I J; Jonker C M; Lopez-Sanchez M; Rodriguez-Aguilar J A; Murukannaiah P K
Value Inference in Sociotechnical Systems Proceedings Article
In: Proc. of the 22nd International Conference on Autonomous Agents and Multiagent Systems, pp. 1774-1780, IFAAMAS, London, United Kingdom, 2023.
@inproceedings{Liscio2023AAMAS,
title = {Value Inference in Sociotechnical Systems},
author = {Liscio, Enrico and Lera-Leri, Roger and Bistaffa, Filippo and Dobbe, Roel I. J. and Jonker, Catholijn M. and Lopez-Sanchez, Maite and Rodriguez-Aguilar, Juan A. and Murukannaiah, Pradeep K.},
url = {https://www.ifaamas.org/Proceedings/aamas2023/pdfs/p1774.pdf},
year = {2023},
date = {2023-01-01},
booktitle = {Proc. of the 22nd International Conference on Autonomous Agents and Multiagent Systems},
pages = {1774--1780},
publisher = {IFAAMAS},
address = {London, United Kingdom},
series = {AAMAS '23},
abstract = {As artificial agents become increasingly embedded in our society, we must ensure that their behavior aligns with human values. Value alignment entails value inference, the process of identifying values and reasoning about how humans prioritize values. We introduce a holistic framework that connects the technical (AI) components necessary for value inference. Subsequently, we discuss how hybrid intelligence—the synergy of human and artificial intelligence—is instrumental to the success of value inference. Finally, we illustrate how value inference both poses significant challenges and provides novel opportunities for multiagent systems research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liscio E; Araque O; Gatti L; Constantinescu I; Jonker C M; Kalimeri K; Murukannaiah P K
What does a Text Classifier Learn about Morality? An Explainable Method for Cross-Domain Comparison of Moral Rhetoric Proceedings Article
In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics, pp. 14113-14132, ACL, Toronto, Canada, 2023.
@inproceedings{Liscio2023ACL,
title = {What does a Text Classifier Learn about Morality? An Explainable Method for Cross-Domain Comparison of Moral Rhetoric},
author = {Liscio, Enrico and Araque, Oscar and Gatti, Lorenzo and Constantinescu, Ionut and Jonker, Catholijn M. and Kalimeri, Kyriaki and Murukannaiah, Pradeep K.},
url = {https://aclanthology.org/2023.acl-long.789.pdf},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics},
pages = {14113--14132},
publisher = {ACL},
address = {Toronto, Canada},
series = {ACL '23},
abstract = {Moral rhetoric influences our judgement. Although social scientists recognize moral expression as domain specific, there are no systematic methods for analyzing whether a text classifier learns the domain-specific expression of moral language or not. We propose Tomea, a method to compare a supervised classifier’s representation of moral rhetoric across domains. Tomea enables quantitative and qualitative comparisons of moral rhetoric via an interpretable exploration of similarities and differences across moral concepts and domains. We apply Tomea on moral narratives in thirty-five thousand tweets from seven domains. We extensively evaluate the method via a crowd study, a series of cross-domain moral classification comparisons, and a qualitative analysis of cross-domain moral expression.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Reimann M M; Oertel C; Kunneman F A; Hindriks K V
Predicting Interaction Quality Aspects Using Level-Based Scores for Conversational Agents Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, Association for Computing Machinery, Würzburg, Germany, 2023, ISBN: 9781450399944.
@inproceedings{10.1145/3570945.3607332,
title = {Predicting Interaction Quality Aspects Using Level-Based Scores for Conversational Agents},
author = {Reimann, Merle M. and Oertel, Catharine and Kunneman, Florian A. and Hindriks, Koen V.},
url = {https://doi.org/10.1145/3570945.3607332},
doi = {10.1145/3570945.3607332},
isbn = {9781450399944},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
publisher = {Association for Computing Machinery},
address = {Würzburg, Germany},
series = {IVA '23},
abstract = {In order to improve human-agent interaction, it is essential to have good measures of interaction quality. We define interaction quality based on multiple aspects, including usability, likability and perceived conversation quality as subjective measures, and interaction length, completion rate and frequency of unrecognized utterances as objective measures. Determining necessary improvements to a conversational agent is a non-trivial task, because it is difficult to infer from an evaluation of the agent as a whole, which aspects of the agent need to be improved to raise the interaction quality. In this paper, we propose a scoring system for task-oriented conversational agents to predict aspects of interaction quality and to guide an iterative improvement process. Our scoring system does not provide a single score, but leverages structural features of the dialogue management approach and assigns a score on three levels: the utterance, dialogue move, and genre level. Using the agent's scores on separate levels to predict the interaction quality allows making targeted improvements to the conversational agent. In order to evaluate our scoring system, we apply it over the course of multiple crowdsourcing pilot studies, using a recipe recommendation agent. We evaluate the obtained scores in regard to their ability to predict selected objective and subjective interaction quality aspects, as well as their suitability for making informed decisions about necessary improvements.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hou M; Hindriks K; Eiben A; Baraka K
A Process-Oriented Framework for Robot Imitation Learning in Human-Centered Interactive Tasks Proceedings Article
In: 2023 32nd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 1745–1752, IEEE 2023.
@inproceedings{hou2023process,
title = {A Process-Oriented Framework for Robot Imitation Learning in Human-Centered Interactive Tasks},
author = {Hou, Muhan and Hindriks, Koen and Eiben, A. E. and Baraka, Kim},
url = {https://ieeexplore.ieee.org/document/10309326/},
year = {2023},
date = {2023-01-01},
booktitle = {2023 32nd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {1745--1752},
organization = {IEEE},
abstract = {Human-centered interactive robot tasks (e.g., social greetings and cooperative dressing) are a type of task where humans are involved in task dynamics and performance evaluation. Such tasks require spatial and temporal coordination between agents in real-time, tackling physical limitations from constrained robot bodies, and connecting human user experience with concrete learning objectives to inform algorithm design. To solve these challenges, imitation learning has become a popular approach whereby a robot learns to perform a task by imitating how human experts do it (i.e., expert policies). However, previous works tend to isolate the algorithm design from the design of the whole learning pipeline, neglecting its connection with other modules inside the process (like data collection and user-centered subjective evaluation) from the view as a system. Going beyond traditional imitation learning, this work reexamines robot imitation learning in human-centered interactive tasks from the perspective of the whole learning pipeline, ranging from data collection to subjective evaluation. We present a process-oriented framework that consists of a guideline to collect diverse yet representative demonstrations and an interpreter to explain subjective user-centered performance with objective robot-related parameters. We illustrate the steps covered by the framework in a fist-bump greeting task as demonstrative deployment. Results show that our framework is able to identify representative human-centered features to instruct demonstration collection and validate influential robot-centered factors to interpret the gap in subjective performance between the expert policy and the imitator policy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Paci P; Tiddi I; Preciado D; Baraka K
“Who’s a Good Robot?!” Designing Human-Robot Teaching Interactions Inspired by Dog Training Book Section
In: HHAI 2023: Augmenting Human Intellect, pp. 310–319, IOS Press, 2023.
@incollection{paci2023sa,
title = {“Who’s a Good Robot?!” Designing Human-Robot Teaching Interactions Inspired by Dog Training},
author = {Paci, Patrizia and Tiddi, Ilaria and Preciado, Daniel and Baraka, Kim},
url = {https://ebooks.iospress.nl/pdf/doi/10.3233/FAIA230094},
doi = {10.3233/FAIA230094},
year = {2023},
date = {2023-01-01},
booktitle = {HHAI 2023: Augmenting Human Intellect},
pages = {310--319},
publisher = {IOS Press},
abstract = {Recent work in Human-Robot Interaction (HRI) investigates the role of human users as teachers from which robots can flexibly learn new personalised skills through interaction. However, existing human-robot teaching methods remain largely unintuitive for the end user and require significant effort to adapt to the way the robot learns. This paper envisions the use of dog training methods as a starting point for HRI researchers to develop more intuitive interactions between human teachers and robot learners. We provide a design framework (called FETCHR) aimed at guiding the conception of interactions between human teachers and robot learners inspired by dog training. This work paves the way towards the use of animal training as an inspiration to create human-robot teaching protocols that promote engagement, ease-of-use, and fosters human-robot relationships.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Hou M; Hindriks K; Eiben A; Baraka K
Shaping Imbalance into Balance: Active Robot Guidance of Human Teachers for Better Learning from Demonstrations Proceedings Article
In: 2023 32nd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 1737–1744, IEEE 2023.
@inproceedings{hou2023shaping,
title = {Shaping Imbalance into Balance: Active Robot Guidance of Human Teachers for Better Learning from Demonstrations},
author = {Hou, Muhan and Hindriks, Koen and Eiben, A. E. and Baraka, Kim},
url = {https://ieeexplore.ieee.org/document/10309481},
year = {2023},
date = {2023-01-01},
booktitle = {2023 32nd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {1737--1744},
organization = {IEEE},
abstract = {Learning from Demonstrations (LfD) transfers skills from human teachers to robots. However, data imbalance in demonstrations can bias policies towards majority situations. Previous work attempted to solve this problem after data collection, but few efforts were made to maintain a balanced distribution from the phase of data acquisition. Our method accounts for the influence of robots on human teachers and enables robots to actively guide interaction to approximate demonstration distributions to target distributions. Simulated and real-world experiments validated the method’s efficacy in shaping demonstration distribution into various target distributions and robustness to various levels of uncertainties. Also, our method significantly improved the generalization ability of robot learning when LfD policies were trained with data collected by our method compared to natural data collection.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rakova B; Dobbe R
Algorithms as Social-Ecological-Technological Systems: an Environmental Justice Lens on Algorithmic Audits Proceedings Article
In: 2023 ACM Conference on Fairness, Accountability, and Transparency, Association for Computing Machinery, Chicago, IL, USA, 2023.
@inproceedings{rakova_algorithms_2023,
title = {Algorithms as Social-Ecological-Technological Systems: an Environmental Justice Lens on Algorithmic Audits},
author = {Rakova, Bogdana and Dobbe, Roel},
url = {https://doi.org/10.1145/3593013.3594014},
doi = {10.1145/3593013.3594014},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-05},
booktitle = {2023 ACM Conference on Fairness, Accountability, and Transparency},
publisher = {Association for Computing Machinery},
address = {Chicago, IL, USA},
series = {FAccT '23},
abstract = {This article formulates seven lessons for preventing harm in artificial intelligence (AI) systems based on insights from the field of system safety for software-based automation in safety-critical domains. New applications of AI across societal domains and public organizations and infrastructures come with new hazards, which lead to new forms of harm, both grave and pernicious. The text addresses the lack of consensus for diagnosing and eliminating new AI system hazards. For decades, the field of system safety has dealt with accidents and harm in safety-critical systems governed by varying degrees of software-based automation and decision-making. This field embraces the core assumption of systems and control that AI systems cannot be safeguarded by technical design choices on the model or algorithm alone, instead requiring an end-to-end hazard analysis and design frame that includes the context of use, impacted stakeholders and the formal and informal institutional environment in which the system operates. Safety and other values are then inherently socio-technical and emergent system properties that require design and control measures to instantiate these across the technical, social and institutional components of a system. This article honors system safety pioneer Nancy Leveson, by situating her core lessons for today's AI system safety challenges [2]. For every lesson, concrete tools are offered for rethinking and reorganizing the safety management of AI systems, both in design and governance. This history tells us that effective AI safety management requires transdisciplinary approaches and a shared language that allows involvement of all levels of society. The article is a non-archival contribution to FAccT 2022, and will be published as a chapter to The Oxford Handbook of AI Governance [1]. The full article is available as a pre-print on ArXiv via https://arxiv.org/abs/2202.09292.},
internal-note = {NOTE(review): this abstract does not match the title -- it describes a different paper (Dobbe's system-safety lessons article, FAccT 2022 non-archival / Oxford Handbook of AI Governance chapter). Verify against DOI 10.1145/3593013.3594014 and replace with the correct abstract.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligthart M E U; de Droog S M; Bossema M; Elloumi L; Hoogland K; Smakman M; Hindriks K V; Ben Allouch S
Design Specifications for a Social Robot Math Tutor Proceedings Article
In: Proceedings of the 2023 ACM/IEEE International Conference on Human-Robot Interaction, 2023.
@inproceedings{ligthart_design_2023,
title = {Design Specifications for a Social Robot Math Tutor},
author = {Ligthart, Mike E.U. and de Droog, Simone M. and Bossema, Marianne and Elloumi, Lamia and Hoogland, Kees and Smakman, Matthijs and Hindriks, Koen V. and Ben Allouch, Somaya},
url = {https://doi.org/10.1145/3568162.3576957},
doi = {10.1145/3568162.3576957},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 ACM/IEEE International Conference on Human-Robot Interaction},
abstract = {To benefit from the social capabilities of a robot math tutor, instead of being distracted by them, a novel approach is needed where the math task and the robot's social behaviors are better intertwined. We present concrete design specifications of how children can practice math via a personal conversation with a social robot and how the robot can scaffold instructions. We evaluated the designs with a three-session experimental user study (n = 130, 8-11 y.o.). Participants got better at math over time when the robot scaffolded instructions. Furthermore, the robot felt more as a friend when it personalized the conversation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Timmerman E; Ligthart M E U
Let’s Roll Together: Children Helping a Robot Play a Dice Game Proceedings Article
In: Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction, 2023.
@inproceedings{timmerman_lets_2023,
title = {Let’s Roll Together: Children Helping a Robot Play a Dice Game},
author = {Timmerman, Emily and Ligthart, Mike E.U.},
url = {https://doi.org/10.1145/3568294.3580130},
doi = {10.1145/3568294.3580130},
year = {2023},
date = {2023-01-01},
booktitle = {Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction},
abstract = {Play is an important part of children’s lives and playing with social robots could provide powerful interventions, for example in education. However, child-robot play is often restricted by the technical limitations of the robot. Tools like Bluetooth-connected dice could circumvent some of these limitations, but technical limitations can also be resolved in a social way. In this paper, we explore children playing a dice game with a Nao robot. The Nao robot cannot pick up the dice. We compared two modes of helping: rolling for the robot and handing the dice to the robot. The results show that children prefer handing the dice to the robot. They feel the robot is more involved when it physically participates. Children who feel the robot is more involved, enjoy the game more. Finally, we found evidence that helping the robot might even be preferred over the robot not needing any help.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stower R; Ligthart M E U; Spitale M; Calvo-Barajas N; de Droog S M
CRITTER: Child-Robot Interaction and Interdisciplinary Research Proceedings Article
In: Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction, 2023.
@inproceedings{stower_critter_2023,
title = {CRITTER: Child-Robot Interaction and Interdisciplinary Research},
author = {Stower, Rebecca and Ligthart, Mike E.U. and Spitale, Micol and Calvo-Barajas, Natalia and de Droog, Simone M.},
url = {https://doi.org/10.1145/3568294.3579955},
doi = {10.1145/3568294.3579955},
year = {2023},
date = {2023-01-01},
booktitle = {Companion of the 2023 ACM/IEEE International Conference on Human-Robot Interaction},
abstract = {Several recent works in human-robot-interaction (HRI) have begun to highlight the importance of the replication crisis and open science practices for our field. Yet, suggestions and recommendations tailored to child-robot-interaction (CRI) research, which poses its own additional set of challenges, remain limited. There is also an increased need within both HRI and CRI for inter and cross-disciplinary collaborations, where input from multiple different domains can contribute to better research outcomes. Consequently, this workshop aims to facilitate discussions between researchers from diverse disciplines within CRI. The workshop will open with a panel discussion between CRI researchers from different disciplines, followed by 3-minute flash talks of the accepted submissions. The second half of the workshop will consist of breakout group discussions, where both senior and junior academics from different disciplines can share their experiences of conducting CRI research. Through this workshop, we hope to create a common ground for addressing shared challenges in CRI, as well as identify a set of possible solutions going forward.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miras K; Mocanu D; Eiben A E
Hu-bot: promoting the cooperation between humans and mobile robots Journal Article
In: Neural Computing and Applications, 2023.
@article{miras23hubot,
title = {Hu-bot: promoting the cooperation between humans and mobile robots},
author = {Miras, Karine and Mocanu, Decebal and Eiben, A. E.},
url = {https://link.springer.com/article/10.1007/s00521-022-08061-z},
doi = {10.1007/s00521-022-08061-z},
year = {2023},
date = {2023-01-01},
journal = {Neural Computing and Applications},
abstract = {This paper investigates human–robot collaboration in a novel setup: a human helps a mobile robot that can move and navigate freely in an environment. Specifically, the human helps by remotely taking over control during the learning of a task. The task is to find and collect several items in a walled arena, and Reinforcement Learning is used to seek a suitable controller. If the human observes undesired robot behavior, they can directly issue commands for the wheels through a game joystick. Experiments in a simulator showed that human assistance improved robot behavior efficacy by 30% and efficiency by 12%. The best policies were also tested in real life, using physical robots. Hardware experiments showed no significant difference concerning the simulations, providing empirical validation of our approach in practice.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dudzik B; Broekens J
A Valid Self-Report is Never Late, Nor is it Early: On Considering the "Right" Temporal Distance for Assessing Emotional Experience Journal Article
In: 2023.
@article{Dudzik2023,
title = {A Valid Self-Report is Never Late, Nor is it Early: On Considering the "Right" Temporal Distance for Assessing Emotional Experience},
author = {Bernd Dudzik and Joost Broekens},
url = {http://arxiv.org/abs/2302.02821},
archiveprefix = {arXiv},
eprint = {2302.02821},
year = {2023},
date = {2023-01-01},
abstract = {Developing computational models for automatic affect prediction requires valid self-reports about individuals' emotional interpretations of stimuli. In this article, we highlight the important influence of the temporal distance between a stimulus event and the moment when its experience is reported on the provided information's validity. This influence stems from the time-dependent and time-demanding nature of the involved cognitive processes. As such, reports can be collected too late: forgetting is a widely acknowledged challenge for accurate descriptions of past experience. For this reason, methods striving for assessment as early as possible have become increasingly popular. However, here we argue that collection may also occur too early: descriptions about very recent stimuli might be collected before emotional processing has fully converged. Based on these notions, we champion the existence of a temporal distance for each type of stimulus that maximizes the validity of self-reports – a "right" time. Consequently, we recommend future research to (1) consciously consider the potential influence of temporal distance on affective self-reports when planning data collection, (2) document the temporal distance of affective self-reports wherever possible as part of corpora for computational modelling, and finally (3) and explore the effect of temporal distance on self-reports across different types of stimuli.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Grossi D; van der Hoek W; Kuijer L B
Reasoning about General Preference Relations Journal Article
In: Artif. Intell., vol. 313, no. C, 2022, ISSN: 0004-3702.
@article{grossi22reasoning,
title = {Reasoning about General Preference Relations},
author = {Grossi, Davide and van der Hoek, Wiebe and Kuijer, Louwe B.},
url = {https://pure.rug.nl/ws/portalfiles/portal/576044639/93403444_8620970_maximality_2021.pdf},
doi = {10.1016/j.artint.2022.103793},
issn = {0004-3702},
year = {2022},
date = {2022-11-01},
journal = {Artif. Intell.},
volume = {313},
number = {C},
pages = {103793},
publisher = {Elsevier Science Publishers Ltd.},
address = {GBR},
abstract = {Preference relations are at the heart of many fundamental concepts in artificial intelligence, ranging from utility comparisons, to defeat among strategies and relative plausibility among states, just to mention a few. Reasoning about such relations has been the object of extensive research and a wealth of formalisms exist to express and reason about them. One such formalism is conditional logic, which focuses on reasoning about the ``best'' alternatives according to a given preference relation. A ``best'' alternative is normally interpreted as an alternative that is either maximal (no other alternative is preferred to it) or optimal (it is at least as preferred as all other alternatives). And the preference relation is normally assumed to satisfy strong requirements (typically transitivity and some kind of well-foundedness assumption). Here, we generalize this existing literature in two ways. Firstly, in addition to maximality and optimality, we consider two other interpretations of ``best'', which we call unmatchedness and acceptability. Secondly, we do not inherently require the preference relation to satisfy any constraints. Instead, we allow the relation to satisfy any combination of transitivity, totality and anti-symmetry. This allows us to model a wide range of situations, including cases where the lack of constraints stems from a modeled agent being irrational (for example, an agent might have preferences that are neither transitive nor total nor anti-symmetric) or from the interaction of perfectly rational agents (for example, a defeat relation among strategies in a game might be anti-symmetric but not total or transitive). For each interpretation of ``best'' (maximal, optimal, unmatched or acceptable) and each combination of constraints (transitivity, totality and/or anti-symmetry), we study the sets of valid inferences. 
Specifically, in all but one case we introduce a sound and strongly complete axiomatization, and in the one remaining case we show that no such axiomatization exists.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Renooij S
Relevance for Robust Bayesian Network MAP-Explanations Proceedings Article
In: Salmerón, Antonio; Rumí, Rafael (Ed.): Proceedings of The 11th International Conference on Probabilistic Graphical Models, pp. 13–24, PMLR, 2022.
@inproceedings{pmlr-v186-renooij22a,
title = {Relevance for Robust Bayesian Network MAP-Explanations},
author = {Renooij, Silja},
editor = {Salmerón, Antonio and Rumí, Rafael},
url = {https://proceedings.mlr.press/v186/renooij22a.html},
year = {2022},
date = {2022-10-01},
booktitle = {Proceedings of The 11th International Conference on Probabilistic Graphical Models},
volume = {186},
pages = {13--24},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
abstract = {In the context of explainable AI, the concept of MAP-independence was recently introduced as a means for conveying the (ir)relevance of intermediate nodes for MAP computations in Bayesian networks. In this paper, we further study the concept of MAP-independence, discuss methods for finding sets of relevant nodes, and suggest ways to use these in providing users with an explanation concerning the robustness of the MAP result.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Baez Santamaria S; Vossen P; Baier T
Evaluating Agent Interactions Through Episodic Knowledge Graphs Proceedings Article
In: Proceedings of the 1st Workshop on Customized Chat Grounding Persona and Knowledge, pp. 15–28, Association for Computational Linguistics, Gyeongju, Republic of Korea, 2022.
@inproceedings{baez-santamaria-etal-2022-evaluating,
title = {Evaluating Agent Interactions Through Episodic Knowledge Graphs},
author = {Baez Santamaria, Selene and Vossen, Piek and Baier, Thomas},
url = {https://aclanthology.org/2022.ccgpk-1.3},
year = {2022},
date = {2022-10-01},
booktitle = {Proceedings of the 1st Workshop on Customized Chat Grounding Persona and Knowledge},
pages = {15--28},
publisher = {Association for Computational Linguistics},
address = {Gyeongju, Republic of Korea},
abstract = {We present a new method based on episodic Knowledge Graphs (eKGs) for evaluating (multimodal) conversational agents in open domains. This graph is generated by interpreting raw signals during conversation and is able to capture the accumulation of knowledge over time. We apply structural and semantic analysis of the resulting graphs and translate the properties into qualitative measures. We compare these measures with existing automatic and manual evaluation metrics commonly used for conversational agents. Our results show that our Knowledge-Graph-based evaluation provides more qualitative insights into interaction and the agent's behavior.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ho L; de Boer V; van Riemsdijk M B; Schlobach S; Tielman M
Knowledge Representation Formalisms for Hybrid Intelligence Proceedings Article
In: 1st International Workshop on Argumentation for eXplainable AI (ArgXAI) co-located with 9th International Conference on Computational Models of Argument (COMMA 2022), online, 2022.
@inproceedings{ArgXAI2022,
title = {Knowledge Representation Formalisms for Hybrid Intelligence},
author = {Ho, Loan and de Boer, Victor and van Riemsdijk, M. Birna and Schlobach, Stefan and Tielman, Myrthe},
url = {http://ceur-ws.org/Vol-3209/7787.pdf},
year = {2022},
date = {2022-09-01},
booktitle = {1st International Workshop on Argumentation for eXplainable AI (ArgXAI) co-located with 9th International Conference on Computational Models of Argument (COMMA 2022)},
series = {CEUR Workshop Proceedings},
volume = {Vol-3209},
publisher = {CEUR-WS.org},
address = {online},
abstract = {Hybrid Intelligence (HI) is the combination of human and machine intelligence, expanding human intellect instead of replacing it. Information in HI scenarios is often inconsistent, e.g. due to shifting preferences, user's motivation or conflicts arising from merged data. As it provides an intuitive mechanism for reasoning with conflicting information, with natural explanations that are understandable to humans, our hypothesis is that Dung's Abstract Argumentation (AA) is a suitable formalism for such hybrid scenarios. This paper investigates the capabilities of Argumentation in representing and reasoning in the presence of inconsistency, and its potential for intuitive explainability to link between artificial and human actors. To this end, we conduct a survey among a number of research projects of the Hybrid Intelligence Centre1 . Within these projects we analyse the applicability of argumentation with respect to various inconsistency types stemming, for instance, from commonsense reasoning, decision making, and negotiation. The results show that 14 out of the 21 projects have to deal with inconsistent information. In half of those scenarios, the knowledge models come with natural preference relations over the information. We show that Argumentation is a suitable framework to model the specific knowledge in 10 out of 14 projects, thus indicating the potential of Abstract Argumentation for transparently dealing with inconsistencies in Hybrid Intelligence systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khurana U; Vermeulen I; Nalisnick E; Van Noorloos M; Fokkens A
Hate Speech Criteria: A Modular Approach to Task-Specific Hate Speech Definitions Proceedings Article
In: Proceedings of the Sixth Workshop on Online Abuse and Harms (WOAH), pp. 176–191, Association for Computational Linguistics, Seattle, Washington (Hybrid), 2022.
@inproceedings{khurana-etal-2022-hate,
title = {Hate Speech Criteria: A Modular Approach to Task-Specific Hate Speech Definitions},
author = {Khurana, Urja and Vermeulen, Ivar and Nalisnick, Eric and Van Noorloos, Marloes and Fokkens, Antske},
url = {https://aclanthology.org/2022.woah-1.17},
doi = {10.18653/v1/2022.woah-1.17},
year = {2022},
date = {2022-07-01},
booktitle = {Proceedings of the Sixth Workshop on Online Abuse and Harms (WOAH)},
pages = {176--191},
publisher = {Association for Computational Linguistics},
address = {Seattle, Washington (Hybrid)},
abstract = {The subjectivity of automatic hate speech detection makes it a complex task, reflected in different and incomplete definitions in NLP. We present hate speech criteria, developed with insights from a law and social science expert, that help researchers create more explicit definitions and annotation guidelines on five aspects: (1) target groups and (2) dominance, (3) perpetrator characteristics, (4) explicit presence of negative interactions, and the (5) type of consequences/effects. Definitions can be structured so that they cover a more broad or more narrow phenomenon and conscious choices can be made on specifying criteria or leaving them open. We argue that the goal and exact task developers have in mind should determine how the scope of hate speech is defined. We provide an overview of the properties of datasets from hatespeechdata.com that may help select the most suitable dataset for a specific scenario.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Atigh M G; Schoep J; Acar E; van Noord N; Mettes P
Hyperbolic Image Segmentation Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4453–4462, 2022.
@inproceedings{Atigh_2022_CVPR,
title = {Hyperbolic Image Segmentation},
author = {Atigh, Mina Ghadimi and Schoep, Julian and Acar, Erman and van Noord, Nanne and Mettes, Pascal},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Atigh_Hyperbolic_Image_Segmentation_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {4453--4462},
abstract = {For image segmentation, the current standard is to perform pixel-level optimization and inference in Euclidean output embedding spaces through linear hyperplanes. In this work, we show that hyperbolic manifolds provide a valuable alternative for image segmentation and propose a tractable formulation of hierarchical pixel-level classification in hyperbolic space. Hyperbolic Image Segmentation opens up new possibilities and practical benefits for segmentation, such as uncertainty estimation and boundary information for free, zero-label generalization, and increased performance in low-dimensional output embeddings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sarvi F; Heuss M; Aliannejadi M; Schelter S; de Rijke M
Understanding and Mitigating the Effect of Outliers in Fair Ranking Proceedings Article
In: WSDM 2022: The Fifteenth International Conference on Web Search and Data Mining, ACM, 2022.
@inproceedings{sarvi-2022-understanding,
  title     = {Understanding and Mitigating the Effect of Outliers in Fair Ranking},
  author    = {Sarvi, Fatemeh and Heuss, Maria and Aliannejadi, Mohammad and Schelter, Sebastian and de Rijke, Maarten},
  url       = {https://arxiv.org/abs/2112.11251},
  year      = {2022},
  date      = {2022-02-01},
  booktitle = {WSDM 2022: The Fifteenth International Conference on Web Search and Data Mining},
  publisher = {ACM},
  abstract  = {Traditional ranking systems are expected to sort items in the order of their relevance and thereby maximize their utility. In fair ranking, utility is complemented with fairness as an optimization goal. Recent work on fair ranking focuses on developing algorithms to optimize for fairness, given position-based exposure. In contrast, we identify the potential of outliers in a ranking to influence exposure and thereby negatively impact fairness. An outlier in a list of items can alter the examination probabilities, which can lead to different distributions of attention, compared to position-based exposure. We formalize outlierness in a ranking, show that outliers are present in realistic datasets, and present the results of an eye-tracking study, showing that users scanning order and the exposure of items are influenced by the presence of outliers. We then introduce OMIT, a method for fair ranking in the presence of outliers. Given an outlier detection method, OMIT improves fair allocation of exposure by suppressing outliers in the top-k ranking. Using an academic search dataset, we show that outlierness optimization leads to a fairer policy that displays fewer outliers in the top-k, while maintaining a reasonable trade-off between fairness and utility.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ulusoy O; Yolum P
PANOLA: A Personal Assistant for Supporting Users in Preserving Privacy Journal Article
In: ACM Transactions on Internet Technology, vol. 22, no. 1, 2022.
@article{panola-2022,
  title     = {PANOLA: A Personal Assistant for Supporting Users in Preserving Privacy},
  author    = {Ulusoy, Onuralp and Yolum, Pinar},
  url       = {https://webspace.science.uu.nl/~yolum001/papers/panola-2022.pdf},
  year      = {2022},
  date      = {2022-02-01},
  journal   = {ACM Transactions on Internet Technology},
  volume    = {22},
  number    = {1},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Privacy is the right of individuals to keep personal information to themselves. When individuals use online systems, they should be given the right to decide what information they would like to share and what to keep private. When a piece of information pertains only to a single individual, preserving privacy is possible by providing the right access options to the user. However, when a piece of information pertains to multiple individuals, such as a picture of a group of friends or a collaboratively edited document, deciding how to share this information and with whom is challenging. The problem becomes more difficult when the individuals who are affected by the information have different, possibly conflicting privacy constraints. Resolving this problem requires a mechanism that takes into account the relevant individuals’ concerns to decide on the privacy configuration of information. Because these decisions need to be made frequently (i.e., per each piece of shared content), the mechanism should be automated. This article presents a personal assistant to help end-users with managing the privacy of their content. When some content that belongs to multiple users is about to be shared, the personal assistants of the users employ an auction-based privacy mechanism to regulate the privacy of the content. To do so, each personal assistant learns the preferences of its user over time and produces bids accordingly. Our proposed personal assistant is capable of assisting users with different personas and thus ensures that people benefit from it as they need it. Our evaluations over multiagent simulations with online social network content show that our proposed personal assistant enables privacy-respecting content sharing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Los M; Christoff Z; Grossi D
Proportional Budget Allocations: Towards a Systematization Proceedings Article
In: De Raedt, Luc (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 398–404, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{los22proportional,
title = {Proportional Budget Allocations: Towards a Systematization},
author = {Los, Maaike and Christoff, Zoé and Grossi, Davide},
editor = {De Raedt, Luc},
url = {https://doi.org/10.24963/ijcai.2022/57},
doi = {10.24963/ijcai.2022/57},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {398--404},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {We contribute to the programme of lifting proportionality axioms from the multi-winner voting setting to participatory budgeting. We define novel proportionality axioms for participatory budgeting and test them on known proportionality-driven rules such as Phragmén and Rule X. We investigate logical implications among old and new axioms and provide a systematic overview of proportionality criteria in participatory budgeting.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang Y; Grossi D
Tracking Truth by Weighting Proxies in Liquid Democracy Proceedings Article
In: Faliszewski, Piotr; Mascardi, Viviana; Pelachaud, Catherine; Taylor, Matthew E. (Ed.): 21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022, pp. 1482–1490, International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS), 2022.
@inproceedings{zhang22tracking,
title = {Tracking Truth by Weighting Proxies in Liquid Democracy},
author = {Yuzhe Zhang and Davide Grossi},
editor = {Piotr Faliszewski and Viviana Mascardi and Catherine Pelachaud and Matthew E. Taylor},
url = {https://www.ifaamas.org/Proceedings/aamas2022/pdfs/p1482.pdf},
doi = {10.5555/3535850.3536015},
year = {2022},
date = {2022-01-01},
booktitle = {21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022},
pages = {1482--1490},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
abstract = {We study wisdom-of-the-crowd effects in liquid democracy on networks where agents are allowed to apportion parts of their voting weight to different proxies. We show that in this setting—unlike in the standard one where voting weight is delegated in full to only one proxy—it becomes possible to construct delegation structures that optimize the truth-tracking ability of the group. Focusing on group accuracy we contrast this centralized solution with the setting in which agents are free to choose their weighted delegations by greedily trying to maximize their own individual accuracy. While equilibria with weighted delegations may be as bad as with standard delegations, they are never worse and may sometimes be better. To gain further insights into this model we experimentally study quantal response delegation strategies on random networks. We observe that weighted delegations can lead, under specific conditions, to higher group accuracy than simple majority voting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}