Ligthart M; Neerincx M; Hindriks K
Memory-Based Personalization for Fostering a Long-Term Child-Robot Relationship Proceedings Article
In: Sakamoto, Daisuke; Weiss, Astrid; Hiatt, Laura M.; Shiomi, Masahiro (Ed.): ACM/IEEE International Conference on Human-Robot Interaction, HRI 2022, Sapporo, Hokkaido, Japan, March 7 - 10, 2022, pp. 80–89, IEEE / ACM, 2022.
@inproceedings{DBLP:conf/hri/LigthartNH22,
title = {Memory-Based Personalization for Fostering a Long-Term Child-Robot Relationship},
author = {Mike Ligthart and Mark Neerincx and Koen Hindriks},
editor = {Daisuke Sakamoto and Astrid Weiss and Laura M. Hiatt and Masahiro Shiomi},
url = {https://repository.tudelft.nl/islandora/object/uuid%3A99db22f6-19cb-4d21-85c2-bc1250a9fa01},
doi = {10.1109/HRI53351.2022.9889446},
year = {2022},
date = {2022-01-01},
booktitle = {ACM/IEEE International Conference on Human-Robot Interaction, HRI 2022, Sapporo, Hokkaido, Japan, March 7 - 10, 2022},
pages = {80–89},
publisher = {IEEE / ACM},
abstract = {After the novelty effect wears off, children need a new motivator to keep interacting with a social robot. Enabling children to build a relationship with the robot is the key to facilitating a sustainable long-term interaction. We designed a memory-based personalization strategy that safeguards the continuity between sessions and tailors the interaction to the child's needs and interests to foster the child-robot relationship. A longitudinal (five sessions in two months) user study (N = 46, 8–10 y.o.) showed that the strategy kept children interested in the robot longer, fostered more closeness, elicited more positive social cues, and added continuity between sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Elloumi L; Bossema M; de Droog S M; Smakman M; van Ginkel S; Ligthart M E U; Hoogland K; Hindriks K V; Ben Allouch S
Exploring Requirements and Opportunities for Social Robots in Primary Mathematics Education Proceedings Article
In: 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE Press, 2022.
@inproceedings{elloumi_exploring_2022,
title = {Exploring Requirements and Opportunities for Social Robots in Primary Mathematics Education},
author = {Elloumi, Lamia and Bossema, Marianne and de Droog, Simone M. and Smakman, Matthijs and van Ginkel, Stan and Ligthart, Mike E.U. and Hoogland, Kees and Hindriks, Koen V. and Ben Allouch, Somaya},
url = {https://research.vu.nl/en/publications/exploring-requirements-and-opportunities-for-social-robots-in-pri},
doi = {10.1109/RO-MAN53752.2022.9900569},
year = {2022},
date = {2022-01-01},
booktitle = {31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE Press},
abstract = {Social robots have been introduced in different fields such as retail, health care and education. Primary education in the Netherlands (and elsewhere) recently faced new challenges because of the COVID-19 pandemic, lockdowns, and quarantines, including students falling behind and teachers burdened with high workloads. Together with two Dutch municipalities and nine primary schools we are exploring the long-term use of social robots to study how social robots might support teachers in primary education, with a focus on mathematics education. This paper presents an explorative study to define requirements for a social robot math tutor. Multiple focus groups were held with the two main stakeholders, namely teachers and students. During the focus groups the aim was 1) to understand the current situation of mathematics education at the upper primary school level, 2) to identify the problems that teachers and students encounter in mathematics education, and 3) to identify opportunities for deploying a social robot math tutor in primary education from the perspective of both the teachers and students. The results inform the development of social robots and opportunities for pedagogical methods used in math teaching, child-robot interaction and potential support for teachers in the classroom.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Raman C; Quiros J V; Tan S; Islam A; Gedik E; Hung H
ConfLab: A Data Collection Concept, Dataset, and Benchmark for Machine Analysis of Free-Standing Social Interactions in the Wild Proceedings Article
In: Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2022.
@inproceedings{raman2022conflab,
title = {ConfLab: A Data Collection Concept, Dataset, and Benchmark for Machine Analysis of Free-Standing Social Interactions in the Wild},
author = {Chirag Raman and Jose Vargas Quiros and Stephanie Tan and Ashraful Islam and Ekin Gedik and Hayley Hung},
url = {https://openreview.net/forum?id=CNJQKM5cV2o},
year = {2022},
date = {2022-01-01},
booktitle = {Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tsfasman M; Fenech K; Tarvirdians M; Lorincz A; Jonker C; Oertel C
Towards Creating a Conversational Memory for Long-Term Meeting Support: Predicting Memorable Moments in Multi-Party Conversations through Eye-Gaze Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 94–104, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3556613,
title = {Towards Creating a Conversational Memory for Long-Term Meeting Support: Predicting Memorable Moments in Multi-Party Conversations through Eye-Gaze},
author = {Tsfasman, Maria and Fenech, Kristian and Tarvirdians, Morita and Lorincz, Andras and Jonker, Catholijn and Oertel, Catharine},
url = {https://doi.org/10.1145/3536221.3556613},
doi = {10.1145/3536221.3556613},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {94–104},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {When working in a group, it is essential to understand each other’s viewpoints to increase group cohesion and meeting productivity. This can be challenging in teams: participants might be left misunderstood and the discussion could be going around in circles. To tackle this problem, previous research on group interactions has addressed topics such as dominance detection, group engagement, and group creativity. Conversational memory, however, remains a widely unexplored area in the field of multimodal analysis of group interaction. The ability to track what each participant or a group as a whole finds memorable from each meeting would allow a system or agent to continuously optimise its strategy to help a team meet its goals. In the present paper, we therefore investigate what participants take away from each meeting and how it is reflected in group dynamics. As a first step toward such a system, we recorded a multimodal longitudinal meeting corpus (MEMO), which comprises a first-party annotation of what participants remember from a discussion and why they remember it. We investigated whether participants of group interactions encode what they remember non-verbally and whether we can use such non-verbal multimodal features to predict what groups are likely to remember automatically. We devise a coding scheme to cluster participants’ memorisation reasons into higher-level constructs. We find that low-level multimodal cues, such as gaze and speaker activity, can predict conversational memorability. We also find that non-verbal signals can indicate when a memorable moment starts and ends. We could predict four levels of conversational memorability with an average accuracy of 44%. We also showed that reasons related to participants’ personal feelings and experiences are the most frequently mentioned grounds for remembering meeting segments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; Küster D; St-Onge D; Putze F
The 4th Workshop on Modeling Socio-Emotional and Cognitive Processes from Multimodal Data In-the-Wild (MSECP-Wild) Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 803–804, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3564029,
title = {The 4th Workshop on Modeling Socio-Emotional and Cognitive Processes from Multimodal Data In-the-Wild (MSECP-Wild)},
author = {Dudzik, Bernd and Küster, Dennis and St-Onge, David and Putze, Felix},
url = {https://pure.tudelft.nl/admin/files/140622118/3536221.3564029.pdf},
doi = {10.1145/3536221.3564029},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {803–804},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {The ability to automatically infer relevant aspects of human users’ thoughts and feelings is crucial for technologies to adapt their behaviors in complex interactions intelligently (e.g., social robots or tutoring systems). Research on multimodal analysis has demonstrated the potential of technology to provide such estimates for a broad range of internal states and processes. However, constructing robust enough approaches for deployment in real-world applications remains an open problem. The MSECP-Wild workshop series serves as a multidisciplinary forum to present and discuss research addressing this challenge. This 4th iteration focuses on addressing varying contextual conditions (e.g., throughout an interaction or across different situations and environments) in intelligent systems as a crucial barrier for more valid real-world predictions and actions. Submissions to the workshop span efforts relevant to multimodal data collection and context-sensitive modeling. These works provide important impulses for discussions of the state-of-the-art and opportunities for future research on these subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; Hung H
Exploring the Detection of Spontaneous Recollections during Video-Viewing In-the-Wild Using Facial Behavior Analysis Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 236–246, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3556609,
title = {Exploring the Detection of Spontaneous Recollections during Video-Viewing In-the-Wild Using Facial Behavior Analysis},
author = {Dudzik, Bernd and Hung, Hayley},
url = {https://dl.acm.org/doi/10.1145/3536221.3556609},
doi = {10.1145/3536221.3556609},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {236–246},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {Intelligent systems might benefit from automatically detecting when a stimulus has triggered a user’s recollection of personal memories, e.g., to identify that a piece of media content holds personal significance for them. While computational research has demonstrated the potential to identify related states based on facial behavior (e.g., mind-wandering), the automatic detection of spontaneous recollections specifically has not been investigated thus far. Motivated by this, we present machine learning experiments exploring the feasibility of detecting whether a video clip has triggered personal memories in a viewer based on the analysis of their Head Rotation, Head Position, Eye Gaze, and Facial Expressions. Concretely, we introduce an approach for automatic detection and evaluate its potential for predictions using in-the-wild webcam recordings. Overall, our findings demonstrate the capacity for above-chance detections in both settings, with substantially better performance for the video-independent variant. Beyond this, we investigate the role of person-specific recollection biases for predictions of our video-independent models and the importance of specific modalities of facial behavior. Finally, we discuss the implications of our findings for detecting recollections and user-modeling in adaptive systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Krieken E; Acar E; van Harmelen F
Analyzing differentiable fuzzy logic operators Journal Article
In: Artificial Intelligence, vol. 302, pp. 103602, 2022.
@article{van2022analyzing,
title = {Analyzing differentiable fuzzy logic operators},
author = {van Krieken, Emile and Acar, Erman and van Harmelen, Frank},
url = {https://research.vu.nl/ws/portalfiles/portal/146020254/2002.06100v2.pdf},
year = {2022},
date = {2022-01-01},
journal = {Artificial Intelligence},
volume = {302},
pages = {103602},
publisher = {Elsevier},
abstract = {The AI community is increasingly turning its attention towards combining symbolic and neural approaches, as it is often argued that the strengths and weaknesses of these approaches are complementary. One recent trend in the literature is the use of weakly supervised learning techniques that employ operators from fuzzy logics. In particular, these use prior background knowledge described in such logics to help the training of a neural network from unlabeled and noisy data. By interpreting logical symbols using neural networks, this background knowledge can be added to regular loss functions, hence making reasoning a part of learning. We study, both formally and empirically, how a large collection of logical operators from the fuzzy logic literature behave in a differentiable learning setting. We find that many of these operators, including some of the most well-known, are highly unsuitable in this setting. A further finding concerns the treatment of implication in these fuzzy logics, and shows a strong imbalance between gradients driven by the antecedent and the consequent of the implication. Furthermore, we introduce a new family of fuzzy implications (called sigmoidal implications) to tackle this phenomenon. Finally, we empirically show that it is possible to use Differentiable Fuzzy Logics for semi-supervised learning, and compare how different operators behave in practice. We find that, to achieve the largest performance improvement over a supervised baseline, we have to resort to non-standard combinations of logical operators which perform well in learning, but no longer satisfy the usual logical laws.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
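A rough feel for the operators this paper analyses can be given in a few lines. The sketch below is mine, not the authors' code (PyTorch and the exact sigmoidal normalisation are assumptions); it turns the background rule "bird(x) implies flies(x)" into a differentiable loss term:

import torch

def product_tnorm(a, b):             # fuzzy conjunction: T(a, b) = a * b
    return a * b

def reichenbach_implication(a, b):   # I(a, b) = 1 - a + a * b
    return 1.0 - a + a * b

def sigmoidal_implication(a, b, s=9.0):
    # The paper introduces a family of *sigmoidal* implications; this follows
    # the general recipe of renormalising a base implication through a scaled
    # sigmoid (the paper's exact parameterisation may differ).
    i = reichenbach_implication(a, b)
    lo = torch.sigmoid(torch.tensor(-s / 2))
    hi = torch.sigmoid(torch.tensor(s / 2))
    return (torch.sigmoid(s * (i - 0.5)) - lo) / (hi - lo)

# Toy truth degrees a network assigns to bird(x) and flies(x) for two instances:
p_bird = torch.tensor([0.9, 0.2], requires_grad=True)
p_flies = torch.tensor([0.7, 0.1], requires_grad=True)
loss = (1.0 - sigmoidal_implication(p_bird, p_flies)).mean()
loss.backward()   # gradients reach both antecedent and consequent; the paper
                  # studies exactly how unbalanced these two gradients can be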
Romero D W; Kuzina A; Bekkers E J; Tomczak J M; Hoogendoorn M
CKConv: Continuous Kernel Convolution For Sequential Data Journal Article
In: International Conference on Learning Representations (ICLR), 2022.
@article{DBLP:journals/corr/abs-2102-02611,
title = {CKConv: Continuous Kernel Convolution For Sequential Data},
author = {David W. Romero and Anna Kuzina and Erik J. Bekkers and Jakub M. Tomczak and Mark Hoogendoorn},
url = {https://openreview.net/pdf?id=8FhxBtXSl0},
year = {2022},
date = {2022-01-01},
journal = {International Conference on Learning Representations (ICLR)},
abstract = {Conventional neural architectures for sequential data present important limitations. Recurrent networks suffer from exploding and vanishing gradients, small effective memory horizons, and must be trained sequentially. Convolutional networks are unable to handle sequences of unknown size and their memory horizon must be defined a priori. In this work, we show that all these problems can be solved by formulating convolutional kernels in CNNs as continuous functions. The resulting Continuous Kernel Convolution (CKConv) allows us to model arbitrarily long sequences in a parallel manner, within a single operation, and without relying on any form of recurrence. We show that Continuous Kernel Convolutional Networks (CKCNNs) obtain state-of-the-art results in multiple datasets, e.g., permuted MNIST, and, thanks to their continuous nature, are able to handle non-uniformly sampled datasets and irregularly-sampled data natively. CKCNNs match or outperform neural ODEs designed for these purposes, while being faster and simpler.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
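The core trick in CKConv is compact enough to sketch. Below is a minimal illustration of the idea, my own simplification rather than the released code (the paper uses sine nonlinearities and a causal parameterisation): a convolutional kernel generated by a small MLP over continuous relative positions, so one operation covers any sequence length:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ContinuousKernelConv1d(nn.Module):
    def __init__(self, in_ch, out_ch, hidden=32):
        super().__init__()
        # KernelNet: maps a relative position t in [-1, 1] to kernel values.
        self.kernel_net = nn.Sequential(
            nn.Linear(1, hidden), nn.ReLU(),   # the paper uses sine activations
            nn.Linear(hidden, in_ch * out_ch),
        )
        self.in_ch, self.out_ch = in_ch, out_ch

    def forward(self, x):                      # x: (batch, in_ch, length)
        length = x.shape[-1]
        t = torch.linspace(-1.0, 1.0, length).unsqueeze(-1)    # (length, 1)
        k = self.kernel_net(t)                                 # (length, in*out)
        k = k.t().reshape(self.out_ch, self.in_ch, length)     # conv1d weight
        return F.conv1d(x, k, padding=length - 1)[..., :length]

x = torch.randn(8, 3, 100)              # any length works: the kernel is
y = ContinuousKernelConv1d(3, 16)(x)    # re-sampled to match the input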
Liscio E; van der Meer M; Siebert L C; Jonker C M; Murukannaiah P K
What values should an agent align with? Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 36, no. 23, pp. 32, 2022.
@article{Liscio2022,
title = {What values should an agent align with?},
author = {Liscio, Enrico and van der Meer, Michiel and Siebert, Luciano C. and Jonker, Catholijn M. and Murukannaiah, Pradeep K.},
url = {https://link.springer.com/content/pdf/10.1007/s10458-022-09550-0},
year = {2022},
date = {2022-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {36},
number = {23},
pages = {32},
publisher = {Springer US},
abstract = {The pursuit of values drives human behavior and promotes cooperation. Existing research is focused on general values (e.g., Schwartz) that transcend contexts. However, context-specific values are necessary to (1) understand human decisions, and (2) engineer intelligent agents that can elicit and align with human values. We propose Axies, a hybrid (human and AI) methodology to identify context-specific values. Axies simplifies the abstract task of value identification as a guided value annotation process involving human annotators. Axies exploits the growing availability of value-laden text corpora and Natural Language Processing to assist the annotators in systematically identifying context-specific values. We evaluate Axies in a user study involving 80 human subjects. In our study, six annotators generate value lists for two timely and important contexts: Covid-19 measures and sustainable Energy. We employ two policy experts and 72 crowd workers to evaluate Axies value lists and compare them to a list of general (Schwartz) values. We find that Axies yields values that are (1) more context-specific than general values, (2) more suitable for value annotation than general values, and (3) independent of the people applying the methodology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kiseleva J; Li Z; Aliannejadi M; Mohanty S; ter Hoeve M; Burtsev M; Skrynnik A; Zholus A; Panov A; Srinet K; Szlam A; Sun Y; Côté M; Hofmann K; Awadallah A; Abdrazakov L; Churin I; Manggala P; Naszadi K; van der Meer M; Kim T
Interactive Grounded Language Understanding in a Collaborative Environment: IGLU 2021 Journal Article
In: arXiv preprint arXiv:2205.02388, 2022.
@article{IGLU2022,
title = {Interactive Grounded Language Understanding in a Collaborative Environment: IGLU 2021},
author = {Kiseleva, Julia and Li, Ziming and Aliannejadi, Mohammad and Mohanty, Shrestha and ter Hoeve, Maartje and Burtsev, Mikhail and Skrynnik, Alexey and Zholus, Artem and Panov, Aleksandr and Srinet, Kavya and Szlam, Arthur and Sun, Yuxuan and Côté, Marc-Alexandre and Hofmann, Katja and Awadallah, Ahmed and Abdrazakov, Linar and Churin, Igor and Manggala, Putra and Naszadi, Kata and van der Meer, Michiel and Kim, Taewoon},
url = {https://arxiv.org/abs/2205.02388},
doi = {10.48550/ARXIV.2205.02388},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {Human intelligence has the remarkable ability to quickly adapt to new tasks and environments. Starting from a very young age, humans acquire new skills and learn how to solve new tasks either by imitating the behavior of others or by following provided natural language instructions. To facilitate research in this direction, we propose IGLU: Interactive Grounded Language Understanding in a Collaborative Environment. The primary goal of the competition is to approach the problem of how to build interactive agents that learn to solve a task while provided with grounded natural language instructions in a collaborative environment. Understanding the complexity of the challenge, we split it into sub-tasks to make it feasible for participants.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Grossi D
Social Choice Around the Block: On the Computational Social Choice of Blockchain Proceedings Article
In: Faliszewski, Piotr; Mascardi, Viviana; Pelachaud, Catherine; Taylor, Matthew E. (Ed.): 21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022, pp. 1788–1793, International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS), 2022.
@inproceedings{DBLP:conf/atal/Grossi22,
title = {Social Choice Around the Block: On the Computational Social Choice of Blockchain},
author = {Davide Grossi},
editor = {Piotr Faliszewski and Viviana Mascardi and Catherine Pelachaud and Matthew E. Taylor},
url = {https://www.ifaamas.org/Proceedings/aamas2022/pdfs/p1788.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022},
pages = {1788–1793},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
abstract = {One of the most innovative aspects of blockchain technology consists in the introduction of an incentive layer to regulate the behavior of distributed protocols. The designer of a blockchain system therefore faces issues that are akin to those relevant for the design of economic mechanisms, and faces them in a computational setting. From this perspective the present paper argues for the importance of computational social choice in blockchain research. It identifies a few challenges at the interface of the two fields that illustrate the strong potential for cross-fertilization between them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Verma R; Nalisnick E
Calibrated Learning to Defer with One-vs-All Classifiers Proceedings Article
In: ICML 2022 Workshop on Human-Machine Collaboration and Teaming, 2022.
@inproceedings{Verma-Nalisnick-ICML:2022,
title = {Calibrated Learning to Defer with One-vs-All Classifiers},
author = {Rajeev Verma and Eric Nalisnick},
url = {https://icml.cc/Conferences/2022/ScheduleMultitrack?event=18123},
year = {2022},
date = {2022-01-01},
booktitle = {ICML 2022 Workshop on Human-Machine Collaboration and Teaming},
abstract = {The learning to defer (L2D) framework has the potential to make AI systems safer. For a given input, the system can defer the decision to a human if the human is more likely than the model to take the correct action. We study the calibration of L2D systems, investigating if the probabilities they output are sound. We find that Mozannar & Sontag’s (2020) multiclass framework is not calibrated with respect to expert correctness. Moreover, it is not even guaranteed to produce valid probabilities due to its parameterization being degenerate for this purpose. We propose an L2D system based on one-vs-all classifiers that is able to produce calibrated probabilities of expert correctness. Furthermore, our loss function is also a consistent surrogate for multiclass L2D, like Mozannar & Sontag's (2020). Our experiments verify that not only is our system calibrated, but this benefit comes at no cost to accuracy. Our model's accuracy is always comparable (and often superior) to Mozannar & Sontag's (2020) model's in tasks ranging from hate speech detection to galaxy classification to diagnosis of skin lesions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
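The decision rule behind the one-vs-all formulation can be sketched briefly. The snippet below is my own illustration of the idea in the abstract (names and shapes are assumptions, not the authors' code): each logit is squashed independently, so the deferral head yields a calibrated probability that the expert is correct:

import torch

def l2d_decision(logits):
    """logits: (batch, K + 1); the last logit scores 'the expert is correct'."""
    probs = torch.sigmoid(logits)          # OvA: each logit calibrated on its own
    class_conf, y_hat = probs[:, :-1].max(dim=1)
    p_expert_correct = probs[:, -1]
    defer = p_expert_correct > class_conf  # defer when the expert is likelier right
    return y_hat, defer, p_expert_correct

logits = torch.randn(4, 10 + 1)            # e.g. 10 classes plus a deferral head
y_hat, defer, p_exp = l2d_decision(logits)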
Manggala P; Hoos H H; Nalisnick E
Bayesian Weak Supervision via an Optimal Transport Approach Proceedings Article
In: ICML 2022 Workshop on Human-Machine Collaboration and Teaming, 2022.
@inproceedings{manggala2022optimaltransportweaksupervision,
title = {Bayesian Weak Supervision via an Optimal Transport Approach},
author = {Manggala, Putra and Hoos, Holger H. and Nalisnick, Eric},
url = {https://openreview.net/forum?id=YJkf-6tTFiY},
year = {2022},
date = {2022-01-01},
booktitle = {ICML 2022 Workshop on Human-Machine Collaboration and Teaming},
abstract = {Large-scale machine learning is often impeded by a lack of labeled training data. To address this problem, the paradigm of weak supervision aims to collect and then aggregate multiple noisy labels. We propose a Bayesian probabilistic model that employs a tractable Sinkhorn-based optimal transport formulation to derive a ground-truth label. The translation between true and weak labels is cast as a transport problem with an inferred cost structure. Our approach achieves strong performance on the WRENCH weak supervision benchmark. Moreover, the posterior distribution over cost matrices allows for exploratory analysis of the weak sources.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
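For readers unfamiliar with the Sinkhorn formulation this paper builds on, here is the standard entropic optimal transport iteration in NumPy; the Bayesian model over cost matrices described in the abstract is not reproduced, this is only the transport subroutine:

import numpy as np

def sinkhorn(C, a, b, eps=0.1, n_iters=200):
    """Entropy-regularised OT plan between marginals a and b for cost matrix C."""
    K = np.exp(-C / eps)                 # Gibbs kernel
    u = np.ones_like(a)
    for _ in range(n_iters):
        v = b / (K.T @ u)                # alternating scaling updates
        u = a / (K @ v)
    return u[:, None] * K * v[None, :]   # plan P = diag(u) K diag(v)

C = np.random.rand(3, 3)     # e.g. an inferred cost between weak and true labels
a = np.full(3, 1 / 3)        # weak-label marginal
b = np.full(3, 1 / 3)        # true-label marginal
P = sinkhorn(C, a, b)        # rows sum to a, columns sum to b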
Dobbe R
System Safety and Artificial Intelligence Book Section
In: Oxford Handbook of AI Governance, vol. To Appear, Oxford, 2022, ISBN: 978-0-19-757932-9.
@incollection{dobbe_system_2022,
title = {System Safety and Artificial Intelligence},
author = {Dobbe, Roel},
url = {https://arxiv.org/abs/2202.09292},
isbn = {978-0-19-757932-9},
year = {2022},
date = {2022-01-01},
booktitle = {Oxford Handbook of AI Governance},
volume = {To Appear},
address = {Oxford},
abstract = {This chapter formulates seven lessons for preventing harm in artificial intelligence (AI) systems based on insights from the field of system safety for software-based automation in safety-critical domains. New applications of AI across societal domains and public organizations and infrastructures come with new hazards, which lead to new forms of harm, both grave and pernicious. The text addresses the lack of consensus for diagnosing and eliminating new AI system hazards. For decades, the field of system safety has dealt with accidents and harm in safety-critical systems governed by varying degrees of software-based automation and decision-making. This field embraces the core assumption of systems and control that AI systems cannot be safeguarded by technical design choices on the model or algorithm alone, instead requiring an end-to-end hazard analysis and design frame that includes the context of use, impacted stakeholders and the formal and informal institutional environment in which the system operates. Safety and other values are then inherently socio-technical and emergent system properties that require design and control measures to instantiate these across the technical, social and institutional components of a system. This chapter honors system safety pioneer Nancy Leveson, by situating her core lessons for today's AI system safety challenges. For every lesson, concrete tools are offered for rethinking and reorganizing the safety management of AI systems, both in design and governance. This history tells us that effective AI safety management requires transdisciplinary approaches and a shared language that allows involvement of all levels of society.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sauter A; Acar E; François-Lavet V
A Meta-Reinforcement Learning Algorithm for Causal Discovery Miscellaneous
2022.
@misc{Sauter22MetaRL,
title = {A Meta-Reinforcement Learning Algorithm for Causal Discovery},
author = {Sauter, Andreas and Acar, Erman and François-Lavet, Vincent},
url = {https://arxiv.org/abs/2207.08457},
doi = {10.48550/ARXIV.2207.08457},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {Causal discovery is a major task with the utmost importance for machine learning since causal structures can enable models to go beyond pure correlation-based inference and significantly boost their performance. However, finding causal structures from data poses a significant challenge both in computational effort and accuracy, let alone its impossibility without interventions in general. In this paper, we develop a meta-reinforcement learning algorithm that performs causal discovery by learning to perform interventions such that it can construct an explicit causal graph. Apart from being useful for possible downstream applications, the estimated causal graph also provides an explanation for the data-generating process. In this article, we show that our algorithm estimates a good graph compared to the SOTA approaches, even in environments whose underlying causal structure is previously unseen. Further, we conduct an ablation study that shows how learning interventions contributes to the overall performance of our approach. We conclude that interventions indeed help boost the performance, efficiently yielding an accurate estimate of the causal structure of a possibly unseen environment.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Heuss M; Sarvi F; de Rijke M
Fairness of Exposure in Light of Incomplete Exposure Estimation Proceedings Article
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 759–769, Association for Computing Machinery, Madrid, Spain, 2022.
@inproceedings{heuss-2022-fairness,
title = {Fairness of Exposure in Light of Incomplete Exposure Estimation},
author = {Heuss, Maria and Sarvi, Fatemeh and de Rijke, Maarten},
url = {https://irlab.science.uva.nl/wp-content/papercite-data/pdf/heuss-2022-fairness.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {759–769},
publisher = {Association for Computing Machinery},
address = {Madrid, Spain},
abstract = {Fairness of exposure is a commonly used notion of fairness for ranking systems. It is based on the idea that all items or item groups should get exposure proportional to the merit of the item or the collective merit of the items in the group. Often, stochastic ranking policies are used to ensure fairness of exposure. Previous work unrealistically assumes that we can reliably estimate the expected exposure for all items in each ranking produced by the stochastic policy. In this work, we discuss how to approach fairness of exposure in cases where the policy contains rankings of which, due to inter-item dependencies, we cannot reliably estimate the exposure distribution. In such cases, we cannot determine whether the policy can be considered fair. Our contributions in this paper are twofold. First, we define a method for finding stochastic policies that avoid showing rankings with unknown exposure distribution to the user without having to compromise user utility or item fairness. Second, we extend the study of fairness of exposure to the top-k setting and also assess our method in this setting. We find that our method can significantly reduce the number of rankings with unknown exposure distribution without a drop in user utility or fairness compared to existing fair ranking methods, both for full-length and top-k rankings. This is an important first step in developing fair ranking methods for cases where we have incomplete knowledge about the user's behaviour.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
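As background for the abstract's terminology, the following toy computation (mine, not the paper's method) shows the expected exposure of items under a stochastic ranking policy with a standard logarithmic position-bias model, and the disparity against merit-proportional exposure:

import numpy as np

def expected_exposure(policy, n_items):
    """policy: list of (probability, ranking) pairs over the same n_items."""
    position_bias = 1.0 / np.log2(np.arange(2, n_items + 2))  # DCG-style drop-off
    exposure = np.zeros(n_items)
    for prob, ranking in policy:
        for pos, item in enumerate(ranking):
            exposure[item] += prob * position_bias[pos]
    return exposure

merit = np.array([0.5, 0.3, 0.2])
policy = [(0.6, [0, 1, 2]), (0.4, [1, 0, 2])]  # stochastic ranking policy
exposure = expected_exposure(policy, 3)
disparity = exposure / exposure.sum() - merit  # zero when exposure tracks merit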
Ho L; de Boer V; van Riemsdijk M B; Schlobach S; Tielman M
Argumentation for Knowledge Base Inconsistencies in Hybrid Intelligence Scenarios Proceedings Article
In: The 1st International Workshop on Knowledge Representation for Hybrid Intelligence (KR4HI), online, 2022.
@inproceedings{HHAI2022,
title = {Argumentation for Knowledge Base Inconsistencies in Hybrid Intelligence Scenarios},
author = {Ho, Loan and de Boer, Victor and van Riemsdijk, M. Birna and Schlobach, Stefan and Tielman, Myrthe},
url = {https://research.utwente.nl/en/publications/argumentation-for-knowledge-base-inconsistencies-in-hybrid-intell},
year = {2022},
date = {2022-01-01},
publisher = {The 1st International Workshop on Knowledge Representation for Hybrid Intelligence (KR4HI)},
address = {online},
abstract = {Hybrid Intelligence (HI) is the combination of human and machine intelligence, expanding human intellect instead of replacing it. In HI scenarios, inconsistencies in knowledge bases (KBs) can occur for a variety of reasons. These include shifting preferences, user motivation, and/or external conditions (for example, available resources and environment can vary over time). Argumentation is a potential method to address such inconsistencies as it provides a mechanism for reasoning with conflicting information, with natural explanations that are understandable to humans. In this paper, we investigate the capabilities of Argumentation in representing and reasoning about knowledge of both human and artificial agents in the presence of inconsistency. Moreover, we show how Argumentation enables Explainability for addressing problems in Decision-Making and Justification of an opinion. In order to investigate the applicability of Argumentation in HI scenarios, we demonstrate a mapping of two specific HI scenarios to Argumentation problems. We analyse to what extent Argumentation is applicable by clarifying the practical inconsistency types of the HI scenarios that Argumentation can address. These include inconsistencies related to recommendations and decision making. We then model the presentation of conflicting information for each scenario, based on the form of argument representation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ho L; Arch-int S; Acar E; Schlobach S; Arch-int N
An argumentative approach for handling inconsistency in prioritized Datalog± ontologies Journal Article
In: AI Commun., vol. 35, no. 3, pp. 243–267, 2022, ISSN: 0921-7126.
@article{10.3233/AIC-220087,
title = {An argumentative approach for handling inconsistency in prioritized Datalog± ontologies},
author = {Ho, Loan and Arch-int, Somjit and Acar, Erman and Schlobach, Stefan and Arch-int, Ngamnij},
url = {https://research.vu.nl/en/publications/an-argumentative-approach-for-handling-inconsistency-in-prioritiz},
doi = {10.3233/AIC-220087},
issn = {0921-7126},
year = {2022},
date = {2022-01-01},
journal = {AI Commun.},
volume = {35},
number = {3},
pages = {243–267},
publisher = {IOS Press},
address = {NLD},
abstract = {Prioritized Datalog± is a well-studied formalism for modelling ontological knowledge and data, and has a success story in many applications in the (Semantic) Web and in other domains. Since the information content on the Web is both inherently context-dependent and frequently updated, the occurrence of a logical inconsistency is often inevitable. This phenomenon has led the research community to develop various types of inconsistency-tolerant semantics over the last few decades. Although the study of query answering under inconsistency-tolerant semantics is well-understood, the problem of explaining query answering under such semantics has received considerably less attention, especially in the scenario where the facts are prioritized. In this paper, we aim to fill this gap. More specifically, we use Dung's abstract argumentation framework to address the problem of explaining inconsistency-tolerant query answering in Datalog± KBs where facts are prioritized, or preordered. We clarify the relationship between preferred repair semantics and various notions of extensions for argumentation frameworks. The strength of such an argumentation-based approach is its explainability; users can more easily understand why different points of view are conflicting and why the query answer is entailed (or not) under different semantics. To this end we introduce the formal notion of a dialogical explanation, and show how it can be used to explain why query results do or do not hold according to the known semantics in inconsistent Datalog± knowledge bases.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Prakken H; Ratsma R
A top-level model of case-based argumentation for explanation: Formalisation and experiments Journal Article
In: Argument and Computation, vol. 13, pp. 159–194, 2022.
@article{p+r22,
title = {A top-level model of case-based argumentation for explanation: Formalisation and experiments},
author = {H. Prakken and R. Ratsma},
url = {https://content.iospress.com/articles/argument-and-computation/aac210009},
year = {2022},
date = {2022-01-01},
journal = {Argument and Computation},
volume = {13},
pages = {159–194},
abstract = {This paper proposes a formal top-level model of explaining the outputs of machine-learning-based decision-making applications and evaluates it experimentally with three data sets. The model draws on AI & law research on argumentation with cases, which models how lawyers draw analogies to past cases and discuss their relevant similarities and differences in terms of relevant factors and dimensions in the problem domain. A case-based approach is natural since the input data of machine-learning applications can be seen as cases. While the approach is motivated by legal decision making, it also applies to other kinds of decision making, such as commercial decisions about loan applications or employee hiring, as long as the outcome is binary and the input conforms to this factor- or dimension format. The model is top-level in that it can be extended with more refined accounts of similarities and differences between cases. It is shown to overcome several limitations of similar argumentation-based explanation models, which only have binary features and do not represent the tendency of features towards particular outcomes. The results of the experimental evaluation studies indicate that the model may be feasible in practice, but that further development and experimentation are needed to confirm its usefulness as an explanation model. Main challenges here are selecting from a large number of possible explanations, reducing the number of features in the explanations and adding more meaningful information to them. It also remains to be investigated how suitable our approach is for explaining non-linear models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Çelikok M M; Oliehoek F A; Kaski S
Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 235–243, International Foundation for Autonomous Agents and Multiagent Systems, Virtual Event, New Zealand, 2022, ISBN: 9781450392136.
@inproceedings{10.5555/3535850.3535878,
title = {Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs},
author = {Çelikok, Mustafa Mert and Oliehoek, Frans A. and Kaski, Samuel},
url = {https://ifaamas.org/Proceedings/aamas2022/pdfs/p235.pdf},
isbn = {9781450392136},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {235–243},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Virtual Event, New Zealand},
series = {AAMAS '22},
abstract = {Centaurs are half-human, half-AI decision-makers where the AI's goal is to complement the human. To do so, the AI must be able to recognize the goals and constraints of the human and have the means to help them. We present a novel formulation of the interaction between the human and the AI as a sequential game where the agents are modelled using Bayesian best-response models. We show that in this case the AI's problem of helping bounded-rational humans make better decisions reduces to a Bayes-adaptive POMDP. In our simulated experiments, we consider an instantiation of our framework for humans who are subjectively optimistic about the AI's future behaviour. Our results show that when equipped with a model of the human, the AI can infer the human's bounds and nudge them towards better decisions. We discuss ways in which the machine can learn to improve upon its own limitations as well with the help of the human. We identify a novel trade-off for centaurs in partially observable tasks: for the AI's actions to be acceptable to the human, the machine must make sure their beliefs are sufficiently aligned, but aligning beliefs might be costly. We present a preliminary theoretical analysis of this trade-off and its dependence on task structure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Prakken H
Formalising an aspect of argument strength: degrees of attackability Book Section
In: Toni, Francesca (Ed.): Computational Models of Argument. Proceedings of COMMA 2022, pp. 296–307, IOS Press, Amsterdam etc, 2022.
@incollection{hp22gradual,
title = {Formalising an aspect of argument strength: degrees of attackability},
author = {H. Prakken},
editor = {Francesca Toni et al.},
url = {https://ebooks.iospress.nl/doi/10.3233/FAIA220161},
doi = {10.3233/FAIA220161},
year = {2022},
date = {2022-01-01},
booktitle = {Computational Models of Argument. Proceedings of COMMA 2022},
pages = {296–307},
publisher = {IOS Press},
address = {Amsterdam etc},
abstract = {This paper formally studies a notion of dialectical argument strength in terms of the number of ways in which an argument can be successfully attacked in expansions of an abstract argumentation framework. The proposed model is abstract but its design is motivated by the wish to avoid overly limiting assumptions that may not hold in particular dialogue contexts or in particular structured accounts of argumentation. It is shown that most principles for gradual argument acceptability proposed in the literature fail to hold for the proposed notion of dialectical strength, which clarifies their rational foundations and highlights the importance of distinguishing between logical, dialectical and rhetorical argument strength.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
van Woerkom W; Grossi D; Prakken H; Verheij B
Justification in Case-Based Reasoning Proceedings Article
In: Čyras, Kristijonas; Kampik, Timotheus; Cocarascu, Oana; Rago, Antonio (Ed.): Proceedings of the First International Workshop on Argumentation for eXplainable AI, pp. 1–13, CEUR Workshop Proceedings, 2022.
@inproceedings{vanwoerkom2022,
title = {Justification in Case-Based Reasoning},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Čyras, Kristijonas and Kampik, Timotheus and Cocarascu, Oana and Rago, Antonio},
url = {https://ceur-ws.org/Vol-3209/5942.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the First International Workshop on Argumentation for eXplainable AI},
pages = {1–13},
publisher = {CEUR Workshop Proceedings},
abstract = {The explanation and justification of decisions is an important subject in contemporary data-driven automated methods. Case-based argumentation has been proposed as the formal background for the explanation of data-driven automated decision making. In particular, a method was developed in recent work based on the theory of precedential constraint which reasons from a case base, given by the training data of the machine learning system, to produce a justification for the outcome of a focus case. An important role is played in this method by the notions of citability and compensation, and in the present work we develop these in more detail. Special attention is paid to the notion of compensation; we formally specify the notion and identify several of its desirable properties. These considerations reveal a refined formal perspective on the explanation method as an extension of the theory of precedential constraint with a formal notion of justification.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
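The notion of citability from the theory of precedential constraint can be illustrated with sets of factors. The toy check below is one common reading and a simplification of my own (the paper's dimensions and compensation machinery are richer): a precedent decided for a side is citable for a focus case when every factor that favoured that side in the precedent also holds in the focus case:

PRO, CON = "pro", "con"  # factor polarity with respect to the precedent's outcome

def citable(precedent_factors, focus_factors):
    """precedent_factors: dict factor -> polarity; focus_factors: set of factors."""
    pro = {f for f, pol in precedent_factors.items() if pol == PRO}
    return pro <= focus_factors  # all outcome-supporting factors carry over

precedent = {"repeat_customer": PRO, "late_payment": CON}  # decided for "allow"
print(citable(precedent, {"repeat_customer", "long_contract"}))  # True
print(citable(precedent, {"late_payment"}))                      # False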
Hopner N; Tiddi I; van Hoof H
Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods Proceedings Article
In: Raedt, Lud De (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 3050–3056, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{ijcai2022p423,
title = {Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods},
author = {Hopner, Niklas and Tiddi, Ilaria and van Hoof, Herke},
editor = {Lud De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/423},
doi = {10.24963/ijcai.2022/423},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {3050–3056},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {Enabling reinforcement learning (RL) agents to leverage a knowledge base while learning from experience promises to advance RL in knowledge intensive domains. However, it has proven difficult to leverage knowledge that is not manually tailored to the environment. We propose to use the subclass relationships present in open-source knowledge graphs to abstract away from specific objects. We develop a residual policy gradient method that is able to integrate knowledge across different abstraction levels in the class hierarchy. Our method results in improved sample efficiency and generalisation to unseen objects in commonsense games, but we also investigate failure modes, such as excessive noise in the extracted class knowledge or environments with little class structure.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
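One way to picture the residual idea in the abstract (an assumption on my part, not the authors' exact formulation) is a policy whose logits sum a class-level prior derived from the knowledge graph and a learned object-level residual:

import torch
import torch.nn.functional as F

def hierarchical_policy(logits_per_level):
    # Sum the logits contributed by each abstraction level, then normalise once,
    # so the object-level term acts as a residual on the class-level prior.
    return F.softmax(torch.stack(logits_per_level).sum(dim=0), dim=-1)

prior = torch.zeros(1, 4)     # class-level prior from the knowledge graph,
prior[0, 2] = 2.0             # e.g. "instances of Food afford the action eat"
residual = torch.randn(1, 4, requires_grad=True)  # object-level, trained by PG
pi = hierarchical_policy([prior, residual])       # (1, 4) action distribution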
van der Meer M; Reuver M; Khurana U; Krause L; Santamaría S B
Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction Miscellaneous
2022.
@misc{https://doi.org/10.48550/arxiv.2209.08966,
title = {Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction},
author = {van der Meer, Michiel and Reuver, Myrthe and Khurana, Urja and Krause, Lea and Santamaría, Selene Báez},
url = {https://arxiv.org/abs/2209.08966},
doi = {10.48550/ARXIV.2209.08966},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {This paper describes our winning contribution to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
van Woerkom W; Grossi D; Prakken H; Verheij B
Landmarks in Case-Based Reasoning: From Theory to Data Proceedings Article
In: Schlobach, Stefan; Pérez-Ortiz, María; Tielman, Myrthe (Ed.): HHAI2022: Augmenting Human Intellect, pp. 212–224, IOS Press, 2022.
@inproceedings{woerkom2022landmarks,
title = {Landmarks in Case-Based Reasoning: From Theory to Data},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Schlobach, Stefan and Pérez-Ortiz, María and Tielman, Myrthe},
url = {https://ebooks.iospress.nl/volumearticle/60868},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
volume = {354},
pages = {212–224},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {Widespread application of uninterpretable machine learning systems for sensitive purposes has spurred research into elucidating the decision making process of these systems. These efforts have their background in many different disciplines, one of which is the field of AI & law. In particular, recent works have observed that machine learning training data can be interpreted as legal cases. Under this interpretation the formalism developed to study case law, called the theory of precedential constraint, can be used to analyze the way in which machine learning systems draw on training data – or should draw on them – to make decisions. These works predominantly stay on the theoretical level, hence in the present work the formalism is evaluated on a real world dataset. Through this analysis we identify a significant new concept which we call landmark cases, and use it to characterize the types of datasets that are more or less suitable to be described by the theory.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kökciyan N; Yolum P
Taking Situation-Based Privacy Decisions: Privacy Assistants Working with Humans Proceedings Article
In: Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI), pp. 703–709, 2022.
@inproceedings{pas-ijcai-2022,
title = {Taking Situation-Based Privacy Decisions: Privacy Assistants Working with Humans},
author = {Kökciyan, Nadin and Yolum, Pinar},
url = {https://www.ijcai.org/proceedings/2022/0099.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI)},
pages = {703–709},
abstract = {Privacy on the Web is typically managed by giving consent to individual Websites for various aspects of data usage. This paradigm requires too much human effort and thus is impractical for Internet of Things (IoT) applications where humans interact with many new devices on a daily basis. Ideally, software privacy assistants can help by making privacy decisions in different situations on behalf of the users. To realize this, we propose an agent-based model for a privacy assistant. The model identifies the contexts that a situation implies and computes the trustworthiness of these contexts. Contrary to traditional trust models that capture trust in an entity by observing a large number of interactions, our proposed model can assess the trustworthiness even if the user has not interacted with the particular device before. Moreover, our model can decide which situations are inherently ambiguous and thus can request the human to make the decision. We evaluate various aspects of the model using a real-life data set and report adjustments that are needed to serve different types of users well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayci G; Şensoy M; Özgür A; Yolum P
Uncertainty-Aware Personal Assistant for Making Personalized Privacy Decisions Journal Article
In: ACM Transactions on Internet Technology, 2022, (In press).
@article{pure-2022,
title = {Uncertainty-Aware Personal Assistant for Making Personalized Privacy Decisions},
author = {Ayci, Gönül and Şensoy, Murat and Özgür, Arzucan and Yolum, Pinar},
url = {https://doi.org/10.1145/3561820},
doi = {10.1145/3561820},
year = {2022},
date = {2022-01-01},
journal = {ACM Transactions on Internet Technology},
publisher = {Association for Computing Machinery},
abstract = {Many software systems, such as online social networks, enable users to share information about themselves. While the action of sharing is simple, it requires an elaborate thought process on privacy: what to share, with whom to share, and for what purposes. Thinking about these for each piece of content to be shared is tedious. Recent approaches to tackle this problem build personal assistants that can help users by learning what is private over time and recommending privacy labels such as private or public to individual content that a user considers sharing. However, privacy is inherently ambiguous and highly personal. Existing approaches to recommend privacy decisions do not address these aspects of privacy sufficiently. Ideally, a personal assistant should be able to adjust its recommendation based on a given user, considering that user’s privacy understanding. Moreover, the personal assistant should be able to assess when its recommendation would be uncertain and let the user make the decision on her own. Accordingly, this paper proposes a personal assistant that uses evidential deep learning to classify content based on its privacy label. An important characteristic of the personal assistant is that it can model its uncertainty in its decisions explicitly, determine that it does not know the answer, and delegate from making a recommendation when its uncertainty is high. By factoring in the user’s own understanding of privacy, such as risk factors or own labels, the personal assistant can personalize its recommendations per user. We evaluate our proposed personal assistant using a well-known data set. Our results show that our personal assistant can accurately identify uncertain cases, personalize its recommendations to its user’s needs, and thus help users preserve their privacy well.},
note = {In press},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
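The uncertainty estimate that lets the assistant delegate can be sketched with the standard evidential-deep-learning Dirichlet formulation (variable names and the threshold are mine; in the paper the evidence comes from a trained network):

import numpy as np

def edl_decision(evidence, threshold=0.5):
    """evidence: non-negative per-class network outputs for one item."""
    alpha = evidence + 1.0                  # Dirichlet concentration parameters
    belief = evidence / alpha.sum()         # per-class belief masses
    uncertainty = len(alpha) / alpha.sum()  # mass left unassigned to any class
    if uncertainty > threshold:
        return "delegate to user", uncertainty
    labels = ("private", "public")
    return labels[int(belief.argmax())], uncertainty

print(edl_decision(np.array([8.0, 1.0])))  # low uncertainty: recommend a label
print(edl_decision(np.array([0.4, 0.6])))  # high uncertainty: delegate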
Erdogan E; Dignum F; Verbrugge R; Yolum P
Abstracting Minds: Computational Theory of Mind for Human-Agent Collaboration Book Section
In: HHAI2022: Augmenting Human Intellect, pp. 199–211, IOS Press, 2022.
@incollection{erdogan2022abstracting,
title = {Abstracting Minds: Computational Theory of Mind for Human-Agent Collaboration},
author = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, Pinar},
url = {http://dx.doi.org/10.3233/FAIA220199},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {199–211},
publisher = {IOS Press},
abstract = {Theory of mind refers to the human ability to reason about mental content of other people such as beliefs, desires, and goals. In everyday life, people rely on their theory of mind to understand, explain, and predict the behaviour of others. Having a theory of mind is especially useful when people collaborate, since individuals can then reason on what the other individual knows as well as what reasoning they might do. Realization of hybrid intelligence, where an agent collaborates with a human, will require the agent to be able to do similar reasoning through computational theory of mind. Accordingly, this paper provides a mechanism for computational theory of mind based on abstractions of single beliefs into higher-level concepts. These concepts can correspond to social norms, roles, as well as values. Their use in decision making serves as a heuristic to choose among interactions, thus facilitating collaboration on decisions. Using examples from the medical domain, we demonstrate how having such a theory of mind enables an agent to interact with humans efficiently and can increase the quality of the decisions humans make.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Erdogan E; Dignum F; Verbrugge R; Yolum P
Computational Theory of Mind for Human-Agent Coordination Proceedings Article
In: Ajmeri, Nirav; Morris Martin, Andreasa; Savarimuthu, Bastin Tony Roy (Ed.): Coordination, Organizations, Institutions, Norms, and Ethics for Governance of Multi-Agent Systems XV, pp. 92–108, Springer International Publishing, 2022.
@inproceedings{erdogan+2022,
title = {Computational Theory of Mind for Human-Agent Coordination},
author = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, Pinar},
editor = {Ajmeri, Nirav and Morris Martin, Andreasa and Savarimuthu, Bastin Tony Roy},
url = {http://dx.doi.org/10.1007/978-3-031-20845-4_6},
year = {2022},
date = {2022-01-01},
booktitle = {Coordination, Organizations, Institutions, Norms, and Ethics for Governance of Multi-Agent Systems XV},
pages = {92–108},
publisher = {Springer International Publishing},
abstract = {In everyday life, people often depend on their theory of mind, i.e., their ability to reason about unobservable mental content of others to understand, explain, and predict their behaviour. Many agent-based models have been designed to develop computational theory of mind and analyze its effectiveness in various tasks and settings. However, most existing models are not generic (e.g., only applied in a given setting), not feasible (e.g., require too much information to be processed), or not human-inspired (e.g., do not capture the behavioral heuristics of humans). This hinders their applicability in many settings. Accordingly, we propose a new computational theory of mind, which captures the human decision heuristics of reasoning by abstracting individual beliefs about others. We specifically study computational affinity and show how it can be used in tandem with theory of mind reasoning when designing agent models for human-agent negotiation. We perform two-agent simulations to analyze the role of affinity in getting to agreements when there is a bound on the time to be spent for negotiating. Our results suggest that modeling affinity can ease the negotiation process by decreasing the number of rounds needed for an agreement as well as yield a higher benefit for agents with theory of mind reasoning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Michelini M; Haret A; Grossi D
Group Wisdom at a Price: Jury Theorems with Costly Information Proceedings Article
In: Raedt, Luc De (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 419–425, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{michelini22group,
title = {Group Wisdom at a Price: Jury Theorems with Costly Information},
author = {Michelini, Matteo and Haret, Adrian and Grossi, Davide},
editor = {Luc De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/60},
doi = {10.24963/ijcai.2022/60},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {419–425},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {We study epistemic voting on binary issues where voters are characterized by their competence, i.e., the probability of voting for the correct alternative, and can choose between two actions: voting or abstaining. In our setting voting involves the expenditure of some effort, which is required to achieve the appropriate level of competence, whereas abstention carries no effort. We model this scenario as a game and characterize its equilibria under several variations. Our results show that when agents are aware of everyone's incentives, then the addition of effort may lead to Nash equilibria where wisdom of the crowds is lost. We further show that if agents' awareness of each other is constrained by a social network, the topology of the network may actually mitigate this effect.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
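The trade-off studied in this abstract can be made concrete with a small calculation (a sketch under simplified assumptions, not the paper's game-theoretic model): with k effortful voters of competence p, group accuracy follows a binomial majority vote, and the marginal accuracy gain of additional effortful voters shrinks as k grows, so once that gain drops below the effort cost, abstention becomes a best response and crowd wisdom can unravel.

from math import comb

def majority_correct(k, p):
    """Probability that a majority of k independent voters, each correct
    with probability p, picks the correct alternative (ties split 50/50)."""
    total = 0.0
    for i in range(k + 1):
        prob = comb(k, i) * p**i * (1 - p)**(k - i)
        if 2 * i > k:
            total += prob
        elif 2 * i == k:
            total += 0.5 * prob
    return total

def marginal_gain(k, p):
    """Accuracy gain from two more effortful voters (odd k avoids ties);
    once this falls below the effort cost, abstaining is a best response."""
    return majority_correct(k + 2, p) - majority_correct(k, p)

if __name__ == "__main__":
    p, cost = 0.6, 0.01
    for k in (1, 5, 25, 101):
        gain = marginal_gain(k, p)
        print(k, round(majority_correct(k, p), 3), round(gain, 4), gain < cost)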
Khurana U; Nalisnick E; Fokkens A
How Emotionally Stable is ALBERT? Testing Robustness with Stochastic Weight Averaging on a Sentiment Analysis Task Proceedings Article
In: Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems, pp. 16–31, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
@inproceedings{khurana-etal-2021-emotionally,
title = {How Emotionally Stable is ALBERT? Testing Robustness with Stochastic Weight Averaging on a Sentiment Analysis Task},
author = {Khurana, Urja and Nalisnick, Eric and Fokkens, Antske},
url = {https://aclanthology.org/2021.eval4nlp-1.3},
year = {2021},
date = {2021-11-01},
booktitle = {Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems},
pages = {16–31},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
abstract = {Despite their success, modern language models are fragile. Even small changes in their training pipeline can lead to unexpected results. We study this phenomenon by examining the robustness of ALBERT (Lan et al., 2020) in combination with Stochastic Weight Averaging (SWA)—a cheap way of ensembling—on a sentiment analysis task (SST-2). In particular, we analyze SWA's stability via CheckList criteria (Ribeiro et al., 2020), examining the agreement on errors made by models differing only in their random seed. We hypothesize that SWA is more stable because it ensembles model snapshots taken along the gradient descent trajectory. We quantify stability by comparing the models' mistakes with Fleiss' Kappa (Fleiss, 1971) and overlap ratio scores. We find that SWA reduces error rates in general; yet the models still suffer from their own distinct biases (according to CheckList).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
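The two ingredients of this stability analysis, SWA as a cheap ensemble of snapshots and an agreement score over the mistakes of models differing only in their random seed, can be sketched as follows (our simplification; the paper's exact overlap-ratio definition may differ):

import numpy as np

def swa_average(snapshots):
    """Average corresponding parameter arrays of model snapshots collected
    along the gradient-descent trajectory (the SWA 'cheap ensemble')."""
    return [np.mean(np.stack(params), axis=0) for params in zip(*snapshots)]

def error_overlap(errors_a, errors_b):
    """Agreement of two models' mistakes: |A intersect B| / |A union B| over
    the sets of misclassified example ids (1.0 = identical mistakes)."""
    a, b = set(errors_a), set(errors_b)
    return 1.0 if not (a | b) else len(a & b) / len(a | b)

if __name__ == "__main__":
    # error sets of two runs differing only in their random seed
    print(error_overlap({1, 5, 9}, {1, 5, 12}))  # 0.5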
Dobbe R; Krendl Gilbert T; Mintz Y
Hard choices in artificial intelligence Journal Article
In: Artificial Intelligence, vol. 300, 2021, ISSN: 0004-3702.
@article{dobbe_hard_2021,
title = {Hard choices in artificial intelligence},
author = {Dobbe, Roel and Krendl Gilbert, Thomas and Mintz, Yonatan},
url = {https://www.sciencedirect.com/science/article/pii/S0004370221001065},
doi = {10.1016/j.artint.2021.103555},
issn = {0004-3702},
year = {2021},
date = {2021-11-01},
urldate = {2021-08-04},
journal = {Artificial Intelligence},
volume = {300},
abstract = {As AI systems are integrated into high stakes social domains, researchers now examine how to design and operate them in a safe and ethical manner. However, the criteria for identifying and diagnosing safety risks in complex social contexts remain unclear and contested. In this paper, we examine the vagueness in debates about the safety and ethical behavior of AI systems. We show how this vagueness cannot be resolved through mathematical formalism alone, instead requiring deliberation about the politics of development as well as the context of deployment. Drawing from a new sociotechnical lexicon, we redefine vagueness in terms of distinct design challenges at key stages in AI system development. The resulting framework of Hard Choices in Artificial Intelligence (HCAI) empowers developers by 1. identifying points of overlap between design decisions and major sociotechnical challenges; 2. motivating the creation of stakeholder feedback channels so that safety issues can be exhaustively addressed. As such, HCAI contributes to a timely debate about the status of AI development in democratic societies, arguing that deliberation should be the goal of AI Safety, not just the procedure by which it is ensured.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Koopman T; Renooij S
Persuasive Contrastive Explanations (Extended Abstract) Proceedings Article
In: Baader, F.; Bogaerts, B.; Brewka, G.; Hoffmann, J.; Lukasiewicz, T.; Potyka, N.; Toni, F. (Ed.): Proceedings of the 2nd Workshop on Explainable Logic-based Knowledge Representation (XLoKR), 2021.
@inproceedings{xlokr-koopman21,
title = {Persuasive Contrastive Explanations (Extended Abstract)},
author = {Koopman, Tara and Renooij, Silja},
editor = {Baader, F. and Bogaerts, B. and Brewka, G. and Hoffmann, J. and Lukasiewicz, T. and Potyka, N. and Toni, F.},
url = {https://xlokr21.ai.vub.ac.be/papers/16/paper.pdf},
year = {2021},
date = {2021-11-01},
booktitle = {Proceedings of the 2nd Workshop on Explainable Logic-based Knowledge Representation (XLoKR)},
abstract = {Explanation in Artificial Intelligence is often focused on providing reasons for why a model under consideration and its outcome are correct. Recently, research in explainable machine learning has initiated a shift in focus on including so-called counterfactual explanations. In this paper we propose to combine both types of explanation into a persuasive contrastive explanation that aims to provide an answer to the question Why outcome t instead of t'? posed by a user. In addition, we propose a model-agnostic algorithm for computing persuasive contrastive explanations from AI systems with few input variables.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koopman T; Renooij S
Persuasive Contrastive Explanations for Bayesian Networks Proceedings Article
In: Vejnarova, J.; Wilson, N. (Ed.): Proceedings of the Sixteenth European Conference on Symbolic and Quantitative Approaches to Reasoning with Uncertainty (ECSQARU), pp. 229–242, Springer, Cham, 2021.
@inproceedings{ecsqaru-koopman21,
title = {Persuasive Contrastive Explanations for Bayesian Networks},
author = {Koopman, Tara and Renooij, Silja},
editor = {Vejnarova, J. and Wilson, N.},
url = {https://webspace.science.uu.nl/~renoo101/Prof/PDF/Conf/ecsqaru2021-final.pdf},
doi = {10.1007/978-3-030-86772-0_17},
year = {2021},
date = {2021-09-01},
booktitle = {Proceedings of the Sixteenth European Conference on Symbolic and Quantitative Approaches to Reasoning with Uncertainty (ECSQARU)},
volume = {12897},
pages = {229–242},
publisher = {Springer, Cham},
series = {Lecture Notes in Computer Science},
abstract = {Explanation in Artificial Intelligence is often focused on providing reasons for why a model under consideration and its outcome are correct. Recently, research in explainable machine learning has initiated a shift in focus on including so-called counterfactual explanations. In this paper we propose to combine both types of explanation in the context of explaining Bayesian networks. To this end we introduce persuasive contrastive explanations that aim to provide an answer to the question Why outcome t instead of t'? posed by a user. In addition, we propose an algorithm for computing persuasive contrastive explanations. Both our definition of persuasive contrastive explanation and the proposed algorithm can be employed beyond the current scope of Bayesian networks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
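For models with few input variables, as in the two entries above, the contrastive part of such an explanation can be found by brute force. The sketch below is our own construction, not the proposed algorithm: it searches for a smallest change of inputs that flips the outcome from t to t'; the "persuasive" part, which additionally reports why t held, is omitted.

from itertools import combinations, product

def contrastive_explanation(model, inputs, domains, target):
    """Find a smallest set of input changes that makes `model` return `target`.

    model:   callable taking a dict of variable assignments
    inputs:  dict, the current assignment (yielding outcome t)
    domains: dict, variable name -> list of possible values
    target:  the contrast outcome t'
    Brute force, so only feasible for few input variables."""
    names = list(inputs)
    for size in range(1, len(names) + 1):
        for subset in combinations(names, size):
            alternatives = [[v for v in domains[n] if v != inputs[n]] for n in subset]
            for assignment in product(*alternatives):
                candidate = dict(inputs, **dict(zip(subset, assignment)))
                if model(candidate) == target:
                    return dict(zip(subset, assignment))  # the contrastive changes
    return None  # no change of inputs yields t'

if __name__ == "__main__":
    model = lambda x: "approve" if x["income"] == "high" and x["debt"] == "low" else "reject"
    print(contrastive_explanation(model,
                                  {"income": "low", "debt": "low"},
                                  {"income": ["low", "high"], "debt": ["low", "high"]},
                                  "approve"))
    # -> {'income': 'high'}: approve instead of reject, had income been high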
Rahman M A; Hopner N; Christianos F; Albrecht S V
Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning Proceedings Article
In: Meila, Marina; Zhang, Tong (Ed.): Proceedings of the 38th International Conference on Machine Learning, pp. 8776–8786, PMLR, 2021.
@inproceedings{pmlr-v139-rahman21a,
title = {Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning},
author = {Rahman, Muhammad A and Hopner, Niklas and Christianos, Filippos and Albrecht, Stefano V},
editor = {Meila, Marina and Zhang, Tong},
url = {https://proceedings.mlr.press/v139/rahman21a.html},
year = {2021},
date = {2021-07-01},
booktitle = {Proceedings of the 38th International Conference on Machine Learning},
volume = {139},
pages = {8776–8786},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
abstract = {Ad hoc teamwork is the challenging problem of designing an autonomous agent which can adapt quickly to collaborate with teammates without prior coordination mechanisms, including joint training. Prior work in this area has focused on closed teams in which the number of agents is fixed. In this work, we consider open teams by allowing agents with different fixed policies to enter and leave the environment without prior notification. Our solution builds on graph neural networks to learn agent models and joint-action value models under varying team compositions. We contribute a novel action-value computation that integrates the agent model and joint-action value model to produce action-value estimates. We empirically demonstrate that our approach successfully models the effects other agents have on the learner, leading to policies that robustly adapt to dynamic team compositions and significantly outperform several alternative methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
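The "novel action-value computation that integrates the agent model and joint-action value model" can be caricatured without the graph neural networks (a minimal sketch, our simplification): the agent model predicts each current teammate's action distribution, and the joint-action value is marginalized over those predictions.

from itertools import product
from math import prod

def expected_action_value(own_actions, teammate_models, joint_q):
    """Marginalize a joint-action value over predicted teammate actions:
    Q(a) = sum over joint teammate actions of P(joint) * Q_joint(a, joint).

    teammate_models: one dict per current teammate, action -> probability
                     (the list may grow or shrink between calls: openness).
    joint_q: callable (own_action, tuple_of_teammate_actions) -> value."""
    return {
        a: sum(
            prod(m[act] for m, act in zip(teammate_models, joint)) * joint_q(a, joint)
            for joint in product(*(list(m) for m in teammate_models))
        )
        for a in own_actions
    }

if __name__ == "__main__":
    mates = [{"help": 0.8, "idle": 0.2}]                     # one predicted teammate
    q = lambda a, joint: 1.0 if (a, joint[0]) == ("act", "help") else 0.0
    print(expected_action_value(["act", "wait"], mates, q))  # {'act': 0.8, 'wait': 0.0}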
Baez Santamaria S; Baier T; Kim T; Krause L; Kruijt J; Vossen P
EMISSOR: A platform for capturing multimodal interactions as Episodic Memories and Interpretations with Situated Scenario-based Ontological References Proceedings Article
In: Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR), pp. 56–77, Association for Computational Linguistics, Groningen, Netherlands (Online), 2021.
@inproceedings{baez-santamaria-etal-2021-emissor,
title = {EMISSOR: A platform for capturing multimodal interactions as Episodic Memories and Interpretations with Situated Scenario-based Ontological References},
author = {Baez Santamaria, Selene and Baier, Thomas and Kim, Taewoon and Krause, Lea and Kruijt, Jaap and Vossen, Piek},
url = {https://aclanthology.org/2021.mmsr-1.6},
year = {2021},
date = {2021-06-01},
booktitle = {Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)},
pages = {56–77},
publisher = {Association for Computational Linguistics},
address = {Groningen, Netherlands (Online)},
abstract = {We present EMISSOR: a platform to capture multimodal interactions as recordings of episodic experiences with explicit referential interpretations that also yield an episodic Knowledge Graph (eKG). The platform stores streams of multiple modalities as parallel signals. Each signal is segmented and annotated independently with interpretation. Annotations are eventually mapped to explicit identities and relations in the eKG. As we ground signal segments from different modalities to the same instance representations, we also ground different modalities across each other. Unique to our eKG is that it accepts different interpretations across modalities, sources and experiences and supports reasoning over conflicting information and uncertainties that may result from multimodal experiences. EMISSOR can record and annotate experiments in virtual and real-world settings, combine data, evaluate system behavior and performance for preset goals, but also model the accumulation of knowledge and interpretations in the Knowledge Graph as a result of these episodic experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
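The data layout this abstract describes, parallel signal streams, independently annotated segments, and grounding into an episodic Knowledge Graph, might look as follows in miniature (hypothetical classes, not EMISSOR's actual schema):

from dataclasses import dataclass, field

@dataclass
class Annotation:
    span: tuple          # (start, end) segment within the signal
    interpretation: str  # e.g. a detected face or a mentioned name

@dataclass
class Signal:
    modality: str        # "audio", "video", "text", ...
    data_ref: str        # path/URI of the raw recording
    annotations: list = field(default_factory=list)

@dataclass
class Scenario:
    signals: list
    ekg: list = field(default_factory=list)  # (subject, predicate, object) triples

    def ground(self, annotation, instance_iri):
        """Map an annotation to an eKG instance; annotations from different
        modalities grounded to the same IRI become grounded to each other."""
        self.ekg.append((instance_iri, "denotedBy", annotation.interpretation))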
Boomgaard G; Santamaría S B; Tiddi I; Sips R J; Szávik Z
Learning profile-based recommendations for medical search auto-complete Proceedings Article
In: Martin, Andreas; Hinkelmann, Knut; Fill, Hans-Georg; Gerber, Aurona; Lenat, Doug; Stolle, Reinhard; van Harmelen, Frank (Ed.): AAAI-MAKE 2021 Combining Machine Learning and Knowledge Engineering, pp. 1–13, CEUR-WS, 2021.
@inproceedings{boomgaard-etal-2021-learning,
title = {Learning profile-based recommendations for medical search auto-complete},
author = {Guusje Boomgaard and Selene Baez Santamaría and Ilaria Tiddi and Robert Jan Sips and Zoltán Szávik},
editor = {Andreas Martin and Knut Hinkelmann and Hans-Georg Fill and Aurona Gerber and Doug Lenat and Reinhard Stolle and {van Harmelen}, Frank},
url = {http://ceur-ws.org/Vol-2846/paper34.pdf},
year = {2021},
date = {2021-04-10},
booktitle = {AAAI-MAKE 2021 Combining Machine Learning and Knowledge Engineering},
pages = {1–13},
publisher = {CEUR-WS},
series = {CEUR Workshop Proceedings},
abstract = {Query popularity is a main feature in web-search auto-completion. Several personalization features have been proposed to support specific users' searches, but often do not meet the privacy requirements of a medical environment (e.g. clinical trial search). Furthermore, in such specialized domains, the differences in user expertise and the domain-specific language users employ are far more widespread than in web-search. We propose a query auto-completion method based on different relevancy and diversity features, which can appropriately meet different user needs. Our method incorporates indirect popularity measures, along with graph topology and semantic features. An evolutionary algorithm optimizes relevance, diversity, and coverage to return a top-k list of query completions to the user. We evaluated our approach quantitatively and qualitatively using query log data from a clinical trial search engine, comparing the effects of different relevancy and diversity settings using domain experts. We found that syntax-based diversity has more impact on effectiveness and efficiency, graph-based diversity shows a more compact list of results, and relevancy has the most effect on indicated preferences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
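The core of the approach is a fitness function that trades off relevance, diversity, and coverage of a candidate top-k completion list; an evolutionary loop then mutates and recombines candidate lists and keeps the fittest. A minimal sketch of such a fitness function (our stand-in; the paper's features are richer, including graph topology and semantics):

def fitness(completions, relevance, similarity, weights=(1.0, 1.0, 1.0)):
    """Score a candidate top-k list of query completions.

    relevance:  dict, completion -> relevance score (e.g. an indirect
                popularity measure)
    similarity: callable (a, b) -> similarity in [0, 1]"""
    w_rel, w_div, w_cov = weights
    rel = sum(relevance[c] for c in completions)
    pairs = [(a, b) for i, a in enumerate(completions) for b in completions[i + 1:]]
    div = sum(1.0 - similarity(a, b) for a, b in pairs) / max(len(pairs), 1)
    cov = len({c.split()[0] for c in completions}) / len(completions)  # crude proxy
    return w_rel * rel + w_div * div + w_cov * cov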
Mehrotra S
Modelling Trust in Human-AI Interaction Proceedings Article
In: Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1826–1828, International Foundation for Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, 2021, ISBN: 9781450383073.
@inproceedings{10.5555/3463952.3464253,
title = {Modelling Trust in Human-AI Interaction},
author = {Mehrotra, Siddharth},
url = {https://pure.tudelft.nl/ws/portalfiles/portal/95731744/p1826.pdf},
isbn = {9781450383073},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1826–1828},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Virtual Event, United Kingdom},
series = {AAMAS '21},
abstract = {Trust is an important element of any interaction, but especially when we are interacting with a piece of technology which does not think like we do. Therefore, AI systems need to understand how humans trust them, and what to do to promote appropriate trust. The aim of this research is to study trust through both a formal and social lens. We will be working on formal models of trust, but with a focus on the social nature of trust in order to represent how humans trust AI. We will then employ methods from human-computer interaction research to study if these models work in practice, and what would eventually be necessary for systems to elicit appropriate levels of trust from their users. The context of this research will be AI agents which interact with their users to offer personal support.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mehrotra S; Jonker C M; Tielman M L
More Similar Values, More Trust? - The Effect of Value Similarity on Trust in Human-Agent Interaction Proceedings Article
In: Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pp. 777–783, Association for Computing Machinery, Virtual Event, USA, 2021, ISBN: 9781450384735.
@inproceedings{10.1145/3461702.3462576,
title = {More Similar Values, More Trust? - The Effect of Value Similarity on Trust in Human-Agent Interaction},
author = {Mehrotra, Siddharth and Jonker, Catholijn M. and Tielman, Myrthe L.},
url = {https://doi.org/10.1145/3461702.3462576},
doi = {10.1145/3461702.3462576},
isbn = {9781450384735},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society},
pages = {777–783},
publisher = {Association for Computing Machinery},
address = {Virtual Event, USA},
series = {AIES '21},
abstract = {As AI systems are increasingly involved in decision making, it also becomes important that they elicit appropriate levels of trust from their users. To achieve this, it is first important to understand which factors influence trust in AI. We identify that a research gap exists regarding the role of personal values in trust in AI. Therefore, this paper studies how human and agent Value Similarity (VS) influences a human's trust in that agent. To explore this, 89 participants teamed up with five different agents, which were designed with varying levels of value similarity to that of the participants. In a within-subjects, scenario-based experiment, agents gave suggestions on what to do when entering the building to save a hostage. We analyzed the agent's scores on subjective value similarity, trust and qualitative data from open-ended questions. Our results show that agents rated as having more similar values also scored higher on trust, indicating a positive effect between the two. With this result, we add to the existing understanding of human-agent trust by providing insight into the role of value-similarity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Centeio Jorge C; Mehrotra S; Tielman M L; Jonker C M
Trust should correspond to trustworthiness: a formalization of appropriate, mutual trust in human-agent teams Proceedings Article
In: Proceedings of the 22nd International Workshop on Trust in Agent Societies, London, UK, 2021.
@inproceedings{jorge2021trust,
title = {Trust should correspond to trustworthiness: a formalization of appropriate, mutual trust in human-agent teams},
author = {Centeio Jorge, Carolina and Mehrotra, Siddharth and Tielman, Myrthe L. and Jonker, Catholijn M.},
url = {https://ceur-ws.org/Vol-3022/paper4.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 22nd International Workshop on Trust in Agent Societies},
address = {London, UK},
abstract = {In human-agent teams, how one teammate trusts another teammate should correspond to the latter’s actual trustworthiness, creating what we would call appropriate mutual trust. Although this sounds obvious, the notion of appropriate mutual trust for human-agent teamwork lacks a formal definition. In this article, we propose a formalization which represents trust as a belief about trustworthiness. Then, we address mutual trust, and pose that agents can use beliefs about trustworthiness to represent how they trust their human teammates, as well as to reason about how their human teammates trust them. This gives us a formalization with nested beliefs about beliefs of trustworthiness. Next, we highlight that mutual trust should also be appropriate, where we define appropriate trust in an agent as the trust which corresponds directly to that agent’s trustworthiness. Finally, we explore how agents can define their own trustworthiness, using the concepts of ability, benevolence and integrity. This formalization of appropriate mutual trust can form the base for developing agents which can promote such trust.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
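Schematically, in our own notation rather than the paper's formal language, the three ingredients of this formalization read:

\begin{align*}
  T_a(h) &= B_a\big(\mathit{tw}(h)\big)
    && \text{trust as a belief about trustworthiness} \\
  \widehat{T}_h(a) &= B_a\big(B_h(\mathit{tw}(a))\big)
    && \text{the agent's nested estimate of the human's trust in it} \\
  B_a\big(\mathit{tw}(h)\big) &= \mathit{tw}(h)
    && \text{appropriateness: trust matches actual trustworthiness}
\end{align*}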
Steging C; Renooij S; Verheij B
Discovering the Rationale of Decisions: Towards a Method for Aligning Learning and Reasoning Proceedings Article
In: Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law, pp. 235–239, Association for Computing Machinery, São Paulo, Brazil, 2021, ISBN: 9781450385268.
@inproceedings{StegingICAIL21,
title = {Discovering the Rationale of Decisions: Towards a Method for Aligning Learning and Reasoning},
author = {Steging, Cor and Renooij, Silja and Verheij, Bart},
url = {https://doi.org/10.1145/3462757.3466059},
doi = {10.1145/3462757.3466059},
isbn = {9781450385268},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law},
pages = {235–239},
publisher = {Association for Computing Machinery},
address = {São Paulo, Brazil},
series = {ICAIL '21},
abstract = {In AI and law, systems that are designed for decision support should be explainable when pursuing justice. In order for these systems to be fair and responsible, they should make correct decisions and make them using a sound and transparent rationale. In this paper, we introduce a knowledge-driven method for model-agnostic rationale evaluation using dedicated test cases, similar to unit-testing in professional software development. We apply this new quantitative human-in-the-loop method in a machine learning experiment aimed at extracting known knowledge structures from artificial datasets from a real-life legal setting. We show that our method allows us to analyze the rationale of black box machine learning systems by assessing which rationale elements are learned or not. Furthermore, we show that the rationale can be adjusted using tailor-made training data based on the results of the rationale evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
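The unit-testing analogy can be made literal: dedicated test cases isolate one rationale element at a time and check whether the trained model respects it. A minimal sketch (hypothetical rule and stand-in model, not the paper's legal datasets):

def rationale_tests(model):
    """Unit-test style rationale checks: each case isolates one element of a
    hypothetical rule 'liable iff unlawful and imputable' and verifies that
    the trained model respects it."""
    cases = [
        ({"unlawful": True,  "imputable": True},  True),
        ({"unlawful": False, "imputable": True},  False),   # tests 'unlawful'
        ({"unlawful": True,  "imputable": False}, False),   # tests 'imputable'
    ]
    return [(x, want) for x, want in cases if model(x) != want]  # failures

if __name__ == "__main__":
    learned = lambda x: x["unlawful"] and x["imputable"]  # stand-in for a trained model
    print(rationale_tests(learned))  # [] means both rationale elements were learned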
van Bekkum M; de Boer M; van Harmelen F; Meyer-Vitali A; ten Teije A
Modular design patterns for hybrid learning and reasoning systems Journal Article
In: Appl. Intell., vol. 51, no. 9, pp. 6528–6546, 2021.
@article{DBLP:journals/apin/BekkumBHMT21,
title = {Modular design patterns for hybrid learning and reasoning systems},
author = {Michael van Bekkum and Maaike de Boer and Frank van Harmelen and André Meyer{-}Vitali and Annette ten Teije},
url = {https://link.springer.com/article/10.1007/s10489-021-02394-3},
doi = {10.1007/s10489-021-02394-3},
year = {2021},
date = {2021-01-01},
journal = {Appl. Intell.},
volume = {51},
number = {9},
pages = {6528–6546},
abstract = {The unification of statistical (data-driven) and symbolic (knowledge-driven) methods is widely recognised as one of the key challenges of modern AI. Recent years have seen a large number of publications on such hybrid neuro-symbolic AI systems. That rapidly growing literature is highly diverse and mostly empirical, and lacks a unifying view of the large variety of these hybrid systems. In this paper we analyse a large body of recent literature and we propose a set of modular design patterns for such hybrid, neuro-symbolic systems. We are able to describe the architecture of a very large number of hybrid systems by composing only a small set of elementary patterns as building blocks. The main contributions of this paper are: 1) a taxonomically organised vocabulary to describe both processes and data structures used in hybrid systems; 2) a set of 15+ design patterns for hybrid AI systems, organised in a set of elementary patterns and a set of compositional patterns; 3) an application of these design patterns in two realistic use-cases for hybrid AI systems. Our patterns reveal similarities between systems that were not recognised until now. Finally, our design patterns extend and refine Kautz' earlier attempt at categorising neuro-symbolic architectures.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kuzina A; Welling M; Tomczak J M
Diagnosing Vulnerability of Variational Auto-Encoders to Adversarial Attacks Proceedings Article
In: ICLR 2021 Workshop on Robust and Reliable Machine Learning in the Real World, 2021.
@inproceedings{kuzina2021diagnosing,
title = {Diagnosing Vulnerability of Variational Auto-Encoders to Adversarial Attacks},
author = {Kuzina, Anna and Welling, Max and Tomczak, Jakub M},
url = {https://arxiv.org/pdf/2103.06701.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {ICLR 2021 Workshop on Robust and Reliable Machine Learning in the Real World},
abstract = {In this work, we explore adversarial attacks on the Variational Autoencoders (VAE). We show how to modify a data point to obtain a prescribed latent code (supervised attack) or just get a drastically different code (unsupervised attack). We examine the influence of model modifications (β-VAE, NVAE) on the robustness of VAEs and suggest metrics to quantify it.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
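A supervised attack of the kind described, perturbing an input so the encoder maps it to a prescribed latent code, can be sketched in a few lines of PyTorch (our simplification; the paper's attacks and robustness metrics are more involved):

import torch

def supervised_latent_attack(encoder, x, z_target, steps=200, lr=0.05, eps=0.5):
    """Optimize a small perturbation delta so that encoder(x + delta) lands
    near a prescribed latent code z_target (the 'supervised attack')."""
    delta = torch.zeros_like(x, requires_grad=True)
    opt = torch.optim.Adam([delta], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(encoder(x + delta), z_target)
        loss.backward()
        opt.step()
        with torch.no_grad():
            delta.clamp_(-eps, eps)  # keep the perturbation small
    return delta.detach()

if __name__ == "__main__":
    enc = torch.nn.Linear(8, 2)           # stand-in for the VAE encoder mean
    x, z_t = torch.randn(8), torch.randn(2)
    delta = supervised_latent_attack(enc, x, z_t)
    print(float(torch.dist(enc(x + delta), z_t)))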
Zheng H; Verheij B
Rules, cases and arguments in artificial intelligence and law Book Section
In: Vogl, R. (Ed.): Research Handbook on Big Data Law, pp. 373–387, Edward Elgar Publishing, 2021.
@incollection{Zheng:2021,
title = {Rules, cases and arguments in artificial intelligence and law},
author = {H. Zheng and B. Verheij},
editor = {R Vogl},
url = {https://www.ai.rug.nl/~verheij/publications/handbook2021.htm},
year = {2021},
date = {2021-01-01},
booktitle = {Research Handbook on Big Data Law},
pages = {373–387},
publisher = {Edward Elgar Publishing},
abstract = {Artificial intelligence and law is an interdisciplinary field of research that dates back at least to the 1970s, with academic conferences starting in the 1980s. In the field, complex problems are addressed about the computational modeling and automated support of legal reasoning and argumentation. Scholars have different backgrounds, and progress is driven by insights from lawyers, judges, computer scientists, philosophers and others. The community investigates and develops artificial intelligence techniques applicable in the legal domain, in order to enhance access to law for citizens and to support the efficiency and quality of work in the legal domain, aiming to promote a just society. Integral to the legal domain, legal reasoning and its structure and process have gained much attention in AI & Law research. Such research is today especially relevant, since in these days of big data and widespread use of algorithms, there is a need in AI to connect knowledge-based and data-driven AI techniques in order to arrive at a social, explainable and responsible AI. By considering knowledge in the form of rules and data in the form of cases connected by arguments, the field of AI & Law contributes relevant representations and algorithms for handling a combination of knowledge and data. In this chapter, as an entry point into the literature on AI & Law, three major styles of modeling legal reasoning are studied: rule-based reasoning, case-based reasoning and argument-based reasoning, which are the focus of this chapter. We describe selected key ideas, leaving out formal detail. As we will see, these styles of modeling legal reasoning are related, and there is much research investigating relations. We use the example domain of Dutch tort law (Section 2) to illustrate these three major styles, which are then more fully explained (Sections 3 to 5).},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Kurtan A C; Yolum P
Assisting humans in privacy management: an agent-based approach Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 35, no. 7, 2021.
@article{kurtan-yolum-21,
title = {Assisting humans in privacy management: an agent-based approach},
author = {A. Can Kurtan and Pınar Yolum},
url = {https://link.springer.com/article/10.1007/s10458-020-09488-1},
doi = {10.1007/s10458-020-09488-1},
year = {2021},
date = {2021-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {35},
number = {7},
abstract = {Image sharing is a service offered by many online social networks. In order to preserve privacy of images, users need to think through and specify a privacy setting for each image that they upload. This is difficult for two main reasons: first, research shows that many times users do not know their own privacy preferences, but only become aware of them over time. Second, even when users know their privacy preferences, editing these privacy settings is cumbersome and requires too much effort, interfering with the quick sharing behavior expected on an online social network. Accordingly, this paper proposes a privacy recommendation model for images using tags and an agent that implements this, namely pelte. Each user agent makes use of the privacy settings that its user has set for previous images to automatically predict the privacy setting for an image that is uploaded to be shared. When in doubt, the agent analyzes the sharing behavior of other users in the user's network to be able to recommend to its user what should be considered private. Contrary to existing approaches that assume all the images are available to a centralized model, pelte is compatible with distributed environments since each agent accesses only the privacy settings of the images that the agent owner has shared or those that have been shared with the user. Our simulations on a real-life dataset show that pelte can accurately predict privacy settings even when a user has shared only a few images with others, the images have only a few tags or the user's friends have varying privacy preferences.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
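The prediction step can be illustrated with a toy tag-vote model (a hypothetical simplification, not the published pelte algorithm): the agent tallies its user's past privacy settings per tag and, when in doubt, consults the agents of accessible friends.

from collections import Counter, defaultdict

class TagPrivacyAgent:
    """Toy tag-vote model in the spirit of pelte (our simplification)."""

    def __init__(self):
        self.tag_votes = defaultdict(Counter)    # tag -> Counter({setting: n})

    def observe(self, tags, setting):
        for tag in tags:
            self.tag_votes[tag][setting] += 1

    def predict(self, tags, friends=()):
        votes = Counter()
        for tag in tags:
            votes += self.tag_votes[tag]
        if not votes:                            # no own evidence: ask friends
            for friend in friends:
                for tag in tags:
                    votes += friend.tag_votes[tag]
        return votes.most_common(1)[0][0] if votes else "private"  # safe default

if __name__ == "__main__":
    me, friend = TagPrivacyAgent(), TagPrivacyAgent()
    friend.observe(["beach", "family"], "private")
    me.observe(["office"], "public")
    print(me.predict(["family"], friends=[friend]))  # 'private'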
Liscio E; van der Meer M; Jonker C M; Murukannaiah P K
A Collaborative Platform for Identifying Context-Specific Values Proceedings Article
In: Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems, pp. 1773–1775, IFAAMAS, Online, 2021.
@inproceedings{Liscio2021a,
title = {A Collaborative Platform for Identifying Context-Specific Values},
author = {Liscio, Enrico and van der Meer, Michiel and Jonker, Catholijn M. and Murukannaiah, Pradeep K.},
url = {https://www.ifaamas.org/Proceedings/aamas2021/pdfs/p1773.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems},
pages = {1773–1775},
publisher = {IFAAMAS},
address = {Online},
series = {AAMAS '21},
abstract = {Value alignment is a crucial aspect of ethical multiagent systems. An important step toward value alignment is identifying values specific to an application context. However, identifying context-specific values is complex and cognitively demanding. To support this process, we develop a methodology and a collaborative web platform that employs AI techniques. We describe this platform, highlighting its intuitive design and implementation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liscio E; van der Meer M; Siebert L C; Jonker C M; Mouter N; Murukannaiah P K
Axies: Identifying and Evaluating Context-Specific Values Proceedings Article
In: Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2021), pp. 799–808, IFAAMAS, Online, 2021.
@inproceedings{Liscio2021b,
title = {Axies: Identifying and Evaluating Context-Specific Values},
author = {Liscio, Enrico and van der Meer, Michiel and Siebert, Luciano C. and Jonker, Catholijn M. and Mouter, Niek and Murukannaiah, Pradeep K.},
url = {https://ii.tudelft.nl/~pradeep/doc/Liscio-2021-AAMAS-Axies.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2021)},
pages = {799–808},
publisher = {IFAAMAS},
address = {Online},
abstract = {The pursuit of values drives human behavior and promotes cooperation. Existing research is focused on general (e.g., Schwartz) values that transcend contexts. However, context-specific values are necessary to (1) understand human decisions, and (2) engineer intelligent agents that can elicit human values and take value-aligned actions. We propose Axies, a hybrid (human and AI) methodology to identify context-specific values. Axies simplifies the abstract task of value identification as a guided value annotation process involving human annotators. Axies exploits the growing availability of value-laden text corpora and Natural Language Processing to assist the annotators in systematically identifying context-specific values. We evaluate Axies in a user study involving 60 subjects. In our study, six annotators generate value lists for two timely and important contexts: Covid-19 measures, and sustainable energy. Then, two policy experts and 52 crowd workers evaluate Axies value lists. We find that Axies yields values that are context-specific, consistent across different annotators, and comprehensible to end users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manggala P; Hoos H H; Nalisnick E
Bayesian Regression from Multiple Sources of Weak Supervision Proceedings Article
In: ICML 2021 Workshop on Machine Learning for Data: Automated Creation, Privacy, Bias, 2021.
@inproceedings{manggala2021bayesianregression,
title = {Bayesian Regression from Multiple Sources of Weak Supervision},
author = {Manggala, Putra and Hoos, Holger H. and Nalisnick, Eric},
url = {https://pmangg.github.io/papers/brfmsows_mhn_ml4data_icml.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {ICML 2021 Workshop on Machine Learning for Data: Automated Creation, Privacy, Bias},
abstract = {We describe a Bayesian approach to weakly supervised regression. Our proposed framework propagates uncertainty from the weak supervision to an aggregated predictive distribution. We use a generalized Bayes procedure to account for the supervision being weak and therefore likely misspecified.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
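One way to picture the aggregation step is precision-weighted pooling of per-source Gaussian predictions, with a per-source trust weight as a crude stand-in for the generalized-Bayes treatment of misspecification (our sketch, not the paper's procedure):

import numpy as np

def pooled_gaussian(means, variances, trust):
    """Precision-weighted pooling of Gaussian predictions from several weak
    sources; `trust` in [0, 1] down-weights sources suspected of being
    misspecified (a crude stand-in for a generalized-Bayes learning rate)."""
    means, variances, trust = map(np.asarray, (means, variances, trust))
    precisions = trust / variances
    var = 1.0 / precisions.sum()
    return var * (precisions * means).sum(), var

if __name__ == "__main__":
    # the third, distrusted source barely moves the pooled mean
    print(pooled_gaussian([2.0, 3.0, 10.0], [1.0, 1.0, 4.0], [1.0, 1.0, 0.2]))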
Steging C; Renooij S; Verheij B
Discovering the Rationale of Decisions: Experiments on Aligning Learning and Reasoning Proceedings Article
In: 4th EXplainable AI in Law Workshop (XAILA 2021), pp. 235–239, ACM, 2021.
@inproceedings{StegingXAILA21,
title = {Discovering the Rationale of Decisions: Experiments on Aligning Learning and Reasoning},
author = {Cor Steging and Silja Renooij and Bart Verheij},
url = {https://arxiv.org/abs/2105.06758},
year = {2021},
date = {2021-01-01},
booktitle = {4th EXplainable AI in Law Workshop (XAILA 2021)},
pages = {235–239},
publisher = {ACM},
abstract = {In AI and law, systems that are designed for decision support should be explainable when pursuing justice. In order for these systems to be fair and responsible, they should make correct decisions and make them using a sound and transparent rationale. In this paper, we introduce a knowledge-driven method for model-agnostic rationale evaluation using dedicated test cases, similar to unit-testing in professional software development. We apply this new method in a set of machine learning experiments aimed at extracting known knowledge structures from artificial datasets from fictional and non-fictional legal settings. We show that our method allows us to analyze the rationale of black-box machine learning systems by assessing which rationale elements are learned or not. Furthermore, we show that the rationale can be adjusted using tailor-made training data based on the results of the rationale evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vessies M B; Vadgama S P; van de Leur R R; Doevendans P A F M; Hassink R J; Bekkers E; van Es R
Interpretable ECG classification via a query-based latent space traversal (qLST) Journal Article
In: CoRR, vol. abs/2111.07386, 2021.
@article{DBLP:journals/corr/abs-2111-07386,
title = {Interpretable ECG classification via a query-based latent space traversal (qLST)},
author = {Melle B. Vessies and Sharvaree P. Vadgama and Rutger R. van de Leur and Pieter A. F. M. Doevendans and Rutger J. Hassink and Erik Bekkers and René van Es},
url = {https://arxiv.org/abs/2111.07386},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2111.07386},
abstract = {Electrocardiography (ECG) is an effective and non-invasive diagnostic tool that measures the electrical activity of the heart. Interpretation of ECG signals to detect various abnormalities is a challenging task that requires expertise. Recently, the use of deep neural networks for ECG classification to aid medical practitioners has become popular, but their black box nature hampers clinical implementation. Several saliency-based interpretability techniques have been proposed, but they only indicate the location of important features and not the actual features. We present a novel interpretability technique called qLST, a query-based latent space traversal technique that is able to provide explanations for any ECG classification model. With qLST, we train a neural network that learns to traverse in the latent space of a variational autoencoder trained on a large university hospital dataset with over 800,000 ECGs annotated for 28 diseases. We demonstrate through experiments that we can explain different black box classifiers by generating ECGs through these traversals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
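The traversal idea can be sketched as follows (our simplification: qLST learns the traversal with a query-conditioned neural network, whereas this sketch sweeps a fixed latent direction and records the classifier's response to the decoded ECGs):

import torch

def latent_sweep(decoder, classifier, z, direction, alphas):
    """Decode points along a latent direction and record the classifier's
    output at each step; how the prediction changes with the generated
    signals is the explanatory evidence."""
    with torch.no_grad():
        return [(a, classifier(decoder(z + a * direction))) for a in alphas]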