Prakken H; Ratsma R
A top-level model of case-based argumentation for explanation: Formalisation and experiments Journal Article
In: Argument and Computation, vol. 13, pp. 159–194, 2022.
@article{p+r22,
title = {A top-level model of case-based argumentation for explanation: Formalisation and experiments},
author = {H. Prakken and R. Ratsma},
url = {https://content.iospress.com/articles/argument-and-computation/aac210009},
year = {2022},
date = {2022-01-01},
journal = {Argument and Computation},
volume = {13},
pages = {159–194},
abstract = {This paper proposes a formal top-level model of explaining the outputs of machine-learning-based decision-making applications and evaluates it experimentally with three data sets. The model draws on AI & law research on argumentation with cases, which models how lawyers draw analogies to past cases and discuss their relevant similarities and differences in terms of relevant factors and dimensions in the problem domain. A case-based approach is natural since the input data of machine-learning applications can be seen as cases. While the approach is motivated by legal decision making, it also applies to other kinds of decision making, such as commercial decisions about loan applications or employee hiring, as long as the outcome is binary and the input conforms to this factor- or dimension format. The model is top-level in that it can be extended with more refined accounts of similarities and differences between cases. It is shown to overcome several limitations of similar argumentation-based explanation models, which only have binary features and do not represent the tendency of features towards particular outcomes. The results of the experimental evaluation studies indicate that the model may be feasible in practice, but that further development and experimentation is needed to confirm its usefulness as an explanation model. Main challenges here are selecting from a large number of possible explanations, reducing the number of features in the explanations and adding more meaningful information to them. It also remains to be investigated how suitable our approach is for explaining non-linear models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
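
As a minimal illustration of the kind of case-based explanation the abstract above describes, the sketch below (Python; the factor names, the overlap-based similarity, and the function names are illustrative assumptions, not the authors' model) cites the most similar precedent for a binary outcome and reports shared and differing factors:

def explain(focus_factors, case_base):
    """case_base: list of (factors, outcome) pairs; factors are sets."""
    best, best_overlap = None, -1
    for factors, outcome in case_base:
        overlap = len(focus_factors & factors)
        if overlap > best_overlap:
            best, best_overlap = (factors, outcome), overlap
    precedent_factors, outcome = best
    return {
        "predicted_outcome": outcome,
        "shared_factors": focus_factors & precedent_factors,
        "differences": precedent_factors ^ focus_factors,  # symmetric difference
    }

case_base = [({"high_income", "stable_job"}, "grant_loan"),
             ({"high_debt"}, "deny_loan")]
print(explain({"high_income", "high_debt", "stable_job"}, case_base))
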
Çelikok M M; Oliehoek F A; Kaski S
Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 235–243, International Foundation for Autonomous Agents and Multiagent Systems, Virtual Event, New Zealand, 2022, ISBN: 9781450392136.
@inproceedings{10.5555/3535850.3535878,
title = {Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs},
author = {Çelikok, Mustafa Mert and Oliehoek, Frans A. and Kaski, Samuel},
url = {https://ifaamas.org/Proceedings/aamas2022/pdfs/p235.pdf},
isbn = {9781450392136},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {235–243},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Virtual Event, New Zealand},
series = {AAMAS '22},
abstract = {Centaurs are half-human, half-AI decision-makers where the AI's goal is to complement the human. To do so, the AI must be able to recognize the goals and constraints of the human and have the means to help them. We present a novel formulation of the interaction between the human and the AI as a sequential game where the agents are modelled using Bayesian best-response models. We show that in this case the AI's problem of helping bounded-rational humans make better decisions reduces to a Bayes-adaptive POMDP. In our simulated experiments, we consider an instantiation of our framework for humans who are subjectively optimistic about the AI's future behaviour. Our results show that when equipped with a model of the human, the AI can infer the human's bounds and nudge them towards better decisions. We discuss ways in which the machine can learn to improve upon its own limitations as well with the help of the human. We identify a novel trade-off for centaurs in partially observable tasks: for the AI's actions to be acceptable to the human, the machine must make sure their beliefs are sufficiently aligned, but aligning beliefs might be costly. We present a preliminary theoretical analysis of this trade-off and its dependence on task structure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
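
The Bayesian best-response idea can be illustrated with a toy posterior update over hypothesised human models; the sketch below is a simplification with invented types, likelihoods, and utilities, not the paper's Bayes-adaptive POMDP formulation:

import numpy as np

types = ["optimistic", "cautious"]            # hypothesised human models
prior = np.array([0.5, 0.5])
# P(human action | type): rows = types, cols = actions {risky, safe}
likelihood = np.array([[0.8, 0.2],
                       [0.3, 0.7]])
# AI utility of its own actions {nudge, comply}, per human type
utility = np.array([[0.2, 0.9],   # vs optimistic human: complying works well
                    [0.8, 0.1]])  # vs cautious human: nudging helps more

observed_action = 0                           # human chose "risky"
posterior = prior * likelihood[:, observed_action]
posterior /= posterior.sum()

best_ai_action = np.argmax(posterior @ utility)   # best response in expectation
print(posterior, ["nudge", "comply"][best_ai_action])
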
Prakken H
Formalising an aspect of argument strength: degrees of attackability Book Section
In: Toni, Francesca (Ed.): Computational Models of Argument. Proceedings of COMMA 2022, pp. 296–307, IOS Press, Amsterdam, 2022.
@incollection{hp22gradual,
title = {Formalising an aspect of argument strength: degrees of attackability},
author = {H. Prakken},
editor = {Francesca Toni and others},
url = {https://ebooks.iospress.nl/doi/10.3233/FAIA220161},
doi = {10.3233/FAIA220161},
year = {2022},
date = {2022-01-01},
booktitle = {Computational Models of Argument. Proceedings of COMMA 2022},
pages = {296–307},
publisher = {IOS Press},
address = {Amsterdam},
abstract = {This paper formally studies a notion of dialectical argument strength in terms of the number of ways in which an argument can be successfully attacked in expansions of an abstract argumentation framework. The proposed model is abstract but its design is motivated by the wish to avoid overly limiting assumptions that may not hold in particular dialogue contexts or in particular structured accounts of argumentation. It is shown that most principles for gradual argument acceptability proposed in the literature fail to hold for the proposed notion of dialectical strength, which clarifies their rational foundations and highlights the importance of distinguishing between logical, dialectical and rhetorical argument strength.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
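
For readers unfamiliar with abstract argumentation frameworks, the sketch below computes a grounded extension and counts an argument's undefeated attackers. This is only a crude proxy: the paper defines dialectical strength over expansions of the framework, which this toy does not model.

def grounded(args, attacks):
    attackers = {a: {b for (b, c) in attacks if c == a} for a in args}
    ext = set()
    while True:
        # an argument is acceptable iff all its attackers are attacked by ext
        new = {a for a in args
               if all(any((e, b) in attacks for e in ext) for b in attackers[a])}
        if new == ext:
            return ext
        ext = new

args = {"A", "B", "C"}
attacks = {("B", "A"), ("C", "B")}
ext = grounded(args, attacks)
undefeated_attackers_of_A = {b for (b, c) in attacks if c == "A"
                             and not any((e, b) in attacks for e in ext)}
print(ext, len(undefeated_attackers_of_A))
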
van Woerkom W; Grossi D; Prakken H; Verheij B
Justification in Case-Based Reasoning Proceedings Article
In: Čyras, Kristijonas; Kampik, Timotheus; Cocarascu, Oana; Rago, Antonio (Ed.): Proceedings of the First International Workshop on Argumentation for eXplainable AI, pp. 1–13, CEUR Workshop Proceedings, 2022.
@inproceedings{vanwoerkom2022,
title = {Justification in Case-Based Reasoning},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Čyras, Kristijonas and Kampik, Timotheus and Cocarascu, Oana and Rago, Antonio},
url = {https://ceur-ws.org/Vol-3209/5942.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the First International Workshop on Argumentation for eXplainable AI},
pages = {1–13},
publisher = {CEUR Workshop Proceedings},
abstract = {The explanation and justification of decisions is an important subject in contemporary data-driven automated methods. Case-based argumentation has been proposed as the formal background for the explanation of data-driven automated decision making. In particular, a method was developed in recent work based on the theory of precedential constraint which reasons from a case base, given by the training data of the machine learning system, to produce a justification for the outcome of a focus case. An important role is played in this method by the notions of citability and compensation, and in the present work we develop these in more detail. Special attention is paid to the notion of compensation; we formally specify the notion and identify several of its desirable properties. These considerations reveal a refined formal perspective on the explanation method as an extension of the theory of precedential constraint with a formal notion of justification.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
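
The a-fortiori test at the heart of the theory of precedential constraint can be sketched as follows (a simplified binary-factor reading with invented factor names; the paper's refined notions of citability and compensation are not modelled):

def cites(precedent, focus):
    """precedent: (pro factors, con factors, outcome) w.r.t. outcome 1;
    focus: (pro factors, con factors)."""
    pro_p, con_p, outcome = precedent
    pro_f, con_f = focus
    if outcome == 1:   # focus is at least as strong for outcome 1
        return pro_p <= pro_f and con_f <= con_p
    else:              # symmetric test for outcome 0
        return con_p <= con_f and pro_f <= pro_p

precedent = ({"f1"}, {"f3"}, 1)        # decided for outcome 1
focus = ({"f1", "f2"}, set())          # more pro factors, fewer con factors
print(cites(precedent, focus))         # True: outcome 1 follows a fortiori
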
Hopner N; Tiddi I; van Hoof H
Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods Proceedings Article
In: De Raedt, Luc (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 3050–3056, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{ijcai2022p423,
title = {Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods},
author = {Hopner, Niklas and Tiddi, Ilaria and van Hoof, Herke},
editor = {Luc De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/423},
doi = {10.24963/ijcai.2022/423},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {3050–3056},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {Enabling reinforcement learning (RL) agents to leverage a knowledge base while learning from experience promises to advance RL in knowledge intensive domains. However, it has proven difficult to leverage knowledge that is not manually tailored to the environment. We propose to use the subclass relationships present in open-source knowledge graphs to abstract away from specific objects. We develop a residual policy gradient method that is able to integrate knowledge across different abstraction levels in the class hierarchy. Our method results in improved sample efficiency and generalisation to unseen objects in commonsense games, but we also investigate failure modes, such as excessive noise in the extracted class knowledge or environments with little class structure.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
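
The residual idea can be illustrated as class-level action preferences plus an object-specific correction; a toy sketch, not the paper's method:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

# class-level logits are shared by all objects of a class (e.g. "food");
# the object-level residual specialises the policy for one object.
class_logits = np.array([2.0, 0.0, -1.0])
object_residual = np.array([0.3, -0.2, 0.0])
policy = softmax(class_logits + object_residual)
print(policy)   # action probabilities for one state
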
van der Meer M; Reuver M; Khurana U; Krause L; Santamaría S B
Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction Miscellaneous
2022.
@misc{https://doi.org/10.48550/arxiv.2209.08966,
title = {Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction},
author = {van der Meer, Michiel and Reuver, Myrthe and Khurana, Urja and Krause, Lea and Santamaría, Selene Báez},
url = {https://arxiv.org/abs/2209.08966},
doi = {10.48550/ARXIV.2209.08966},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {This paper describes our winning contribution to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
van Woerkom W; Grossi D; Prakken H; Verheij B
Landmarks in Case-Based Reasoning: From Theory to Data Proceedings Article
In: Schlobach, Stefan; Pérez-Ortiz, María; Tielman, Myrthe (Ed.): HHAI2022: Augmenting Human Intellect, pp. 212–224, IOS Press, 2022.
@inproceedings{woerkom2022landmarks,
title = {Landmarks in Case-Based Reasoning: From Theory to Data},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Schlobach, Stefan and Pérez-Ortiz, María and Tielman, Myrthe},
url = {https://ebooks.iospress.nl/volumearticle/60868},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
volume = {354},
pages = {212–224},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {Widespread application of uninterpretable machine learning systems for sensitive purposes has spurred research into elucidating the decision making process of these systems. These efforts have their background in many different disciplines, one of which is the field of AI & law. In particular, recent works have observed that machine learning training data can be interpreted as legal cases. Under this interpretation the formalism developed to study case law, called the theory of precedential constraint, can be used to analyze the way in which machine learning systems draw on training data – or should draw on them – to make decisions. These works predominantly stay on the theoretical level, hence in the present work the formalism is evaluated on a real world dataset. Through this analysis we identify a significant new concept which we call landmark cases, and use it to characterize the types of datasets that are more or less suitable to be described by the theory.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
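
One hedged reading of the landmark notion: a case is a landmark (here, relative to a single focus case) if removing it changes which outcome the case base forces. The forcing test and data below are illustrative assumptions, not the paper's exact definition:

def forces(case_base, focus, outcome):
    """True iff some precedent forces `outcome` for `focus` a fortiori."""
    pro_f, con_f = focus
    def a_fortiori(p):
        pro_p, con_p, o = p
        if o != outcome:
            return False
        if o == 1:
            return pro_p <= pro_f and con_f <= con_p
        return con_p <= con_f and pro_f <= pro_p
    return any(a_fortiori(p) for p in case_base)

case_base = [({"f1", "f2"}, set(), 1),   # decided for outcome 1
             ({"f4"}, set(), 0)]         # decided for outcome 0
focus = ({"f1", "f2"}, set())
for c in case_base:
    rest = [p for p in case_base if p is not c]
    if forces(case_base, focus, 1) != forces(rest, focus, 1):
        print("landmark for this focus case:", c)
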
Kökciyan N; Yolum P
Taking Situation-Based Privacy Decisions: Privacy Assistants Working with Humans Proceedings Article
In: Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI), pp. 703–709, 2022.
@inproceedings{pas-ijcai-2022,
title = {Taking Situation-Based Privacy Decisions: Privacy Assistants Working with Humans},
author = {Kökciyan, Nadin and Yolum, Pinar},
url = {https://www.ijcai.org/proceedings/2022/0099.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI)},
pages = {703–709},
abstract = {Privacy on the Web is typically managed by giving consent to individual Websites for various aspects of data usage. This paradigm requires too much human effort and thus is impractical for Internet of Things (IoT) applications where humans interact with many new devices on a daily basis. Ideally, software privacy assistants can help by making privacy decisions in different situations on behalf of the users. To realize this, we propose an agent-based model for a privacy assistant. The model identifies the contexts that a situation implies and computes the trustworthiness of these contexts. Contrary to traditional trust models that capture trust in an entity by observing a large number of interactions, our proposed model can assess the trustworthiness even if the user has not interacted with the particular device before. Moreover, our model can decide which situations are inherently ambiguous and thus can request the human to make the decision. We evaluate various aspects of the model using a real-life data set and report adjustments that are needed to serve different types of users well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayci G; Şensoy M; Özgür A; Yolum P
Uncertainty-Aware Personal Assistant for Making Personalized Privacy Decisions Journal Article
In: ACM Transactions on Internet Technology, 2022, (In press).
@article{pure-2022,
title = {Uncertainty-Aware Personal Assistant for Making Personalized Privacy Decisions},
author = {Ayci, Gönül and Şensoy, Murat and Özgür, Arzucan and Yolum, Pinar},
url = {https://doi.org/10.1145/3561820},
doi = {10.1145/3561820},
year = {2022},
date = {2022-01-01},
journal = {ACM Transactions on Internet Technology},
publisher = {Association for Computing Machinery},
abstract = {Many software systems, such as online social networks enable users to share information about themselves. While the action of sharing is simple, it requires an elaborate thought process on privacy: what to share, with whom to share, and for what purposes. Thinking about these for each piece of content to be shared is tedious. Recent approaches to tackle this problem build personal assistants that can help users by learning what is private over time and recommending privacy labels such as private or public to individual content that a user considers sharing. However, privacy is inherently ambiguous and highly personal. Existing approaches to recommend privacy decisions do not address these aspects of privacy sufficiently. Ideally, a personal assistant should be able to adjust its recommendation based on a given user, considering that user’s privacy understanding. Moreover, the personal assistant should be able to assess when its recommendation would be uncertain and let the user make the decision on her own. Accordingly, this paper proposes a personal assistant that uses evidential deep learning to classify content based on its privacy label. An important characteristic of the personal assistant is that it can model its uncertainty in its decisions explicitly, determine that it does not know the answer, and delegate from making a recommendation when its uncertainty is high. By factoring in user’s own understanding of privacy, such as risk factors or own labels, the personal assistant can personalize its recommendations per user. We evaluate our proposed personal assistant using a well-known data set. Our results show that our personal assistant can accurately identify uncertain cases, personalize them to its user’s needs, and thus helps users preserve their privacy well.},
note = {In press},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
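
The delegation behaviour described above can be sketched with standard Dirichlet-based evidential uncertainty (vacuity mass u = K / sum(alpha)); the threshold and evidence counts below are invented, and the paper's model may differ:

import numpy as np

def recommend(evidence, threshold=0.5, labels=("private", "public")):
    alpha = np.asarray(evidence, dtype=float) + 1.0   # Dirichlet parameters
    uncertainty = len(alpha) / alpha.sum()            # vacuity mass
    if uncertainty > threshold:
        return "delegate to user", uncertainty
    return labels[int(np.argmax(alpha))], uncertainty

print(recommend([9.0, 1.0]))   # strong evidence: recommend "private"
print(recommend([0.4, 0.6]))   # little evidence: delegate the decision
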
Erdogan E; Dignum F; Verbrugge R; Yolum P
Abstracting Minds: Computational Theory of Mind for Human-Agent Collaboration Book Section
In: HHAI2022: Augmenting Human Intellect, pp. 199–211, IOS Press, 2022.
@incollection{erdogan2022abstracting,
title = {Abstracting Minds: Computational Theory of Mind for Human-Agent Collaboration},
author = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, Pinar},
url = {http://dx.doi.org/10.3233/FAIA220199},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {199–211},
publisher = {IOS Press},
abstract = {Theory of mind refers to the human ability to reason about mental content of other people such as beliefs, desires, and goals. In everyday life, people rely on their theory of mind to understand, explain, and predict the behaviour of others. Having a theory of mind is especially useful when people collaborate, since individuals can then reason on what the other individual knows as well as what reasoning they might do. Realization of hybrid intelligence, where an agent collaborates with a human, will require the agent to be able to do similar reasoning through computational theory of mind. Accordingly, this paper provides a mechanism for computational theory of mind based on abstractions of single beliefs into higher-level concepts. These concepts can correspond to social norms, roles, as well as values. Their use in decision making serves as a heuristic to choose among interactions, thus facilitating collaboration on decisions. Using examples from the medical domain, we demonstrate how having such a theory of mind enables an agent to interact with humans efficiently and can increase the quality of the decisions humans make.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Erdogan E; Dignum F; Verbrugge R; Yolum P
Computational Theory of Mind for Human-Agent Coordination Proceedings Article
In: Ajmeri, Nirav; Morris Martin, Andreasa; Savarimuthu, Bastin Tony Roy (Ed.): Coordination, Organizations, Institutions, Norms, and Ethics for Governance of Multi-Agent Systems XV, pp. 92–108, Springer International Publishing, 2022.
@inproceedings{erdogan+2022,
title = {Computational Theory of Mind for Human-Agent Coordination},
author = {Erdogan, Emre and Dignum, Frank and Verbrugge, Rineke and Yolum, Pinar},
editor = {Ajmeri, Nirav and Morris Martin, Andreasa and Savarimuthu, Bastin Tony Roy},
url = {http://dx.doi.org/10.1007/978-3-031-20845-4_6},
year = {2022},
date = {2022-01-01},
booktitle = {Coordination, Organizations, Institutions, Norms, and Ethics for Governance of Multi-Agent Systems XV},
pages = {92–108},
publisher = {Springer International Publishing},
abstract = {In everyday life, people often depend on their theory of mind, i.e., their ability to reason about unobservable mental content of others to understand, explain, and predict their behaviour. Many agent-based models have been designed to develop computational theory of mind and analyze its effectiveness in various tasks and settings. However, most existing models are not generic (e.g., only applied in a given setting), not feasible (e.g., require too much information to be processed), or not human-inspired (e.g., do not capture the behavioral heuristics of humans). This hinders their applicability in many settings. Accordingly, we propose a new computational theory of mind, which captures the human decision heuristics of reasoning by abstracting individual beliefs about others. We specifically study computational affinity and show how it can be used in tandem with theory of mind reasoning when designing agent models for human-agent negotiation. We perform two-agent simulations to analyze the role of affinity in getting to agreements when there is a bound on the time to be spent for negotiating. Our results suggest that modeling affinity can ease the negotiation process by decreasing the number of rounds needed for an agreement as well as yield a higher benefit for agents with theory of mind reasoning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Michelini M; Haret A; Grossi D
Group Wisdom at a Price: Jury Theorems with Costly Information Proceedings Article
In: De Raedt, Luc (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 419–425, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{michelini22group,
title = {Group Wisdom at a Price: Jury Theorems with Costly Information},
author = {Michelini, Matteo and Haret, Adrian and Grossi, Davide},
editor = {Luc De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/60},
doi = {10.24963/ijcai.2022/60},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {419–425},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {We study epistemic voting on binary issues where voters are characterized by their competence, i.e., the probability of voting for the correct alternative, and can choose between two actions: voting or abstaining. In our setting voting involves the expenditure of some effort, which is required to achieve the appropriate level of competence, whereas abstention carries no effort. We model this scenario as a game and characterize its equilibria under several variations. Our results show that when agents are aware of everyone's incentives, then the addition of effort may lead to Nash equilibria where wisdom of the crowds is lost. We further show that if agents' awareness of each other is constrained by a social network, the topology of the network may actually mitigate this effect.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
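
A worked toy example of the trade-off: the Condorcet-style gain in majority accuracy from exerting effort (competence p instead of chance level 0.5), weighed against the effort's cost. The numbers are illustrative; the paper's game-theoretic analysis is richer.

from math import comb

def majority_correct(n, p):
    """P(simple majority of n independent voters with competence p is correct)."""
    return sum(comb(n, k) * p**k * (1 - p)**(n - k)
               for k in range(n // 2 + 1, n + 1))

n, p, cost = 11, 0.6, 0.05
gain = majority_correct(n, p) - majority_correct(n, 0.5)
print(majority_correct(n, p), gain, gain - cost)   # effort pays off here
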
Khurana U; Nalisnick E; Fokkens A
How Emotionally Stable is ALBERT? Testing Robustness with Stochastic Weight Averaging on a Sentiment Analysis Task Proceedings Article
In: Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems, pp. 16–31, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
@inproceedings{khurana-etal-2021-emotionally,
title = {How Emotionally Stable is ALBERT? Testing Robustness with Stochastic Weight Averaging on a Sentiment Analysis Task},
author = {Khurana, Urja and Nalisnick, Eric and Fokkens, Antske},
url = {https://aclanthology.org/2021.eval4nlp-1.3},
year = {2021},
date = {2021-11-01},
booktitle = {Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems},
pages = {16–31},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
abstract = {Despite their success, modern language models are fragile. Even small changes in their training pipeline can lead to unexpected results. We study this phenomenon by examining the robustness of ALBERT (Lan et al., 2020) in combination with Stochastic Weight Averaging (SWA)—a cheap way of ensembling—on a sentiment analysis task (SST-2). In particular, we analyze SWA's stability via CheckList criteria (Ribeiro et al., 2020), examining the agreement on errors made by models differing only in their random seed. We hypothesize that SWA is more stable because it ensembles model snapshots taken along the gradient descent trajectory. We quantify stability by comparing the models' mistakes with Fleiss' Kappa (Fleiss, 1971) and overlap ratio scores. We find that SWA reduces error rates in general; yet the models still suffer from their own distinct biases (according to CheckList).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
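
Two ingredients of the study, sketched with toy data (not the authors' pipeline): SWA as an equal average of weight snapshots collected along training, and an overlap ratio between two models' error sets:

import numpy as np

def swa_average(snapshots):
    """snapshots: list of weight vectors collected during training."""
    return np.mean(np.stack(snapshots), axis=0)

def error_overlap(errors_a, errors_b):
    """errors_*: sets of example ids each model misclassified."""
    union = errors_a | errors_b
    return len(errors_a & errors_b) / len(union) if union else 1.0

w_swa = swa_average([np.array([0.9, -1.2]), np.array([1.1, -0.8])])
print(w_swa, error_overlap({1, 2, 3}, {2, 3, 4}))
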
Dobbe R; Krendl Gilbert T; Mintz Y
Hard choices in artificial intelligence Journal Article
In: Artificial Intelligence, vol. 300, 2021, ISSN: 0004-3702.
@article{dobbe_hard_2021,
title = {Hard choices in artificial intelligence},
author = {Dobbe, Roel and Krendl Gilbert, Thomas and Mintz, Yonatan},
url = {https://www.sciencedirect.com/science/article/pii/S0004370221001065},
doi = {10.1016/j.artint.2021.103555},
issn = {0004-3702},
year = {2021},
date = {2021-11-01},
urldate = {2021-08-04},
journal = {Artificial Intelligence},
volume = {300},
abstract = {As AI systems are integrated into high stakes social domains, researchers now examine how to design and operate them in a safe and ethical manner. However, the criteria for identifying and diagnosing safety risks in complex social contexts remain unclear and contested. In this paper, we examine the vagueness in debates about the safety and ethical behavior of AI systems. We show how this vagueness cannot be resolved through mathematical formalism alone, instead requiring deliberation about the politics of development as well as the context of deployment. Drawing from a new sociotechnical lexicon, we redefine vagueness in terms of distinct design challenges at key stages in AI system development. The resulting framework of Hard Choices in Artificial Intelligence (HCAI) empowers developers by 1. identifying points of overlap between design decisions and major sociotechnical challenges; 2. motivating the creation of stakeholder feedback channels so that safety issues can be exhaustively addressed. As such, HCAI contributes to a timely debate about the status of AI development in democratic societies, arguing that deliberation should be the goal of AI Safety, not just the procedure by which it is ensured.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Koopman T; Renooij S
Persuasive Contrastive Explanations (Extended Abstract) Proceedings Article
In: Baader, F.; Bogaerts, B.; Brewka, G.; Hoffmann, J.; Lukasiewicz, T.; Potyka, N.; Toni, F. (Ed.): Proceedings of the 2nd Workshop on Explainable Logic-based Knowledge Representation (XLoKR), 2021.
@inproceedings{xlokr-koopman21,
title = {Persuasive Contrastive Explanations (Extended Abstract)},
author = {Koopman, Tara and Renooij, Silja},
editor = {Baader, F. and Bogaerts, B. and Brewka, G. and Hoffmann, J. and Lukasiewicz, T. and Potyka, N. and Toni, F.},
url = {https://xlokr21.ai.vub.ac.be/papers/16/paper.pdf},
year = {2021},
date = {2021-11-01},
booktitle = {Proceedings of the 2nd Workshop on Explainable Logic-based Knowledge Representation (XLoKR)},
abstract = {Explanation in Artificial Intelligence is often focused on providing reasons for why a model under consideration and its outcome are correct. Recently, research in explainable machine learning has initiated a shift in focus on including so-called counterfactual explanations. In this paper we propose to combine both types of explanation into a persuasive contrastive explanation that aims to provide an answer to the question Why outcome t instead of t'? posed by a user. In addition, we propose a model-agnostic algorithm for computing persuasive contrastive explanations from AI systems with few input variables.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Koopman T; Renooij S
Persuasive Contrastive Explanations for Bayesian Networks Proceedings Article
In: Vejnarova, J.; Wilson, N. (Ed.): Proceedings of the Sixteenth European Conference on Symbolic and Quantitative Approaches to Reasoning with Uncertainty (ECSQARU), pp. 229–242, Springer, Cham, 2021.
@inproceedings{ecsqaru-koopman21,
title = {Persuasive Contrastive Explanations for Bayesian Networks},
author = {Koopman, Tara and Renooij, Silja},
editor = {Vejnarova, J. and Wilson, N.},
url = {https://webspace.science.uu.nl/~renoo101/Prof/PDF/Conf/ecsqaru2021-final.pdf},
doi = {10.1007/978-3-030-86772-0_17},
year = {2021},
date = {2021-09-01},
booktitle = {Proceedings of the Sixteenth European Conference on Symbolic and Quantitative Approaches to Reasoning with Uncertainty (ECSQARU)},
volume = {12897},
pages = {229–242},
publisher = {Springer, Cham},
series = {Lecture Notes in Computer Science},
abstract = {Explanation in Artificial Intelligence is often focused on providing reasons for why a model under consideration and its outcome are correct. Recently, research in explainable machine learning has initiated a shift in focus on including so-called counterfactual explanations. In this paper we propose to combine both types of explanation in the context of explaining Bayesian networks. To this end we introduce persuasive contrastive explanations that aim to provide an answer to the question Why outcome t instead of t'? posed by a user. In addition, we propose an algorithm for computing persuasive contrastive explanations. Both our definition of persuasive contrastive explanation and the proposed algorithm can be employed beyond the current scope of Bayesian networks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
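
With few input variables, a persuasive contrastive explanation can be searched for by brute force. The sketch below is model-agnostic in that it only queries the model as a black box; the variable names and domains are invented, and it is a sketch in the spirit of, not a reproduction of, the paper's algorithm:

from itertools import combinations, product

def contrastive(model, inputs, domains, target):
    """Find a minimal set of input changes turning the outcome into `target`."""
    names = list(inputs)
    for size in range(1, len(names) + 1):
        for subset in combinations(names, size):
            for values in product(*(domains[v] for v in subset)):
                candidate = dict(inputs, **dict(zip(subset, values)))
                if model(candidate) == target:
                    return {v: candidate[v] for v in subset}
    return None

model = lambda x: "approve" if x["income"] == "high" and not x["debt"] else "reject"
inputs = {"income": "low", "debt": True}
domains = {"income": ["low", "high"], "debt": [True, False]}
print(contrastive(model, inputs, domains, "approve"))
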
Rahman M A; Hopner N; Christianos F; Albrecht S V
Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning Proceedings Article
In: Meila, Marina; Zhang, Tong (Ed.): Proceedings of the 38th International Conference on Machine Learning, pp. 8776–8786, PMLR, 2021.
@inproceedings{pmlr-v139-rahman21a,
title = {Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning},
author = {Rahman, Muhammad A and Hopner, Niklas and Christianos, Filippos and Albrecht, Stefano V},
editor = {Meila, Marina and Zhang, Tong},
url = {https://proceedings.mlr.press/v139/rahman21a.html},
year = {2021},
date = {2021-07-01},
booktitle = {Proceedings of the 38th International Conference on Machine Learning},
volume = {139},
pages = {8776–8786},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
abstract = {Ad hoc teamwork is the challenging problem of designing an autonomous agent which can adapt quickly to collaborate with teammates without prior coordination mechanisms, including joint training. Prior work in this area has focused on closed teams in which the number of agents is fixed. In this work, we consider open teams by allowing agents with different fixed policies to enter and leave the environment without prior notification. Our solution builds on graph neural networks to learn agent models and joint-action value models under varying team compositions. We contribute a novel action-value computation that integrates the agent model and joint-action value model to produce action-value estimates. We empirically demonstrate that our approach successfully models the effects other agents have on the learner, leading to policies that robustly adapt to dynamic team compositions and significantly outperform several alternative methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Baez Santamaria S; Baier T; Kim T; Krause L; Kruijt J; Vossen P
EMISSOR: A platform for capturing multimodal interactions as Episodic Memories and Interpretations with Situated Scenario-based Ontological References Proceedings Article
In: Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR), pp. 56–77, Association for Computational Linguistics, Groningen, Netherlands (Online), 2021.
@inproceedings{baez-santamaria-etal-2021-emissor,
title = {EMISSOR: A platform for capturing multimodal interactions as Episodic Memories and Interpretations with Situated Scenario-based Ontological References},
author = {Baez Santamaria, Selene and Baier, Thomas and Kim, Taewoon and Krause, Lea and Kruijt, Jaap and Vossen, Piek},
url = {https://aclanthology.org/2021.mmsr-1.6},
year = {2021},
date = {2021-06-01},
booktitle = {Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)},
pages = {56–77},
publisher = {Association for Computational Linguistics},
address = {Groningen, Netherlands (Online)},
abstract = {We present EMISSOR: a platform to capture multimodal interactions as recordings of episodic experiences with explicit referential interpretations that also yield an episodic Knowledge Graph (eKG). The platform stores streams of multiple modalities as parallel signals. Each signal is segmented and annotated independently with interpretation. Annotations are eventually mapped to explicit identities and relations in the eKG. As we ground signal segments from different modalities to the same instance representations, we also ground different modalities across each other. Unique to our eKG is that it accepts different interpretations across modalities, sources and experiences and supports reasoning over conflicting information and uncertainties that may result from multimodal experiences. EMISSOR can record and annotate experiments in virtual and real-world settings, combine data, evaluate system behavior and performance for preset goals, but also model the accumulation of knowledge and interpretations in the Knowledge Graph as a result of these episodic experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Boomgaard G; Santamaría S B; Tiddi I; Sips R J; Szávik Z
Learning profile-based recommendations for medical search auto-complete Proceedings Article
In: Martin, Andreas; Hinkelmann, Knut; Fill, Hans-Georg; Gerber, Aurona; Lenat, Doug; Stolle, Reinhard; van Harmelen, Frank (Ed.): AAAI-MAKE 2021 Combining Machine Learning and Knowledge Engineering, pp. 1–13, CEUR-WS, 2021.
@inproceedings{boomgaard-etal-2021-learning,
title = {Learning profile-based recommendations for medical search auto-complete},
author = {Guusje Boomgaard and Selene Baez Santamaría and Ilaria Tiddi and Robert Jan Sips and Zoltán Szávik},
editor = {Andreas Martin and Knut Hinkelmann and Hans-Georg Fill and Aurona Gerber and Doug Lenat and Reinhard Stolle and Frank van Harmelen},
url = {http://ceur-ws.org/Vol-2846/paper34.pdf},
year = {2021},
date = {2021-04-10},
booktitle = {AAAI-MAKE 2021 Combining Machine Learning and Knowledge Engineering},
pages = {1–13},
publisher = {CEUR-WS},
series = {CEUR Workshop Proceedings},
abstract = {Query popularity is a main feature in web-search auto-completion. Several personalization features have been proposed to support specific users' searches, but often do not meet the privacy requirements of a medical environment (e.g. clinical trial search). Furthermore, in such specialized domains, the differences in user expertise and the domain-specific language users employ are far more widespread than in web-search. We propose a query auto-completion method based on different relevancy and diversity features, which can appropriately meet different user needs. Our method incorporates indirect popularity measures, along with graph topology and semantic features. An evolutionary algorithm optimizes relevance, diversity, and coverage to return a top-k list of query completions to the user. We evaluated our approach quantitatively and qualitatively using query log data from a clinical trial search engine, comparing the effects of different relevancy and diversity settings using domain experts. We found that syntax-based diversity has more impact on effectiveness and efficiency, graph-based diversity shows a more compact list of results, and relevancy has the most effect on indicated preferences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mehrotra S
Modelling Trust in Human-AI Interaction Proceedings Article
In: Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1826–1828, International Foundation for Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, 2021, ISBN: 9781450383073.
@inproceedings{10.5555/3463952.3464253,
title = {Modelling Trust in Human-AI Interaction},
author = {Mehrotra, Siddharth},
url = {https://pure.tudelft.nl/ws/portalfiles/portal/95731744/p1826.pdf},
isbn = {9781450383073},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1826–1828},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Virtual Event, United Kingdom},
series = {AAMAS '21},
abstract = {Trust is an important element of any interaction, but especially when we are interacting with a piece of technology which does not think like we do. Therefore, AI systems need to understand how humans trust them, and what to do to promote appropriate trust. The aim of this research is to study trust through both a formal and social lens. We will be working on formal models of trust, but with a focus on the social nature of trust in order to represent how humans trust AI. We will then employ methods from human-computer interaction research to study if these models work in practice, and what would eventually be necessary for systems to elicit appropriate levels of trust from their users. The context of this research will be AI agents which interact with their users to offer personal support.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mehrotra S; Jonker C M; Tielman M L
More Similar Values, More Trust? - The Effect of Value Similarity on Trust in Human-Agent Interaction Proceedings Article
In: Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pp. 777–783, Association for Computing Machinery, Virtual Event, USA, 2021, ISBN: 9781450384735.
@inproceedings{10.1145/3461702.3462576,
title = {More Similar Values, More Trust? - The Effect of Value Similarity on Trust in Human-Agent Interaction},
author = {Mehrotra, Siddharth and Jonker, Catholijn M. and Tielman, Myrthe L.},
url = {https://doi.org/10.1145/3461702.3462576},
doi = {10.1145/3461702.3462576},
isbn = {9781450384735},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society},
pages = {777–783},
publisher = {Association for Computing Machinery},
address = {Virtual Event, USA},
series = {AIES '21},
abstract = {As AI systems are increasingly involved in decision making, it also becomes important that they elicit appropriate levels of trust from their users. To achieve this, it is first important to understand which factors influence trust in AI. We identify that a research gap exists regarding the role of personal values in trust in AI. Therefore, this paper studies how human and agent Value Similarity (VS) influences a human's trust in that agent. To explore this, 89 participants teamed up with five different agents, which were designed with varying levels of value similarity to that of the participants. In a within-subjects, scenario-based experiment, agents gave suggestions on what to do when entering the building to save a hostage. We analyzed the agent's scores on subjective value similarity, trust and qualitative data from open-ended questions. Our results show that agents rated as having more similar values also scored higher on trust, indicating a positive effect between the two. With this result, we add to the existing understanding of human-agent trust by providing insight into the role of value-similarity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Centeio Jorge C; Mehrotra S; Tielman M L; Jonker C M
Trust should correspond to trustworthiness: a formalization of appropriate, mutual trust in human-agent teams Proceedings Article
In: Proceedings of the 22nd International Workshop on Trust in Agent Societies, London, UK, 2021.
@inproceedings{jorge2021trust,
title = {Trust should correspond to trustworthiness: a formalization of appropriate, mutual trust in human-agent teams},
author = {Centeio Jorge, Carolina and Mehrotra, Siddharth and Tielman, Myrthe L. and Jonker, Catholijn M.},
url = {https://ceur-ws.org/Vol-3022/paper4.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 22nd International Workshop on Trust in Agent Societies},
address = {London, UK},
abstract = {In human-agent teams, how one teammate trusts another teammate should correspond to the latter’s actual trustworthiness, creating what we would call appropriate mutual trust. Although this sounds obvious, the notion of appropriate mutual trust for human-agent teamwork lacks a formal definition. In this article, we propose a formalization which represents trust as a belief about trustworthiness. Then, we address mutual trust, and pose that agents can use beliefs about trustworthiness to represent how they trust their human teammates, as well as to reason about how their human teammates trust them. This gives us a formalization with nested beliefs about beliefs of trustworthiness. Next, we highlight that mutual trust should also be appropriate, where we define appropriate trust in an agent as the trust which corresponds directly to that agent’s trustworthiness. Finally, we explore how agents can define their own trustworthiness, using the concepts of ability, benevolence and integrity. This formalization of appropriate mutual trust can form the base for developing agents which can promote such trust.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
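
In the notation sketch below (ours, hedged; the paper's formalization differs in detail), trust is a belief about trustworthiness, trust is appropriate when that belief matches actual trustworthiness, and mutuality adds one level of nesting:

% trust as agent i's belief about j's trustworthiness (hypothetical notation)
\[ \mathit{trust}_i(j) = B_i(\mathit{tw}_j) \]
% appropriate trust: the belief matches actual trustworthiness
\[ B_i(\mathit{tw}_j) = \mathit{tw}_j \]
% mutual trust adds nested beliefs: j's belief about i's trust in j
\[ B_j\big(B_i(\mathit{tw}_j)\big) \]
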
Steging C; Renooij S; Verheij B
Discovering the Rationale of Decisions: Towards a Method for Aligning Learning and Reasoning Proceedings Article
In: Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law, pp. 235–239, Association for Computing Machinery, São Paulo, Brazil, 2021, ISBN: 9781450385268.
@inproceedings{StegingICAIL21,
title = {Discovering the Rationale of Decisions: Towards a Method for Aligning Learning and Reasoning},
author = {Steging, Cor and Renooij, Silja and Verheij, Bart},
url = {https://doi.org/10.1145/3462757.3466059},
doi = {10.1145/3462757.3466059},
isbn = {9781450385268},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law},
pages = {235–239},
publisher = {Association for Computing Machinery},
address = {São Paulo, Brazil},
series = {ICAIL '21},
abstract = {In AI and law, systems that are designed for decision support should be explainable when pursuing justice. In order for these systems to be fair and responsible, they should make correct decisions and make them using a sound and transparent rationale. In this paper, we introduce a knowledge-driven method for model-agnostic rationale evaluation using dedicated test cases, similar to unit-testing in professional software development. We apply this new quantitative human-in-the-loop method in a machine learning experiment aimed at extracting known knowledge structures from artificial datasets from a real-life legal setting. We show that our method allows us to analyze the rationale of black box machine learning systems by assessing which rationale elements are learned or not. Furthermore, we show that the rationale can be adjusted using tailor-made training data based on the results of the rationale evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
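
The unit-test analogy can be made concrete: enumerate dedicated test cases for a known rationale and check a black-box model against each of its conditions. The rule and the flawed model below are invented stand-ins, not the paper's legal datasets:

import itertools

# the known rationale: liability requires a duty of care that was
# violated without justification (an invented stand-in rule)
known_rule = lambda x: x["duty"] and x["violated"] and not x["justified"]

def rationale_report(model):
    failures = []
    for duty, violated, justified in itertools.product([False, True], repeat=3):
        case = {"duty": duty, "violated": violated, "justified": justified}
        if model(case) != known_rule(case):   # dedicated test case
            failures.append(case)
    return failures

# a flawed black box that ignores the 'justified' element of the rationale
flawed = lambda x: x["duty"] and x["violated"]
print(rationale_report(flawed))   # fails exactly where 'justified' matters
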
van Bekkum M; de Boer M; van Harmelen F; Meyer-Vitali A; ten Teije A
Modular design patterns for hybrid learning and reasoning systems Journal Article
In: Applied Intelligence, vol. 51, no. 9, pp. 6528–6546, 2021.
@article{DBLP:journals/apin/BekkumBHMT21,
title = {Modular design patterns for hybrid learning and reasoning systems},
author = {Michael van Bekkum and Maaike de Boer and Frank van Harmelen and André Meyer{-}Vitali and Annette ten Teije},
url = {https://link.springer.com/article/10.1007/s10489-021-02394-3},
doi = {10.1007/s10489-021-02394-3},
year = {2021},
date = {2021-01-01},
journal = {Applied Intelligence},
volume = {51},
number = {9},
pages = {6528–6546},
abstract = {The unification of statistical (data-driven) and symbolic (knowledge-driven) methods is widely recognised as one of the key challenges of modern AI. Recent years have seen large number of publications on such hybrid neuro-symbolic AI systems. That rapidly growing literature is highly diverse and mostly empirical, and is lacking a unifying view of the large variety of these hybrid systems. In this paper we analyse a large body of recent literature and we propose a set of modular design patterns for such hybrid, neuro-symbolic systems. We are able to describe the architecture of a very large number of hybrid systems by composing only a small set of elementary patterns as building blocks. The main contributions of this paper are: 1) a taxonomically organised vocabulary to describe both processes and data structures used in hybrid systems; 2) a set of 15+ design patterns for hybrid AI systems, organised in a set of elementary patterns and a set of compositional patterns; 3) an application of these design patterns in two realistic use-cases for hybrid AI systems. Our patterns reveal similarities between systems that were not recognised until now. Finally, our design patterns extend and refine Kautz' earlier attempt at categorising neuro-symbolic architectures.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kuzina A; Welling M; Tomczak J M
Diagnosing Vulnerability of Variational Auto-Encoders to Adversarial Attacks Proceedings Article
In: ICLR 2021 Workshop on Robust and Reliable Machine Learning in the Real World, 2021.
@inproceedings{kuzina2021diagnosing,
title = {Diagnosing Vulnerability of Variational Auto-Encoders to Adversarial Attacks},
author = {Kuzina, Anna and Welling, Max and Tomczak, Jakub M},
url = {https://arxiv.org/pdf/2103.06701.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {ICLR 2021 Workshop on Robust and Reliable Machine Learning in the Real World},
abstract = {In this work, we explore adversarial attacks on Variational Autoencoders (VAEs). We show how to modify a data point to obtain a prescribed latent code (supervised attack) or just get a drastically different code (unsupervised attack). We examine the influence of model modifications (β-VAE, NVAE) on the robustness of VAEs and suggest metrics to quantify it.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
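
A supervised latent attack can be sketched against a toy linear encoder (the paper attacks real VAE encoders such as β-VAE and NVAE; the encoder, loss, and step size here are illustrative): perturb x until its latent code approaches a prescribed target code.

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(2, 5))                  # toy linear encoder: z = W @ x
encode = lambda x: W @ x

x = rng.normal(size=5)
z_target = np.array([2.0, -1.0])             # prescribed latent code
delta = np.zeros(5)
for _ in range(2000):                        # descend on ||W(x+d) - z*||^2
    grad = 2 * W.T @ (encode(x + delta) - z_target)
    delta -= 0.01 * grad
print(np.linalg.norm(encode(x + delta) - z_target))   # close to zero
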
Zheng H; Verheij B
Rules, cases and arguments in artificial intelligence and law Book Section
In: Vogl, R (Ed.): Research Handbook on Big Data Law, pp. 373–387, Edward Elgar Publishing, 2021.
@incollection{Zheng:2021,
title = {Rules, cases and arguments in artificial intelligence and law},
author = {H. Zheng and B. Verheij},
editor = {R Vogl},
url = {https://www.ai.rug.nl/~verheij/publications/handbook2021.htm},
year = {2021},
date = {2021-01-01},
booktitle = {Research Handbook on Big Data Law},
pages = {373–387},
publisher = {Edward Elgar Publishing},
abstract = {Artificial intelligence and law is an interdisciplinary field of research that dates back at least to the 1970s, with academic conferences starting in the 1980s. In the field, complex problems are addressed about the computational modeling and automated support of legal reasoning and argumentation. Scholars have different backgrounds, and progress is driven by insights from lawyers, judges, computer scientists, philosophers and others. The community investigates and develops artificial intelligence techniques applicable in the legal domain, in order to enhance access to law for citizens and to support the efficiency and quality of work in the legal domain, aiming to promote a just society. Integral to the legal domain, legal reasoning and its structure and process have gained much attention in AI & Law research. Such research is today especially relevant, since in these days of big data and widespread use of algorithms, there is a need in AI to connect knowledge-based and data-driven AI techniques in order to arrive at a social, explainable and responsible AI. By considering knowledge in the form of rules and data in the form of cases connected by arguments, the field of AI & Law contributes relevant representations and algorithms for handling a combination of knowledge and data. In this chapter, as an entry point into the literature on AI & Law, three major styles of modeling legal reasoning are studied: rule-based reasoning, case-based reasoning and argument-based reasoning, which are the focus of this chapter. We describe selected key ideas, leaving out formal detail. As we will see, these styles of modeling legal reasoning are related, and there is much research investigating relations. We use the example domain of Dutch tort law (Section 2) to illustrate these three major styles, which are then more fully explained (Sections 3 to 5)},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Kurtan A C; Yolum P
Assisting humans in privacy management: an agent-based approach Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 35, no. 7, 2021.
@article{kurtan-yolum-21,
title = {Assisting humans in privacy management: an agent-based approach},
author = {A. Can Kurtan and P{ı}nar Yolum},
url = {https://link.springer.com/article/10.1007/s10458-020-09488-1},
doi = {10.1007/s10458-020-09488-1},
year = {2021},
date = {2021-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {35},
number = {7},
abstract = {Image sharing is a service offered by many online social networks. In order to preserve privacy of images, users need to think through and specify a privacy setting for each image that they upload. This is difficult for two main reasons: first, research shows that many times users do not know their own privacy preferences, but only become aware of them over time. Second, even when users know their privacy preferences, editing these privacy settings is cumbersome and requires too much effort, interfering with the quick sharing behavior expected on an online social network. Accordingly, this paper proposes a privacy recommendation model for images using tags and an agent that implements this, namely pelte. Each user agent makes use of the privacy settings that its user has set for previous images to predict automatically the privacy setting for an image that is uploaded to be shared. When in doubt, the agent analyzes the sharing behavior of other users in the user's network to be able to recommend to its user about what should be considered as private. Contrary to existing approaches that assume all the images are available to a centralized model, pelte is compatible with distributed environments since each agent accesses only the privacy settings of the images that the agent owner has shared or those that have been shared with the user. Our simulations on a real-life dataset show that pelte can accurately predict privacy settings even when a user has shared a few images with others, the images have only a few tags or the user's friends have varying privacy preferences.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
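
The tag-based prediction step can be sketched with simple per-tag counts (a toy in the spirit of pelte, not its algorithm); when the user's own evidence is inconclusive, the agent would fall back on the sharing behaviour of others in the network:

from collections import Counter

def predict(tags, private_counts, public_counts):
    # each tag votes with the difference of its private/public history
    score = sum(private_counts[t] - public_counts[t] for t in tags)
    if score == 0:
        return "undecided: consult friends' sharing behaviour"
    return "private" if score > 0 else "public"

private_counts = Counter({"family": 5, "home": 3})
public_counts = Counter({"landscape": 7, "food": 2})
print(predict({"family", "landscape"}, private_counts, public_counts))
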
Liscio E; van der Meer M; Jonker C M; Murukannaiah P K
A Collaborative Platform for Identifying Context-Specific Values Proceedings Article
In: Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems, pp. 1773–1775, IFAAMAS, Online, 2021.
@inproceedings{Liscio2021a,
title = {A Collaborative Platform for Identifying Context-Specific Values},
author = {Liscio, Enrico and van der Meer, Michiel and Jonker, Catholijn M. and Murukannaiah, Pradeep K.},
url = {https://www.ifaamas.org/Proceedings/aamas2021/pdfs/p1773.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 20th International Conference on Autonomous Agents and Multiagent Systems},
pages = {1773–1775},
publisher = {IFAAMAS},
address = {Online},
series = {AAMAS '21},
abstract = {Value alignment is a crucial aspect of ethical multiagent systems. An important step toward value alignment is identifying values specific to an application context. However, identifying context-specific values is complex and cognitively demanding. To support this process, we develop a methodology and a collaborative web platform that employs AI techniques. We describe this platform, highlighting its intuitive design and implementation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liscio E; van der Meer M; Siebert L C; Jonker C M; Mouter N; Murukannaiah P K
Axies: Identifying and Evaluating Context-Specific Values Proceedings Article
In: Proc. of the 20th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2021), pp. 799–808, IFAAMAS, Online, 2021.
@inproceedings{Liscio2021b,
title = {Axies: Identifying and Evaluating Context-Specific Values},
author = {Liscio, Enrico and van der Meer, Michiel and Siebert, Luciano C. and Jonker, Catholijn M. and Mouter, Niek and Murukannaiah, Pradeep K.},
url = {https://ii.tudelft.nl/~pradeep/doc/Liscio-2021-AAMAS-Axies.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Proc. of the 20th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2021)},
pages = {799–808},
publisher = {IFAAMAS},
address = {Online},
abstract = {The pursuit of values drives human behavior and promotes cooperation. Existing research is focused on general (e.g., Schwartz) values that transcend contexts. However, context-specific values are necessary to (1) understand human decisions, and (2) engineer intelligent agents that can elicit human values and take value-aligned actions. We propose Axies, a hybrid (human and AI) methodology to identify context-specific values. Axies simplifies the abstract task of value identification as a guided value annotation process involving human annotators. Axies exploits the growing availability of value-laden text corpora and Natural Language Processing to assist the annotators in systematically identifying context-specific values. We evaluate Axies in a user study involving 60 subjects. In our study, six annotators generate value lists for two timely and important contexts: Covid-19 measures, and sustainable Energy. Then, two policy experts and 52 crowd workers evaluate Axies value lists. We find that Axies yields values that are context-specific, consistent across different annotators, and comprehensible to end users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manggala P; Hoos H H; Nalisnick E
Bayesian Regression from Multiple Sources of Weak Supervision Proceedings Article
In: ICML 2021 Machine Learning for Data: Automated Creation, Privacy, Bias, 2021.
@inproceedings{manggala2021bayesianregression,
title = {Bayesian Regression from Multiple Sources of Weak Supervision},
author = {Manggala, Putra and Hoos, Holger H. and Nalisnick, Eric},
url = {https://pmangg.github.io/papers/brfmsows_mhn_ml4data_icml.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {ICML 2021 Machine Learning for Data: Automated Creation, Privacy, Bias},
abstract = {We describe a Bayesian approach to weakly supervised regression. Our proposed framework propagates uncertainty from the weak supervision to an aggregated predictive distribution. We use a generalized Bayes procedure to account for the supervision being weak and therefore likely misspecified.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
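
Propagating uncertainty from several weak sources can be illustrated by precision-weighted pooling of Gaussian predictive beliefs, with a tempering factor standing in for the generalized-Bayes correction for misspecification; all values below are toy assumptions, not the paper's procedure:

import numpy as np

def pool(means, variances, temper=0.7):
    precisions = temper / np.asarray(variances)    # down-weight weak evidence
    var = 1.0 / precisions.sum()                   # product-of-Gaussians pooling
    mean = var * (precisions * np.asarray(means)).sum()
    return mean, var

mean, var = pool(means=[2.1, 1.7, 3.0], variances=[0.5, 0.2, 2.0])
print(mean, var)   # aggregated predictive mean and variance
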
Steging C; Renooij S; Verheij B
Discovering the Rationale of Decisions: Experiments on Aligning Learning and Reasoning Proceedings Article
In: 4th EXplainable AI in Law Workshop (XAILA 2021), pp. 235–239, ACM, 2021.
@inproceedings{StegingXAILA21,
title = {Discovering the Rationale of Decisions: Experiments on Aligning Learning and Reasoning},
author = {Cor Steging and Silja Renooij and Bart Verheij},
url = {https://arxiv.org/abs/2105.06758},
year = {2021},
date = {2021-01-01},
booktitle = {4th EXplainable AI in Law Workshop (XAILA 2021)},
pages = {235–239},
publisher = {ACM},
abstract = {In AI and law, systems that are designed for decision support should be explainable when pursuing justice. In order for these systems to be fair and responsible, they should make correct decisions and make them using a sound and transparent rationale. In this paper, we introduce a knowledge-driven method for model-agnostic rationale evaluation using dedicated test cases, similar to unit-testing in professional software development. We apply this new method in a set of machine learning experiments aimed at extracting known knowledge structures from artificial datasets from fictional and non-fictional legal settings. We show that our method allows us to analyze the rationale of black-box machine learning systems by assessing which rationale elements are learned or not. Furthermore, we show that the rationale can be adjusted using tailor-made training data based on the results of the rationale evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
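The "unit-testing" idea in this abstract lends itself to a compact illustration. The following sketch is hypothetical (synthetic data, an assumed three-condition rule) and only mirrors the general method, not the paper's experiments: train a black-box classifier on a fictional domain where an outcome requires three conditions to hold jointly, then probe it with dedicated test cases that each violate exactly one condition; a model with a sound rationale must reject all of them.

import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(1)

# Fictional domain: 3 decisive conditions plus 3 irrelevant features.
X = rng.integers(0, 2, size=(5000, 6)).astype(float)
y = ((X[:, 0] == 1) & (X[:, 1] == 1) & (X[:, 2] == 1)).astype(int)
model = MLPClassifier(hidden_layer_sizes=(16,), max_iter=1000, random_state=1).fit(X, y)

def rationale_unit_test(model, violated, n=500):
    # Dedicated test set: all decisive conditions hold except `violated`,
    # while irrelevant features vary freely. Every case should be rejected.
    T = rng.integers(0, 2, size=(n, 6)).astype(float)
    T[:, :3] = 1.0
    T[:, violated] = 0.0
    return (model.predict(T) == 0).mean()

for c in range(3):
    print(f"condition {c}: {rationale_unit_test(model, c):.1%} of violating cases rejected")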
Vessies M B; Vadgama S P; van de Leur R R; Doevendans P A F M; Hassink R J; Bekkers E; van Es R
Interpretable ECG classification via a query-based latent space traversal (qLST) Journal Article
In: CoRR, vol. abs/2111.07386, 2021.
@article{DBLP:journals/corr/abs-2111-07386,
title = {Interpretable ECG classification via a query-based latent space traversal (qLST)},
author = {Melle B. Vessies and Sharvaree P. Vadgama and Rutger R. van de Leur and Pieter A. F. M. Doevendans and Rutger J. Hassink and Erik Bekkers and René van Es},
url = {https://arxiv.org/abs/2111.07386},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2111.07386},
abstract = {Electrocardiography (ECG) is an effective and non-invasive diagnostic tool that measures the electrical activity of the heart. Interpretation of ECG signals to detect various abnormalities is a challenging task that requires expertise. Recently, the use of deep neural networks for ECG classification to aid medical practitioners has become popular, but their black box nature hampers clinical implementation. Several saliency-based interpretability techniques have been proposed, but they only indicate the location of important features and not the actual features. We present a novel interpretability technique called qLST, a query-based latent space traversal technique that is able to provide explanations for any ECG classification model. With qLST, we train a neural network that learns to traverse the latent space of a variational autoencoder trained on a large university hospital dataset with over 800,000 ECGs annotated for 28 diseases. We demonstrate through experiments that we can explain different black box classifiers by generating ECGs through these traversals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dudzik B; Columbus S; Hrkalovic T M; Balliet D; Hung H
Recognizing Perceived Interdependence in Face-to-Face Negotiations through Multimodal Analysis of Nonverbal Behavior Book Chapter
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 121–130, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 9781450384810.
@inbook{10.1145/3462244.3479935,
title = {Recognizing Perceived Interdependence in Face-to-Face Negotiations through Multimodal Analysis of Nonverbal Behavior},
author = {Dudzik, Bernd and Columbus, Simon and Hrkalovic, Tiffany Matej and Balliet, Daniel and Hung, Hayley},
url = {https://research.tudelft.nl/en/publications/recognizing-perceived-interdependence-in-face-to-face-negotiation},
doi = {10.1145/3462244.3479935},
isbn = {9781450384810},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {121–130},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Enabling computer-based applications to display intelligent behavior in complex social settings requires them to relate to important aspects of how humans experience and understand such situations. One crucial driver of people's social behavior during an interaction is the interdependence they perceive, i.e., how the outcome of an interaction is determined by their own and others' actions. According to psychological studies, both the nonverbal behavior displayed by a person and that of their interaction partners can serve as cues for these interdependence perceptions. Motivated by this, we present a series of experiments to automatically recognize interdependence perceptions in dyadic face-to-face negotiations using these sources. Concretely, our approach draws on a combination of features describing individuals' Facial, Upper Body, and Vocal Behavior with state-of-the-art algorithms for multivariate time series classification. Our findings demonstrate that differences in some types of interdependence perceptions can be detected through the automatic analysis of nonverbal behaviors. We discuss implications for developing socially intelligent systems and opportunities for future research.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Steging C; Renooij S; Verheij B
Rationale Discovery and Explainable AI Proceedings Article
In: Schweighofer, Erich (Ed.): Legal Knowledge and Information Systems - JURIX 2021: The Thirty-fourth Annual Conference, Vilnius, Lithuania, 8-10 December 2021, pp. 225–234, IOS Press, 2021.
@inproceedings{DBLP:conf/jurix/StegingRV21,
title = {Rationale Discovery and Explainable AI},
author = {Cor Steging and Silja Renooij and Bart Verheij},
editor = {Erich Schweighofer},
url = {https://doi.org/10.3233/FAIA210341},
doi = {10.3233/FAIA210341},
year = {2021},
date = {2021-01-01},
booktitle = {Legal Knowledge and Information Systems - JURIX 2021: The Thirty-fourth Annual Conference, Vilnius, Lithuania, 8-10 December 2021},
volume = {346},
pages = {225–234},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {The justification of an algorithm's outcomes is important in many domains, and in particular in the law. However, previous research has shown that machine learning systems can make the right decisions for the wrong reasons: despite high accuracies, not all of the conditions that define the domain of the training data are learned. In this study, we investigate what the system does learn, using state-of-the-art explainable AI techniques. With the use of SHAP and LIME, we are able to show which features impact the decision making process and how the impact changes with different distributions of the training data. However, our results also show that even high accuracy and good relevant feature detection are no guarantee for a sound rationale. Hence these state-of-the-art explainable AI techniques cannot be used to fully expose unsound rationales, further advocating the need for a separate method for rationale evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
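As a companion to the SHAP/LIME analysis described in this abstract, the snippet below shows the kind of model-agnostic feature-impact probe involved. It is a minimal LIME-style local surrogate written from scratch under assumed synthetic data (it does not use or reproduce the paper's code or datasets): perturb an instance, query the black box, weight samples by proximity, and fit a weighted linear model whose coefficients serve as local feature impacts.

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(0)

# Hypothetical black box trained on synthetic binary conditions.
X = rng.integers(0, 2, size=(2000, 6)).astype(float)
y = (((X[:, 0] + X[:, 1]) >= 1) & (X[:, 2] == 1)).astype(int)
black_box = MLPClassifier(hidden_layer_sizes=(32,), max_iter=1000, random_state=0).fit(X, y)

def local_feature_impacts(instance, n_samples=500, scale=0.3):
    # LIME-style surrogate: sample around the instance, weight by proximity,
    # and read local impacts off a weighted linear fit to the black box.
    Z = instance + rng.normal(0.0, scale, size=(n_samples, instance.shape[0]))
    probs = black_box.predict_proba(Z)[:, 1]
    weights = np.exp(-np.sum((Z - instance) ** 2, axis=1))
    return Ridge(alpha=1.0).fit(Z, probs, sample_weight=weights).coef_

print(local_feature_impacts(X[0]).round(2))  # one local impact score per feature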
Ho L
Knowledge Representation Formalisms for Hybrid Intelligence Conference
2021, (18th International Conference on Principles of Knowledge Representation and Reasoning, KR 2021 ; Conference date: 03-11-2021 Through 12-11-2021).
@conference{dbdf453dfbad49d981cb01a6964a056a,
title = {Knowledge Representation Formalisms for Hybrid Intelligence},
author = {Loan Ho},
url = {https://research.vu.nl/en/publications/knowledge-representation-formalisms-for-hybrid-intelligence},
year = {2021},
date = {2021-01-01},
pages = {22–25},
abstract = {Knowledge graphs can play an important role in storing and providing access to global knowledge, common and accessible to both human and artificial agents, and in storing the local knowledge of individual agents in a larger network of agents. Studying suitable formalisms to model complex, conflicting, dynamic and contextualised knowledge is still a big challenge. Therefore, we investigate the usage of knowledge representation formalisms that allow artificial intelligence systems to adapt to and work with complex, conflicting, dynamic and contextualized knowledge.},
note = {18th International Conference on Principles of Knowledge Representation and Reasoning, KR 2021 ; Conference date: 03-11-2021 Through 12-11-2021},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Krause L; Vossen P
When to explain: Identifying explanation triggers in human-agent interaction Proceedings Article
In: 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence, pp. 55–60, Association for Computational Linguistics, Dublin, Ireland, 2020.
@inproceedings{krause-vossen-2020-explain,
title = {When to explain: Identifying explanation triggers in human-agent interaction},
author = {Krause, Lea and Vossen, Piek},
url = {https://www.aclweb.org/anthology/2020.nl4xai-1.12},
year = {2020},
date = {2020-11-01},
booktitle = {2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence},
pages = {55–60},
publisher = {Association for Computational Linguistics},
address = {Dublin, Ireland},
abstract = {With more agents deployed than ever, users need to be able to interact and cooperate with them in an effective and comfortable manner. Explanations have been shown to increase the understanding and trust of a user in human-agent interaction. There have been numerous studies investigating this effect, but they rely on the user explicitly requesting an explanation. We propose a first overview of when an explanation should be triggered and show that there are many instances that would be missed if the agent solely relies on direct questions. For this, we differentiate between direct triggers such as commands or questions and introduce indirect triggers like confusion or uncertainty detection.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Akata Z; Balliet D; de Rijke M; Dignum F; Dignum V; Eiben G; Fokkens A; Grossi D; Hindriks K; Hoos H; Hung H; Jonker C; Monz C; Neerincx M; Oliehoek F; Prakken H; Schlobach S; van der Gaag L; van Harmelen F; van Hoof H; van Riemsdijk B; van Wynsberghe A; Verbrugge R; Verheij B; Vossen P; Welling M
A Research Agenda for Hybrid Intelligence: Augmenting Human Intellect With Collaborative, Adaptive, Responsible, and Explainable Artificial Intelligence Journal Article
In: IEEE Computer, vol. 53, no. 08, pp. 18–28, 2020, ISSN: 1558-0814.
@article{9153877,
title = {A Research Agenda for Hybrid Intelligence: Augmenting Human Intellect With Collaborative, Adaptive, Responsible, and Explainable Artificial Intelligence},
author = {Zeynep Akata and Dan Balliet and Maarten de Rijke and Frank Dignum and Virginia Dignum and Guszti Eiben and Antske Fokkens and Davide Grossi and Koen Hindriks and Holger Hoos and Hayley Hung and Catholijn Jonker and Christof Monz and Mark Neerincx and Frans Oliehoek and Henry Prakken and Stefan Schlobach and Linda van der Gaag and Frank van Harmelen and Herke van Hoof and Birna van Riemsdijk and Aimee van Wynsberghe and Rineke Verbrugge and Bart Verheij and Piek Vossen and Max Welling},
url = {http://www.cs.vu.nl/~frankh/postscript/IEEEComputer2020.pdf},
doi = {10.1109/MC.2020.2996587},
issn = {1558-0814},
year = {2020},
date = {2020-08-01},
journal = {IEEE Computer},
volume = {53},
number = {08},
pages = {18–28},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {We define hybrid intelligence (HI) as the combination of human and machine intelligence, augmenting human intellect and capabilities instead of replacing them and achieving goals that were unreachable by either humans or machines. HI is an important new research focus for artificial intelligence, and we set a research agenda for HI by formulating four challenges.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Renting B M; Hoos H H; Jonker C M
Automated Configuration of Negotiation Strategies Proceedings Article
In: Proceedings of the 19th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1116–1124, International Foundation for Autonomous Agents and Multiagent Systems, 2020, ISBN: 978-1-4503-7518-4.
@inproceedings{Renting2020AutomatedStrategies,
title = {Automated Configuration of Negotiation Strategies},
author = {Renting, Bram M. and Hoos, Holger H. and Jonker, Catholijn M.},
url = {https://ifaamas.org/Proceedings/aamas2020/pdfs/p1116.pdf},
isbn = {978-1-4503-7518-4},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 19th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1116–1124},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
series = {AAMAS '20},
abstract = {Bidding and acceptance strategies have a substantial impact on the outcome of negotiations in scenarios with linear additive and nonlinear utility functions. Over the years, it has become clear that there is no single best strategy for all negotiation settings, yet many fixed strategies are still being developed. We envision a shift in the strategy design question from "What is a good strategy?" towards "What could be a good strategy?" For this purpose, we developed a method leveraging automated algorithm configuration to find the best strategies for a specific set of negotiation settings. By empowering automated negotiating agents using automated algorithm configuration, we obtain a flexible negotiation agent that can be configured automatically for a rich space of opponents and negotiation scenarios. To critically assess our approach, the agent was tested in an ANAC-like bilateral automated negotiation tournament setting against past competitors. We show that our automatically configured agent outperforms all other agents, with a 5.1 percent increase in negotiation payoff compared to the next-best agent. We note that without our agent in the tournament, the top-ranked agent wins by a margin of only 0.01 percent.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
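The configuration loop in this abstract follows a standard pattern: search a strategy's parameter space for the configuration with the best average payoff over a training set of opponents. The paper uses a full-fledged algorithm configurator; the sketch below substitutes plain random search and an entirely toy negotiation model, so every function, parameter, and number in it is hypothetical.

import numpy as np

rng = np.random.default_rng(42)

def session(concession, threshold, toughness, rounds=20):
    # Toy bilateral session: both sides concede linearly over normalized time.
    for t in np.linspace(0.0, 1.0, rounds):
        opp_offer = (1.0 - toughness) * t      # utility the opponent offers us
        if opp_offer >= threshold:             # our acceptance condition fires
            return opp_offer
        our_offer = concession * t             # utility we offer the opponent
        if our_offer >= toughness:             # opponent accepts our bid
            return 1.0 - our_offer
    return 0.0                                 # deadline reached: disagreement

def configure(n_trials=2000, opponents=(0.3, 0.5, 0.7)):
    # Random search as a minimal stand-in for an automated algorithm configurator:
    # score each candidate (concession rate, acceptance threshold) by mean payoff.
    best, best_score = None, -1.0
    for _ in range(n_trials):
        concession, threshold = rng.uniform(0.0, 1.0, size=2)
        score = np.mean([session(concession, threshold, tough) for tough in opponents])
        if score > best_score:
            best, best_score = (concession, threshold), score
    return best, best_score

print(configure())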
Dvořák W; Keshavarzi Zafarghandi A; Woltran S
Expressiveness of SETAFs and Support-Free ADFs under 3-valued Semantics Journal Article
In: CoRR, vol. abs/2007.03581, 2020.
@article{DBLP:journals/corr/abs-2007-03581,
title = {Expressiveness of SETAFs and Support-Free ADFs under 3-valued Semantics},
author = {Wolfgang Dvořák and Atefeh {Keshavarzi Zafarghandi} and Stefan Woltran},
url = {https://arxiv.org/abs/2007.03581},
year = {2020},
date = {2020-01-01},
journal = {CoRR},
volume = {abs/2007.03581},
abstract = {Generalizing the attack structure in argumentation frameworks (AFs) has been studied in different ways. Most prominently, the binary attack relation of Dung frameworks has been extended to the notion of collective attacks. The resulting formalism is often termed SETAFs. Another approach is provided via abstract dialectical frameworks (ADFs), where acceptance conditions specify the relation between arguments; restricting these conditions naturally allows for so-called support-free ADFs. The aim of the paper is to shed light on the relation between these two different approaches. To this end, we investigate and compare the expressiveness of SETAFs and support-free ADFs under the lens of 3-valued semantics. Our results show that it is only the presence of unsatisfiable acceptance conditions in support-free ADFs that discriminates between the two approaches.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Verheij B
Artificial intelligence as law Journal Article
In: Artificial Intelligence and Law, vol. 28, no. 2, pp. 181–206, 2020.
@article{Verheij20,
title = {Artificial intelligence as law},
author = {Bart Verheij},
url = {https://doi.org/10.1007/s10506-020-09266-0},
doi = {10.1007/s10506-020-09266-0},
year = {2020},
date = {2020-01-01},
journal = {Artificial Intelligence and Law},
volume = {28},
number = {2},
pages = {181–206},
abstract = {Information technology is so ubiquitous and AI's progress so inspiring that legal professionals, too, experience its benefits and have high expectations. At the same time, the powers of AI have been rising so strongly that it is no longer obvious that AI applications (whether in the law or elsewhere) help promote a good society; in fact, they are sometimes harmful. Hence many argue that safeguards are needed for AI to be trustworthy, social, responsible, humane, ethical. In short: AI should be good for us. But how to establish proper safeguards for AI? One strong answer readily available is: consider the problems and solutions studied in AI & Law. AI & Law has worked on the design of social, explainable, responsible AI aligned with human values for decades already; AI & Law addresses the hardest problems across the breadth of AI (in reasoning, knowledge, learning and language); and AI & Law inspires new solutions (argumentation, schemes and norms, rules and cases, interpretation). It is argued that the study of AI as Law supports the development of an AI that is good for us, making AI & Law more relevant than ever.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kökciyan N; Yolum P
TURP: Managing Trust for Regulating Privacy in Internet of Things Journal Article
In: IEEE Internet Computing, vol. 24, no. 6, pp. 9–16, 2020.
@article{turp-ic-2020,
title = {TURP: Managing Trust for Regulating Privacy in Internet of Things},
author = {Kökciyan, Nadin and Yolum, P{ı}nar},
url = {https://webspace.science.uu.nl/~yolum001/papers/InternetComputing-20-TURP.pdf},
doi = {10.1109/MIC.2020.3020006},
year = {2020},
date = {2020-01-01},
journal = {IEEE Internet Computing},
volume = {24},
number = {6},
pages = {9–16},
abstract = {Internet of Things (IoT) applications, such as smart home or ambient assisted living systems, promise useful services to end users. Most of these services rely heavily on sharing and aggregating information among devices, often raising privacy concerns. Contrary to traditional systems, where the privacy of each user is managed through well-defined policies, the scale, dynamism, and heterogeneity of IoT systems make it impossible to specify privacy policies for all possible situations. Alternatively, this paper argues that the handling of privacy has to be reasoned about by the IoT devices themselves, depending on the norms, the context, as well as the trust among entities. We present a technique in which an IoT device collects information from others and evaluates the trustworthiness of the information sources to decide whether sharing information with others is suitable. We demonstrate the applicability of the technique over an IoT pilot study.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
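To make the aggregation idea in this abstract concrete, here is a deliberately simplified sketch. It is not TURP itself, which also reasons over norms and context; it only illustrates one plausible reading of the trust-weighting step, with all names and values hypothetical: a device pools its peers' opinions on whether sharing a piece of information is acceptable, weighting each opinion by how much it trusts that peer.

import numpy as np

def share_decision(reports, trust, threshold=0.6):
    # reports: each peer's estimate in [0, 1] that sharing is privacy-safe.
    # trust:   this device's trust in each peer, in [0, 1].
    reports = np.asarray(reports, dtype=float)
    trust = np.asarray(trust, dtype=float)
    pooled = np.sum(trust * reports) / np.sum(trust)  # trust-weighted opinion
    return pooled >= threshold, pooled

# A low-trust peer objecting barely moves the pooled opinion.
ok, score = share_decision(reports=[0.9, 0.8, 0.2], trust=[0.7, 0.9, 0.3])
print(ok, round(score, 2))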
Ulusoy O; Yolum P
Agents for Preserving Privacy: Learning and Decision Making Collaboratively Proceedings Article
In: Bassiliades, Nick; Chalkiadakis, Georgios; de Jonge, Dave (Ed.): Multi-Agent Systems and Agreement Technologies, pp. 116–131, Springer International Publishing, 2020, ISBN: 978-3-030-66412-1.
@inproceedings{ulusoy-yolum-20,
title = {Agents for Preserving Privacy: Learning and Decision Making Collaboratively},
author = {Ulusoy, Onuralp and Yolum, P{ı}nar},
editor = {Bassiliades, Nick and Chalkiadakis, Georgios and de Jonge, Dave},
url = {https://webspace.science.uu.nl/~yolum001/papers/ulusoy-yolum-20.pdf},
doi = {https://doi.org/10.1007/978-3-030-66412-1_8},
isbn = {978-3-030-66412-1},
year = {2020},
date = {2020-01-01},
booktitle = {Multi-Agent Systems and Agreement Technologies},
pages = {116–131},
publisher = {Springer International Publishing},
abstract = {Privacy is a right of individuals to keep personal information to themselves. Online systems often enable their users to select what information they would like to share with others and what information to keep private. When a piece of information pertains only to a single individual, it is possible to preserve privacy by providing the right access options to the user. However, when a piece of information pertains to multiple individuals, such as a picture of a group of friends or a collaboratively edited document, deciding how to share this information and with whom is challenging, as individuals might have conflicting privacy constraints. Resolving this problem requires an automated mechanism that takes into account the relevant individuals' concerns to decide on the privacy configuration of information. Accordingly, this paper proposes an auction-based privacy mechanism to manage the privacy of users when information related to multiple individuals is at stake. We propose to have a software agent that acts on behalf of each user to enter privacy auctions, learn the subjective privacy valuations of the individuals over time, and bid to respect their privacy. We show the workings of our proposed approach over multiagent simulations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Murukannaiah P K; Ajmeri N; Jonker C M; Singh M P
New Foundations of Ethical Multiagent Systems Proceedings Article
In: Proceedings of the 19th Conference on Autonomous Agents and MultiAgent Systems, pp. 1706–1710, Auckland, 2020.
@inproceedings{Murukannaiah-2020-AAMASBlueSky-EthicalMAS,
title = {New Foundations of Ethical Multiagent Systems},
author = {Pradeep K. Murukannaiah and Nirav Ajmeri and Catholijn M. Jonker and Munindar P. Singh},
url = {https://ii.tudelft.nl/~pradeep/doc/Murukannaiah-2020-AAMASBlueSky-EthicalMAS.pdf},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the 19th Conference on Autonomous Agents and MultiAgent Systems},
pages = {1706–1710},
address = {Auckland},
series = {AAMAS '20},
abstract = {Ethics is inherently a multiagent concern. However, research on AI ethics today is dominated by work on individual agents: (1) how an autonomous robot or car may harm or (differentially) benefit people in hypothetical situations (the so-called trolley problems) and (2) how a machine learning algorithm may produce biased decisions or recommendations. The societal framework is largely omitted. To develop new foundations for ethics in AI, we adopt a sociotechnical stance in which agents (as technical entities) help autonomous social entities or principals (people and organizations). This multiagent conception of a sociotechnical system (STS) captures how ethical concerns arise in the mutual interactions of multiple stakeholders. These foundations would enable us to realize ethical STSs that incorporate social and technical controls to respect stated ethical postures of the agents in the STSs. The envisioned foundations require new thinking, along two broad themes, on how to realize (1) an STS that reflects its stakeholders' values and (2) individual agents that function effectively in such an STS.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu D; Libbi C A; Javdani Rikhtehgar D
What Would You Like to Visit Next? – Using a Knowledge-Graph Driven Museum Guide in a Virtual Exhibition Proceedings Article
In: HHAI2022: Augmenting Human Intellect, pp. 275–277, IOS Press, Amsterdam, the Netherlands, 2022.
@inproceedings{Javdani2022,
title = {What Would You Like to Visit Next? – Using a Knowledge-Graph Driven Museum Guide in a Virtual Exhibition},
author = {Liu, Dou and Libbi, Claudia Alessandra and Javdani Rikhtehgar, Delaram},
url = {https://ebooks.iospress.nl/volumearticle/60883},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {275–277},
publisher = {IOS Press},
address = {Amsterdam, the Netherlands},
series = {HHAI '22},
abstract = {Conversational agents have recently been incorporated into Virtual Heritage to provide a more immersive and interactive user experience. However, existing chatbot guides lack the capacity to leverage rich background knowledge graphs (KGs) for better interactions between visitors and cultural collections. In this paper, we present a KG-driven conversational museum guide that answers visitors' questions and recommends relevant art objects in a virtual exhibition, while modelling user interest to offer personalised information and guidance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}