Sarvi F; Heuss M; Aliannejadi M; Schelter S; de Rijke M
Understanding and Mitigating the Effect of Outliers in Fair Ranking Proceedings Article
In: WSDM 2022: The Fifteenth International Conference on Web Search and Data Mining, ACM, 2022.
@inproceedings{sarvi-2022-understanding,
title = {Understanding and Mitigating the Effect of Outliers in Fair Ranking},
author = {Sarvi, Fatemeh and Heuss, Maria and Aliannejadi, Mohammad and Schelter, Sebastian and de Rijke, Maarten},
url = {https://arxiv.org/abs/2112.11251},
year = {2022},
date = {2022-02-01},
booktitle = {WSDM 2022: The Fifteenth International Conference on Web Search and Data Mining},
publisher = {ACM},
abstract = {Traditional ranking systems are expected to sort items in the order of their relevance and thereby maximize their utility. In fair ranking, utility is complemented with fairness as an optimization goal. Recent work on fair ranking focuses on developing algorithms to optimize for fairness, given position-based exposure. In contrast, we identify the potential of outliers in a ranking to influence exposure and thereby negatively impact fairness. An outlier in a list of items can alter the examination probabilities, which can lead to different distributions of attention, compared to position-based exposure. We formalize outlierness in a ranking, show that outliers are present in realistic datasets, and present the results of an eye-tracking study, showing that users' scanning order and the exposure of items are influenced by the presence of outliers. We then introduce OMIT, a method for fair ranking in the presence of outliers. Given an outlier detection method, OMIT improves fair allocation of exposure by suppressing outliers in the top-k ranking. Using an academic search dataset, we show that outlierness optimization leads to a fairer policy that displays fewer outliers in the top-k, while maintaining a reasonable trade-off between fairness and utility.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
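The paper formalizes its own outlierness measure; purely as a rough illustration of the general idea, a z-score rule over some item attribute (the citation counts below are an invented stand-in) could flag outliers in a top-k ranking as follows. This Python sketch is not the paper's method.

import statistics

def zscore_outliers(scores, k=10, threshold=2.0):
    """Flag items in the top-k whose attribute (e.g., a citation count)
    deviates from the top-k mean by more than `threshold` std devs.
    Illustrative only; the paper defines its own outlierness measure."""
    top = scores[:k]
    mean = statistics.mean(top)
    stdev = statistics.pstdev(top)
    if stdev == 0:
        return []
    return [i for i, s in enumerate(top) if abs(s - mean) / stdev > threshold]

# Example: one item with an unusually high citation count in the top-10
ranking = [120, 95, 90, 1500, 85, 80, 78, 75, 70, 60, 55]
print(zscore_outliers(ranking))  # -> [3]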
Ulusoy O; Yolum P
PANOLA: A Personal Assistant for Supporting Users in Preserving Privacy Journal Article
In: ACM Transactions on Internet Technology, vol. 22, no. 1, 2022.
@article{panola-2022,
title = {PANOLA: A Personal Assistant for Supporting Users in Preserving Privacy},
author = {Ulusoy, Onuralp and Yolum, Pinar},
url = {https://webspace.science.uu.nl/~yolum001/papers/panola-2022.pdf},
year = {2022},
date = {2022-02-01},
journal = {ACM Transactions on Internet Technology},
volume = {22},
number = {1},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Privacy is the right of individuals to keep personal information to themselves. When individuals use online systems, they should be given the right to decide what information they would like to share and what to keep private. When a piece of information pertains only to a single individual, preserving privacy is possible by providing the right access options to the user. However, when a piece of information pertains to multiple individuals, such as a picture of a group of friends or a collaboratively edited document, deciding how to share this information and with whom is challenging. The problem becomes more difficult when the individuals who are affected by the information have different, possibly conflicting privacy constraints. Resolving this problem requires a mechanism that takes into account the relevant individuals’ concerns to decide on the privacy configuration of information. Because these decisions need to be made frequently (i.e., per each piece of shared content), the mechanism should be automated. This article presents a personal assistant to help end-users with managing the privacy of their content. When some content that belongs to multiple users is about to be shared, the personal assistants of the users employ an auction-based privacy mechanism to regulate the privacy of the content. To do so, each personal assistant learns the preferences of its user over time and produces bids accordingly. Our proposed personal assistant is capable of assisting users with different personas and thus ensures that people benefit from it as they need it. Our evaluations over multiagent simulations with online social network content show that our proposed personal assistant enables privacy-respecting content sharing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
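PANOLA's auction mechanism is specified in the article itself; the following minimal Python sketch only illustrates the general shape of an auction-based group privacy decision. The users, settings, and bid values are invented for the example.

from collections import defaultdict

def decide_privacy(bids):
    """Pick the sharing setting with the highest total bid.
    `bids` maps each user to (preferred_setting, bid_value), where the
    bid reflects how strongly the assistant's learned model of its user
    cares about this content. Illustrative sketch only."""
    totals = defaultdict(float)
    for user, (setting, value) in bids.items():
        totals[setting] += value
    return max(totals, key=totals.get)

bids = {
    "alice": ("friends-only", 0.9),   # appears in the photo, cares a lot
    "bob":   ("public", 0.3),
    "carol": ("friends-only", 0.4),
}
print(decide_privacy(bids))  # -> friends-only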
Los M; Christoff Z; Grossi D
Proportional Budget Allocations: Towards a Systematization Proceedings Article
In: De Raedt, Luc (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 398–404, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{los22proportional,
title = {Proportional Budget Allocations: Towards a Systematization},
author = {Los, Maaike and Christoff, Zoé and Grossi, Davide},
editor = {Luc De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/57},
doi = {10.24963/ijcai.2022/57},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {398–404},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {We contribute to the programme of lifting proportionality axioms from the multi-winner voting setting to participatory budgeting. We define novel proportionality axioms for participatory budgeting and test them on known proportionality-driven rules such as Phragmén and Rule X. We investigate logical implications among old and new axioms and provide a systematic overview of proportionality criteria in participatory budgeting.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
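Rule X (the Method of Equal Shares), one of the rules the paper tests, can be sketched for approval ballots as follows. This is a textbook-style Python implementation under the simplifying assumption of unit utilities, not code from the paper.

def rule_x(budget, costs, approvals):
    """Method of Equal Shares (Rule X) for approval ballots, sketch.
    budget: total budget B; costs: {project: cost};
    approvals: {voter: set(projects)}. Each voter starts with B/n, and
    the project purchasable at the smallest per-supporter price rho is
    bought greedily until nothing affordable remains."""
    n = len(approvals)
    purse = {v: budget / n for v in approvals}
    elected, remaining = [], set(costs)
    while True:
        best, best_rho = None, float("inf")
        for p in remaining:
            supporters = [v for v in approvals if p in approvals[v]]
            if sum(purse[v] for v in supporters) < costs[p]:
                continue  # supporters cannot jointly afford it
            # smallest rho with sum_v min(purse[v], rho) = cost(p)
            budgets = sorted(purse[v] for v in supporters)
            paid_by_poorer, rho = 0.0, None
            for i, b in enumerate(budgets):
                cand = (costs[p] - paid_by_poorer) / (len(budgets) - i)
                if cand <= b:
                    rho = cand
                    break
                paid_by_poorer += b
            if rho is not None and rho < best_rho:
                best, best_rho = p, rho
        if best is None:
            return elected
        for v in approvals:
            if best in approvals[v]:
                purse[v] -= min(purse[v], best_rho)
        elected.append(best)
        remaining.discard(best)

approvals = {"v1": {"a", "b"}, "v2": {"a"}, "v3": {"b"}, "v4": {"c"}}
print(rule_x(8, {"a": 4, "b": 3, "c": 3}, approvals))  # -> ['b']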
Zhang Y; Grossi D
Tracking Truth by Weighting Proxies in Liquid Democracy Proceedings Article
In: Faliszewski, Piotr; Mascardi, Viviana; Pelachaud, Catherine; Taylor, Matthew E. (Ed.): 21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022, pp. 1482–1490, International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS), 2022.
@inproceedings{zhang22tracking,
title = {Tracking Truth by Weighting Proxies in Liquid Democracy},
author = {Yuzhe Zhang and Davide Grossi},
editor = {Piotr Faliszewski and Viviana Mascardi and Catherine Pelachaud and Matthew E. Taylor},
url = {https://www.ifaamas.org/Proceedings/aamas2022/pdfs/p1482.pdf},
doi = {10.5555/3535850.3536015},
year = {2022},
date = {2022-01-01},
booktitle = {21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022},
pages = {1482–1490},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
abstract = {We study wisdom-of-the-crowd effects in liquid democracy on networks where agents are allowed to apportion parts of their voting weight to different proxies. We show that in this setting—unlike in the standard one where voting weight is delegated in full to only one proxy—it becomes possible to construct delegation structures that optimize the truth-tracking ability of the group. Focusing on group accuracy we contrast this centralized solution with the setting in which agents are free to choose their weighted delegations by greedily trying to maximize their own individual accuracy. While equilibria with weighted delegations may be as bad as with standard delegations, they are never worse and may sometimes be better. To gain further insights into this model we experimentally study quantal response delegation strategies on random networks. We observe that weighted delegations can lead, under specific conditions, to higher group accuracy than simple majority voting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
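As a hedged illustration of the setting, the following Monte Carlo sketch estimates group accuracy when agents split their voting weight across proxies. The delegation graph, the competences, and the acyclicity assumption are all invented for the example; this is not the paper's experimental code.

import random

def final_weights(delegation, n, iters=100):
    """Propagate voting weight through weighted delegations.
    delegation[i] is a dict {proxy: fraction} summing to <= 1; the
    remainder of agent i's weight is cast by i directly. Assumes an
    acyclic delegation graph so all weight is eventually cast."""
    kept = [1.0 - sum(delegation.get(i, {}).values()) for i in range(n)]
    flowing = [1.0] * n          # weight still travelling through the graph
    cast = [0.0] * n             # weight finally cast by each agent
    for _ in range(iters):
        nxt = [0.0] * n
        for i in range(n):
            cast[i] += flowing[i] * kept[i]
            for j, frac in delegation.get(i, {}).items():
                nxt[j] += flowing[i] * frac
        flowing = nxt
    return cast

def group_accuracy(delegation, competence, trials=20000):
    n = len(competence)
    w = final_weights(delegation, n)
    hits = 0
    for _ in range(trials):
        votes = [random.random() < competence[i] for i in range(n)]
        correct = sum(w[i] for i in range(n) if votes[i])
        hits += correct > n / 2  # weighted majority for the true alternative
    return hits / trials

# Agent 0 splits its weight between two more competent proxies
print(group_accuracy({0: {1: 0.5, 2: 0.5}}, [0.55, 0.7, 0.7]))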
Deja K; Kuzina A; Trzciński T; Tomczak J M
On Analyzing Generative and Denoising Capabilities of Diffusion-based Deep Generative Models Journal Article
In: 36th Conference on Neural Information Processing Systems (NeurIPS 2022), 2022.
@article{deja2022analyzing,
title = {On Analyzing Generative and Denoising Capabilities of Diffusion-based Deep Generative Models},
author = {Deja, Kamil and Kuzina, Anna and Trzci{ń}ski, Tomasz and Tomczak, Jakub M},
url = {https://arxiv.org/abs/2206.00070},
year = {2022},
date = {2022-01-01},
journal = {36th Conference on Neural Information Processing Systems (NeurIPS 2022)},
abstract = {Diffusion-based Deep Generative Models (DDGMs) offer state-of-the-art performance in generative modeling. Their main strength comes from their unique setup in which a model (the backward diffusion process) is trained to reverse the forward diffusion process, which gradually adds noise to the input signal. Although DDGMs are well studied, it is still unclear how the small amount of noise is transformed during the backward diffusion process. Here, we focus on analyzing this problem to gain more insight into the behavior of DDGMs and their denoising and generative capabilities. We observe a fluid transition point that changes the functionality of the backward diffusion process from generating a (corrupted) image from noise to denoising the corrupted image to the final sample. Based on this observation, we postulate to divide a DDGM into two parts: a denoiser and a generator. The denoiser could be parameterized by a denoising auto-encoder, while the generator is a diffusion-based model with its own set of parameters. We experimentally validate our proposition, showing its pros and cons.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
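The proposed generator/denoiser decomposition can be mimicked in a generic DDPM-style ancestral sampling loop by handing the final steps to a separate denoiser. In this PyTorch sketch, `eps_model`, `denoiser`, the schedule, and the switch point are placeholders, not the authors' models.

import torch

def sample_split(eps_model, denoiser, betas, t_switch, shape):
    """Reverse diffusion split into two phases: steps T..t_switch+1 act
    as the 'generator'; the remaining steps are replaced by a separate
    denoiser (e.g., a denoising auto-encoder). `eps_model(x, t)`
    predicts noise; both networks are placeholders."""
    alphas = 1.0 - betas
    alpha_bar = torch.cumprod(alphas, dim=0)
    x = torch.randn(shape)
    for t in range(len(betas) - 1, t_switch, -1):
        z = torch.randn(shape) if t > t_switch + 1 else 0.0
        eps = eps_model(x, t)
        x = (x - betas[t] / torch.sqrt(1 - alpha_bar[t]) * eps) \
            / torch.sqrt(alphas[t]) + torch.sqrt(betas[t]) * z
    return denoiser(x)  # second phase: denoise the corrupted image

# Toy run with dummy networks, just to show the interface
betas = torch.linspace(1e-4, 0.02, 1000)
out = sample_split(lambda x, t: torch.zeros_like(x), lambda x: x,
                   betas, 100, (1, 3, 32, 32))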
Kuzina A; Welling M; Tomczak J M
Alleviating Adversarial Attacks on Variational Autoencoders with MCMC Journal Article
In: 36th Conference on Neural Information Processing Systems (NeurIPS 2022), 2022.
@article{kuzina2022alleviating,
title = {Alleviating Adversarial Attacks on Variational Autoencoders with MCMC},
author = {Kuzina, Anna and Welling, Max and Tomczak, Jakub M},
url = {https://arxiv.org/abs/2203.09940},
year = {2022},
date = {2022-01-01},
journal = {36th Conference on Neural Information Processing Systems (NeurIPS 2022)},
abstract = {Variational autoencoders (VAEs) are latent variable models that can generate complex objects and provide meaningful latent representations. Moreover, they could be further used in downstream tasks such as classification. As previous work has shown, one can easily fool VAEs to produce unexpected latent representations and reconstructions for a visually slightly modified input. Here, we examine several objective functions for adversarial attack construction proposed previously and present a solution to alleviate the effect of these attacks. Our method utilizes the Markov Chain Monte Carlo (MCMC) technique in the inference step that we motivate with a theoretical analysis. Thus, we do not incorporate any extra costs during training, and the performance on non-attacked inputs is not decreased. We validate our approach on a variety of datasets (MNIST, Fashion MNIST, Color MNIST, CelebA) and VAE configurations (β-VAE, NVAE, β-TCVAE), and show that our approach consistently improves the model robustness to adversarial attacks.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
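As a sketch of the inference-time idea (the paper motivates its particular MCMC choice theoretically), a few steps of unadjusted Langevin dynamics on the true posterior of a VAE could look like the following. The Gaussian decoder with unit variance, the standard normal prior, and the step sizes are all assumptions made for illustration.

import torch

def refine_latent(decoder, x, z0, steps=20, step_size=1e-2):
    """Refine an encoder sample z0 with unadjusted Langevin dynamics on
    the (unnormalized) true posterior log p(x|z) + log p(z). Sketch of
    the inference-time MCMC idea; no training cost is added."""
    z = z0.clone().requires_grad_(True)
    for _ in range(steps):
        recon = decoder(z)
        log_joint = (-0.5 * ((x - recon) ** 2).sum()   # log p(x|z), Gaussian
                     - 0.5 * (z ** 2).sum())           # log p(z), std normal
        (grad,) = torch.autograd.grad(log_joint, z)
        with torch.no_grad():
            z = z + 0.5 * step_size * grad \
                + torch.randn_like(z) * step_size ** 0.5
        z.requires_grad_(True)
    return z.detach()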
Vadgama S; Tomczak J M; Bekkers E J
Kendall Shape-VAE : Learning Shapes in a Generative Framework Proceedings Article
In: NeurIPS 2022 Workshop on Symmetry and Geometry in Neural Representations, 2022.
@inproceedings{vadgama2022kendall,
title = {Kendall Shape-VAE : Learning Shapes in a Generative Framework},
author = {Sharvaree Vadgama and Jakub Mikolaj Tomczak and Erik J Bekkers},
url = {https://openreview.net/forum?id=nzh4N6kdl2G},
year = {2022},
date = {2022-01-01},
booktitle = {NeurIPS 2022 Workshop on Symmetry and Geometry in Neural Representations},
abstract = {Learning an interpretable representation of data without supervision is an important precursor for the development of artificial intelligence. In this work, we introduce Kendall Shape-VAE, a novel Variational Autoencoder framework for learning shapes as it disentangles the latent space by compressing information to simpler geometric symbols. In Kendall Shape-VAE, we modify the Hyperspherical Variational Autoencoder such that it results in an exactly rotationally equivariant network using the notion of landmarks in the Kendall shape space. We show the exact equivariance of the model through experiments on rotated MNIST.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
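Independent of the VAE itself, the Kendall shape space identifies landmark configurations up to translation, scale, and rotation. A small numpy sketch of that normalization (pre-shape plus an optimal Procrustes rotation) may clarify the underlying notion; it is standard shape-analysis machinery, not the paper's model.

import numpy as np

def to_preshape(landmarks):
    """Remove translation and scale from a (k, 2) landmark array,
    mapping it onto the pre-shape sphere of the Kendall shape space."""
    centered = landmarks - landmarks.mean(axis=0)
    return centered / np.linalg.norm(centered)

def align(a, b):
    """Rotate pre-shape `b` onto pre-shape `a` (optimal rotation via
    SVD); the residual then depends only on the Kendall shapes."""
    u, _, vt = np.linalg.svd(a.T @ b)
    r = u @ vt
    if np.linalg.det(r) < 0:          # keep a proper rotation
        u[:, -1] *= -1
        r = u @ vt
    return b @ r.T

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
theta = np.pi / 3
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
a, b = to_preshape(square), to_preshape(square @ rot.T)
print(np.allclose(align(a, b), a))  # rotation removed -> True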
Liscio E; Dondera A E; Geadau A; Jonker C M; Murukannaiah P K
Cross-Domain Classification of Moral Values Proceedings Article
In: Findings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics, pp. 2727–2745, ACL, Seattle, WA, USA, 2022.
@inproceedings{Liscio2022a,
title = {Cross-Domain Classification of Moral Values},
author = {Liscio, Enrico and Dondera, Alin E. and Geadau, Andrei and Jonker, Catholijn M. and Murukannaiah, Pradeep K.},
url = {https://aclanthology.org/2022.findings-naacl.209.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Findings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics},
pages = {2727–2745},
publisher = {ACL},
address = {Seattle, WA, USA},
series = {NAACL '22},
abstract = {Moral values influence how we interpret and act upon the information we receive. Identifying human moral values is essential for artificially intelligent agents to co-exist with humans. Recent progress in natural language processing allows the identification of moral values in textual discourse. However, domain-specific moral rhetoric poses challenges for transferring knowledge from one domain to another. We provide the first extensive investigation on the effects of cross-domain classification of moral values from text. We compare a state-of-the-art deep learning model (BERT) in seven domains and four cross-domain settings. We show that a value classifier can generalize and transfer knowledge to novel domains, but it can introduce catastrophic forgetting. We also highlight the typical classification errors in cross-domain value classification and compare the model predictions to the annotators' agreement. Our results provide insights to computer and social scientists that seek to identify moral rhetoric specific to a domain of discourse.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Siebert L C; Liscio E; Murukannaiah P K; Kaptein L; Spruit S L; van den Hoven J; Jonker C M
Estimating Value Preferences in a Hybrid Participatory System Proceedings Article
In: HHAI2022: Augmenting Human Intellect, pp. 114–127, IOS Press, Amsterdam, the Netherlands, 2022.
@inproceedings{Siebert2022,
title = {Estimating Value Preferences in a Hybrid Participatory System},
author = {Siebert, Luciano C. and Liscio, Enrico and Murukannaiah, Pradeep K. and Kaptein, Lionel and Spruit, Shannon L. and van den Hoven, Jeroen and Jonker, Catholijn M.},
url = {https://ebooks.iospress.nl/volumearticle/60861},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {114–127},
publisher = {IOS Press},
address = {Amsterdam, the Netherlands},
series = {HHAI '22},
abstract = {We propose methods for an AI agent to estimate the value preferences of individuals in a hybrid participatory system, considering a setting where participants make choices and provide textual motivations for those choices. We focus on situations where there is a conflict between participants' choices and motivations, and operationalize the philosophical stance that “valuing is deliberatively consequential.” That is, if a user's choice is based on a deliberation of value preferences, the value preferences can be observed in the motivation the user provides for the choice. Thus, we prioritize the value preferences estimated from motivations over the value preferences estimated from choices alone. We evaluate the proposed methods on a dataset of a large-scale survey on energy transition. The results show that explicitly addressing inconsistencies between choices and motivations improves the estimation of an individual's value preferences. The proposed methods can be integrated in a hybrid participatory system, where artificial agents ought to estimate humans' value preferences to pursue value alignment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van der Meer M; Liscio E; Jonker C M; Plaat A; Vossen P; Murukannaiah P K
HyEnA: A Hybrid Method for Extracting Arguments from Opinions Proceedings Article
In: HHAI2022: Augmenting Human Intellect, pp. 17–31, IOS Press, Amsterdam, the Netherlands, 2022.
@inproceedings{vanderMeer2022,
title = {HyEnA: A Hybrid Method for Extracting Arguments from Opinions},
author = {van der Meer, Michiel and Liscio, Enrico and Jonker, Catholijn M. and Plaat, Aske and Vossen, Piek and Murukannaiah, Pradeep K.},
url = {https://ebooks.iospress.nl/volumearticle/60855},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {17–31},
publisher = {IOS Press},
address = {Amsterdam, the Netherlands},
series = {HHAI '22},
abstract = {The key arguments underlying a large and noisy set of opinions help understand the opinions quickly and accurately. Fully automated methods can extract arguments but (1) require large labeled datasets and (2) work well for known viewpoints, but not for novel points of view. We propose HyEnA, a hybrid (human + AI) method for extracting arguments from opinionated texts, combining the speed of automated processing with the understanding and reasoning capabilities of humans. We evaluate HyEnA on three feedback corpora. We find that, on the one hand, HyEnA achieves higher coverage and precision than a state-of-the-art automated method, when compared on a common set of diverse opinions, justifying the need for human insight. On the other hand, HyEnA requires less human effort and does not compromise quality compared to (fully manual) expert analysis, demonstrating the benefit of combining human and machine intelligence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Loftin R; Oliehoek F A
On the Impossibility of Learning to Cooperate with Adaptive Partner Strategies in Repeated Games Proceedings Article
In: International Conference on Machine Learning, pp. 14197–14209, PMLR, 2022.
@inproceedings{loftin2022impossibility,
title = {On the Impossibility of Learning to Cooperate with Adaptive Partner Strategies in Repeated Games},
author = {Loftin, Robert and Oliehoek, Frans A},
url = {https://arxiv.org/abs/2206.10614},
year = {2022},
date = {2022-01-01},
booktitle = {International Conference on Machine Learning},
pages = {14197–14209},
organization = {PMLR},
abstract = {Learning to cooperate with other agents is challenging when those agents also possess the ability to adapt to our own behavior. Practical and theoretical approaches to learning in cooperative settings typically assume that other agents' behaviors are stationary, or else make very specific assumptions about other agents' learning processes. The goal of this work is to understand whether we can reliably learn to cooperate with other agents without such restrictive assumptions, which are unlikely to hold in real-world applications. Our main contribution is a set of impossibility results, which show that no learning algorithm can reliably learn to cooperate with all possible adaptive partners in a repeated matrix game, even if that partner is guaranteed to cooperate with some stationary strategy. Motivated by these results, we then discuss potential alternative assumptions which capture the idea that an adaptive partner will only adapt rationally to our behavior.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Rhenen J; Centeio Jorge C; Matej Hrkalovic T; Dudzik B
Effects of Social Behaviours in Online Video Games on Team Trust Proceedings Article
In: Extended Abstracts of the 2022 Annual Symposium on Computer-Human Interaction in Play, pp. 159–165, 2022.
@inproceedings{van2022effects,
title = {Effects of Social Behaviours in Online Video Games on Team Trust},
author = {van Rhenen, Jan-Willem and Centeio Jorge, Carolina and Matej Hrkalovic, Tiffany and Dudzik, Bernd},
url = {https://pure.tudelft.nl/admin/files/146989190/vanRhenen2021_author.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Extended Abstracts of the 2022 Annual Symposium on Computer-Human Interaction in Play},
pages = {159–165},
abstract = {In competitive multiplayer online video games, teamwork is of utmost importance, implying high levels of interdependence between the joint outcomes of players. When engaging in such interdependent interactions, humans rely on trust to facilitate coordination of their individual behaviours. However, online games often take place between teams of strangers, with individual members having little to no information about each other beyond what they observe throughout the interaction itself. A better understanding of the social behaviours that are used by players to form trust could not only facilitate richer gaming experiences, but could also lead to insights about team interactions. As such, this paper presents a first step towards understanding how and which types of in-game behaviour relate to trust formation. In particular, we investigate a) which in-game behaviours were relevant for trust formation (the first part of the study) and b) how they relate to players' reported trust in their teammates (the second part of the study). The first part consisted of interviews with League of Legends players in order to create a taxonomy of in-game behaviours relevant for trust formation. As for the second part, we ran a small-scale pilot study where participants played the game and then answered a questionnaire to measure their trust in their teammates. Our preliminary results present a taxonomy of in-game behaviours which can be used to annotate the games regarding trust behaviours. Based on the pilot study, the list of behaviours could be extended to improve the results. These findings can be used to research the role of trust formation in teamwork.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Matej Hrkalovic T
Designing Hybrid Intelligence Techniques for Facilitating Collaboration Informed by Social Science Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 679–684, 2022.
@inproceedings{matej2022designing,
title = {Designing Hybrid Intelligence Techniques for Facilitating Collaboration Informed by Social Science},
author = {Matej Hrkalovic, Tiffany},
url = {https://research.vu.nl/en/publications/designing-hybrid-intelligence-techniques-for-facilitating-collabo},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {679–684},
abstract = {Designing (socially) intelligent systems for facilitating collaborations in human-human and human-AI teams will require them to have a basic understanding of principles underlying social decision-making. Partner selection - the ability to identify and select suitable partners for collaborative relationships - is one relevant component of social intelligence and an important ingredient for successful relationship management. In everyday life, decisions to engage in joint undertakings are often based on impressions made during social interactions with potential partners. These impressions, and consequently, partner selection are informed by (non-)verbal behavioral cues. Despite its importance, research investigating how these impressions and partner selection decisions unfold in naturalistic settings seems to be lacking. Thus, in this paper, we present a project focused on understanding, predicting and modeling partner selection and its relationship with human impressions in semi-naturalistic settings, such as social interactions, with the aim of informing future designs of (hybrid) intelligence systems that can understand, predict and aid in initiating and facilitating (current and future) collaborations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alivanistos D; Santamaría S B; Cochez M; Kalo J; van Krieken E; Thanapalasingam T
Prompting as Probing: Using Language Models for Knowledge Base Construction Journal Article
In: LM-KBC’22: Knowledge Base Construction from Pre-trained Language Models, Challenge at ISWC 2022, CEUR-WS, 2022.
@article{alivanistos2022prompting,
title = {Prompting as Probing: Using Language Models for Knowledge Base Construction},
author = {Alivanistos, Dimitrios and Santamaría, Selene Báez and Cochez, Michael and Kalo, Jan-Christoph and van Krieken, Emile and Thanapalasingam, Thiviyan},
url = {https://ceur-ws.org/Vol-3274/paper2.pdf},
doi = {10.48550/arXiv.2208.11057},
year = {2022},
date = {2022-01-01},
booktitle = {LM-KBC’22: Knowledge Base Construction from Pre-trained Language Models, Challenge at ISWC 2022},
publisher = {CEUR-WS},
series = {CEUR Workshop Proceedings},
abstract = {Language Models (LMs) have proven to be useful in various downstream applications, such as summarisation, translation, question answering and text classification. LMs are becoming increasingly important tools in Artificial Intelligence, because of the vast quantity of information they can store. In this work, we present ProP (Prompting as Probing), which utilizes GPT-3, a large Language Model originally proposed by OpenAI in 2020, to perform the task of Knowledge Base Construction (KBC). ProP implements a multi-step approach that combines a variety of prompting techniques to achieve this. Our results show that manual prompt curation is essential, that the LM must be encouraged to give answer sets of variable lengths, in particular including empty answer sets, that true/false questions are a useful device to increase precision on suggestions generated by the LM, that the size of the LM is a crucial factor, and that a dictionary of entity aliases improves the LM score. Our evaluation study indicates that these proposed techniques can substantially enhance the quality of the final predictions: ProP won track 2 of the LM-KBC competition, outperforming the baseline by 36.4 percentage points. Our implementation is available on https://github.com/HEmile/iswc-challenge.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
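The abstract's multi-step recipe (candidate generation with variable-length, possibly empty, answer sets, then true/false probes to raise precision) can be sketched schematically. Here `lm_complete` is a hypothetical stand-in for whatever LM API is used, and the prompt wording is invented; see the paper's repository for the actual implementation.

def lm_complete(prompt: str) -> str:
    """Hypothetical stand-in for a large-LM completion call; replace
    with a real API client in practice."""
    raise NotImplementedError

def construct_facts(subject: str, relation: str) -> list[str]:
    # Step 1: ask for a candidate answer set of variable length,
    # explicitly allowing the empty set, as the abstract recommends.
    prompt = (
        f"List all objects for the relation '{relation}' of '{subject}'.\n"
        "Answer with a comma-separated list, or 'NONE' if there are none.\n"
        "Objects:"
    )
    raw = lm_complete(prompt).strip()
    candidates = [] if raw.upper() == "NONE" else [c.strip() for c in raw.split(",")]
    # Step 2: raise precision with a true/false probe per candidate.
    kept = []
    for obj in candidates:
        check = (f"Is the following statement true or false?\n"
                 f"'{obj}' is a correct '{relation}' of '{subject}'.\nAnswer:")
        if lm_complete(check).strip().lower().startswith("true"):
            kept.append(obj)
    return kept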
Martin A; Hinkelmann K; Fill H; Gerber A; Lenat D; Stolle R; van Harmelen F (Ed.)
Proceedings of the AAAI 2022 Spring Symposium on Machine Learning and Knowledge Engineering for Hybrid Intelligence (AAAI-MAKE 2022) Proceedings
CEUR-WS.org, vol. 3121, 2022.
@proceedings{AAAI-MAKE:2022,
title = {Proceedings of the AAAI 2022 Spring Symposium on Machine Learning and Knowledge Engineering for Hybrid Intelligence (AAAI-MAKE 2022), Stanford University, Palo Alto, California, USA, March 21-23, 2022},
editor = {Andreas Martin and Knut Hinkelmann and Hans{-}Georg Fill and Aurona Gerber and Doug Lenat and Reinhard Stolle and Frank van Harmelen},
url = {https://ceur-ws.org/Vol-3121},
year = {2022},
date = {2022-01-01},
volume = {3121},
publisher = {CEUR-WS.org},
series = {CEUR Workshop Proceedings},
abstract = {The AAAI 2022 Spring Symposium on Machine Learning and Knowledge Engineering for Hybrid Intelligence (AAAI-MAKE 2022) brought together researchers and practitioners of the two fields to reflect on advances in combining them, and to present the first results in creating hybrid intelligence with the two AI methods. AAAI-MAKE 2022 is the fourth consecutive edition of this symposium, which combines two prominent AI approaches, symbolic and sub-symbolic AI, as hybrid AI. The remarkable number of submissions again showed a huge demand for combined/hybrid AI approaches that address hybrid intelligence. These proceedings are a collection of papers that contribute to the symposium’s aim of combining machine learning and knowledge engineering, hybrid intelligence / intelligent systems, as well as hybrid AI and neuro-symbolic approaches/methods.},
keywords = {},
pubstate = {published},
tppubtype = {proceedings}
}
Verma M; Acar E
Learning to Cooperate with Human Evaluative Feedback and Demonstrations Book Section
In: HHAI2022: Augmenting Human Intellect, pp. 46–59, IOS Press, 2022.
@incollection{verma2022learning,
title = {Learning to Cooperate with Human Evaluative Feedback and Demonstrations},
author = {Verma, Mehul and Acar, Erman},
url = {https://ebooks.iospress.nl/volumearticle/60857},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
pages = {46–59},
publisher = {IOS Press},
abstract = {Cooperation is a widespread phenomenon in nature that has also been a cornerstone in the development of human intelligence. Understanding cooperation, therefore, on matters such as how it emerges, develops, or fails is an important avenue of research, not only in a human context, but also for the advancement of next generation artificial intelligence paradigms which are presumably human-compatible. With this motivation in mind, we study the emergence of cooperative behaviour between two independent deep reinforcement learning (RL) agents provided with human input in a novel game environment. In particular, we investigate whether evaluative human feedback (through interactive RL) and expert demonstration (through inverse RL) can help RL agents to learn to cooperate better. We report two main findings. Firstly, we find that the amount of feedback given has a positive impact on the accumulated reward obtained through cooperation. That is, agents trained with a limited amount of feedback outperform agents trained without any feedback, and the performance increases even further as more feedback is provided. Secondly, we find that expert demonstration also helps agents’ performance, although with more modest improvements compared to evaluative feedback. In conclusion, we present a novel game environment to better understand the emergence of cooperative behaviour and show that providing human feedback and demonstrations can accelerate this process.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Maas J
A Neo-Republican Critique of AI ethics Journal Article
In: Journal of Responsible Technology, vol. 9, pp. 100022, 2022, ISSN: 2666-6596.
@article{MAAS2022100022,
title = {A Neo-Republican Critique of AI ethics},
author = {Jonne Maas},
url = {https://www.sciencedirect.com/science/article/pii/S2666659621000159},
doi = {10.1016/j.jrt.2021.100022},
issn = {2666-6596},
year = {2022},
date = {2022-01-01},
journal = {Journal of Responsible Technology},
volume = {9},
pages = {100022},
abstract = {The AI Ethics literature, aimed at the responsible development of AI systems, widely agrees on the fact that society is in dire need of effective accountability mechanisms with regard to AI systems. Particularly, machine learning (ML) systems give cause for concern due to their opaque and self-learning characteristics. Nevertheless, what such accountability mechanisms should look like remains either largely unspecified (e.g., stakeholder input) or ineffective (e.g., ethical guidelines). In this paper, I argue that the difficulty to formulate and develop effective accountability mechanisms lies partly in the predominant focus on Mill's harm principle, rooted in the conception of freedom as non-interference. A strong focus on harm overshadows other moral wrongs, such as potentially problematic power dynamics between those who shape the system and those affected by it. I propose that the neo-republican conception of freedom as non-domination provides a suitable framework to inform responsible ML development. Domination, understood by neo-republicans, is a moral wrong as it undermines the potential for human flourishing. In order to mitigate domination, neo-republicans plead for accountability mechanisms that minimize arbitrary relations of power. Neo-republicanism should hence inform responsible ML development as it provides substantive and concrete grounds for when accountability mechanisms are effective (i.e., when they are non-dominating).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shortall R; Itten A; van der Meer M; Murukannaiah P; Jonker C
Reason against the machine? Future directions for mass online deliberation Journal Article
In: Frontiers in Political Science, vol. 4, 2022, ISSN: 2673-3145.
@article{10.3389/fpos.2022.946589,
title = {Reason against the machine? Future directions for mass online deliberation},
author = {Shortall, Ruth and Itten, Anatol and van der Meer, Michiel and Murukannaiah, Pradeep and Jonker, Catholijn},
url = {https://www.frontiersin.org/articles/10.3389/fpos.2022.946589},
doi = {10.3389/fpos.2022.946589},
issn = {2673-3145},
year = {2022},
date = {2022-01-01},
journal = {Frontiers in Political Science},
volume = {4},
abstract = {Designers of online deliberative platforms aim to counter the degrading quality of online debates. Support technologies such as machine learning and natural language processing open avenues for widening the circle of people involved in deliberation, moving from small groups to crowd scale. Numerous design features of large-scale online discussion systems allow larger numbers of people to discuss shared problems, enhance critical thinking, and formulate solutions. We review the transdisciplinary literature on the design of digital mass deliberation platforms and examine the commonly featured design aspects (e.g., argumentation support, automated facilitation, and gamification) that attempt to facilitate scaling up. We find that the literature is largely focused on developing technical fixes for scaling up deliberation, but may neglect the more nuanced requirements of high quality deliberation. Furthermore, current design research is carried out with a small, atypical segment of the world's population, and little research deals with how to facilitate and accommodate different genders or cultures in deliberation, counter pre-existing social inequalities, build motivation and self-efficacy in certain groups, or deal with differences in cognitive abilities and cultural or linguistic differences. We make design and process recommendations to correct this course and suggest avenues for future research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rikhtehgar D J
Conversational access of large-scale knowledge graphs Journal Article
In: Proceedings of the Doctoral Consortium at ISWC 2022, 2022.
@article{rikhtehgar2021conversational,
title = {Conversational access of large-scale knowledge graphs},
author = {Rikhtehgar, Delaram Javdani},
url = {https://ceur-ws.org/Vol-3165/paper5.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Doctoral Consortium at ISWC 2022},
address = {China},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Onnes A
Monitoring AI Systems: A Problem Analysis, Framework and Outlook Proceedings Article
In: Schlobach, Stefan; Pérez-Ortiz, María; Tielman, Myrthe (Ed.): HHAI 2022: Augmenting Human Intellect, pp. 238–240, IOS press, 2022.
@inproceedings{Onnes2022,
title = {Monitoring AI Systems: A Problem Analysis, Framework and Outlook},
author = {Onnes, Annet},
editor = {Schlobach, Stefan and Pérez-Ortiz, María and Tielman, Myrthe},
url = {https://ebooks.iospress.nl/volumearticle/60870},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI 2022: Augmenting Human Intellect},
volume = {354},
pages = {238–240},
publisher = {IOS press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {Knowledge-based systems have been used to monitor machines and processes in the real world. In this paper we propose the use of knowledge-based systems to monitor other AI systems in operation. We motivate and provide a problem analysis of this novel setting and subsequently propose a framework that allows for structuring future research related to this setting. Several directions for further research are also discussed.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Onnes A
Monitoring AI systems: A Problem Analysis, Framework and Outlook Miscellaneous
2022.
@misc{onnes2022b,
title = {Monitoring AI systems: A Problem Analysis, Framework and Outlook},
author = {Onnes, Annet},
url = {https://arxiv.org/abs/2205.02562},
year = {2022},
date = {2022-01-01},
abstract = {Knowledge-based systems have been used to monitor machines and processes in the real world. In this paper we propose the use of knowledge-based systems to monitor other AI systems in operation. We motivate and provide a problem analysis of this novel setting and subsequently propose a framework that allows for structuring future research related to this setting. Several directions for further research are also discussed.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Baier T; Santamaria S B; Vossen P
A modular architecture for creating multimodal agents Proceedings Article
In: MMAI2022: Workshop on the representation, sharing and evaluation of multimodal agent interaction, Workshop at HHAI 2022, 2022.
@inproceedings{baier2022modular,
title = {A modular architecture for creating multimodal agents},
author = {Thomas Baier and Selene Baez Santamaria and Piek Vossen},
url = {https://arxiv.org/abs/2206.00636},
doi = {10.48550/arXiv.2206.00636},
year = {2022},
date = {2022-01-01},
booktitle = {MMAI2022: Workshop on the representation, sharing and evaluation of multimodal agent interaction, Workshop at HHAI 2022},
abstract = {The paper describes a flexible and modular platform to create multimodal interactive agents. The platform operates through an event-bus on which signals and interpretations are posted in a sequence in time. Different sensors and interpretation components can be integrated by defining their input and output as topics, which results in a logical workflow for further interpretations. We explain a broad range of components that have been developed so far and integrated into a range of interactive agents. We also explain how the actual interaction is recorded as multimodal data as well as in a so-called episodic Knowledge Graph. By analysing the recorded interactions, we can compare different agents and agent components.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Steging C; Renooij S; Verheij B
Discovering the Rationale of Decisions Proceedings Article
In: Schlobach, S.; Pérez-Ortiz, M.; Tielman, M. (Ed.): Proceedings of the First International Conference on Hybrid Human-Artificial Intelligence, pp. 255–257, IOS Press, Amsterdam, the Netherlands, 2022.
@inproceedings{steging2022discovering,
title = {Discovering the Rationale of Decisions},
author = {Steging, C. and Renooij, S. and Verheij, B.},
editor = {Schlobach, S. and Pérez-Ortiz,M. and Tielman, M.},
url = {https://ebooks.iospress.nl/volumearticle/60876},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the First International Conference on Hybrid Human-Artificial Intelligence},
volume = {354},
pages = {255–257},
publisher = {IOS Press},
address = {Amsterdam, the Netherlands},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krause L; Sommerauer P; Vossen P
Towards More Informative List Verbalisations Proceedings Article
In: International Workshop on Knowledge Graph Summarization, 2022.
@inproceedings{Krause2022TowardsMI,
title = {Towards More Informative List Verbalisations},
author = {Lea Krause and Pia Sommerauer and P. Vossen},
url = {https://ceur-ws.org/Vol-3257/paper14.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {International Workshop on Knowledge Graph Summarization},
abstract = {In this paper we propose the task of list verbalisation within a Knowledge Graph Question Answering system. Inspired by the Gricean Maxims of Quantity, Relation, and Manner, we show a proof of concept ranking answer candidates through graph-based and language-model-based measurements for, on the one hand, popularity and, on the other, a more pragmatically informed context. Our findings show that in our current set-up graph-based measures work best, while language-model-based systems need further refinement and may benefit from approaches such as fine-tuning or prompting. We evaluate our approach with a user study and give insights into promising future directions of the task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Droog S; Ligthart M
Opportunity map: Potential child-robot-relationship factors Miscellaneous
2022.
@misc{de_droog_opportunity_2022,
title = {Opportunity map: Potential child-robot-relationship factors},
author = {de Droog, Simone and Ligthart, Mike},
url = {https://www.researchgate.net/publication/366228485_Opportunity_map_Potential_child-robot-relationship_factors},
doi = {10.13140/RG.2.2.26355.60964},
year = {2022},
date = {2022-01-01},
publisher = {University of Applied Sciences Utrecht},
abstract = {Robots are increasingly being used in education. But children's motivation to interact with a robot often disappears once the novelty wears off. As a result, learning effects are short-lived and offer little added value for education. A new, more sustainable, motivation for children to keep interacting could be that they feel a meaningful relationship with a robot. However, little is known about how robots can stimulate relationship building. For this reason, several workshops with child-robot researchers and practitioners (i.e., robot software developers, robot vendors, school robot facilitators and trainers, pedagogical experts) were organized to identify factors (from academic literature and field experiences) that potentially stimulate relationship formation between children and robots. This opportunity map presents the most important insights to inspire researchers and practitioners in the educational field to further develop, test and implement these factors.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ligthart M; Neerincx M; Hindriks K
Memory-Based Personalization for Fostering a Long-Term Child-Robot Relationship Proceedings Article
In: Sakamoto, Daisuke; Weiss, Astrid; Hiatt, Laura M.; Shiomi, Masahiro (Ed.): ACM/IEEE International Conference on Human-Robot Interaction, HRI 2022, Sapporo, Hokkaido, Japan, March 7 - 10, 2022, pp. 80–89, IEEE / ACM, 2022.
@inproceedings{DBLP:conf/hri/LigthartNH22,
title = {Memory-Based Personalization for Fostering a Long-Term Child-Robot Relationship},
author = {Mike Ligthart and Mark Neerincx and Koen Hindriks},
editor = {Daisuke Sakamoto and Astrid Weiss and Laura M. Hiatt and Masahiro Shiomi},
url = {https://repository.tudelft.nl/islandora/object/uuid%3A99db22f6-19cb-4d21-85c2-bc1250a9fa01},
doi = {10.1109/HRI53351.2022.9889446},
year = {2022},
date = {2022-01-01},
booktitle = {ACM/IEEE International Conference on Human-Robot Interaction, HRI 2022, Sapporo, Hokkaido, Japan, March 7 - 10, 2022},
pages = {80–89},
publisher = {IEEE / ACM},
abstract = {After the novelty effect wears off children need a new motivator to keep interacting with a social robot. Enabling children to build a relationship with the robot is the key for facilitating a sustainable long-term interaction. We designed a memory-based personalization strategy that safeguards the continuity between sessions and tailors the interaction to the child's needs and interests to foster the child-robot relationship. A longitudinal (five sessions in two months) user study (N = 46, 8-10 y.o.) showed that the strategy kept children interested longer in the robot, fosters more closeness, elicits more positive social cues, and adds continuity between sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Elloumi L; Bossema M; de Droog S M; Smakman M; van Ginkel S; Ligthart M E U; Hoogland K; Hindriks K V; Ben Allouch S
Exploring Requirements and Opportunities for Social Robots in Primary Mathematics Education Proceedings Article
In: 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE Press, 2022.
@inproceedings{elloumi_exploring_2022,
title = {Exploring Requirements and Opportunities for Social Robots in Primary Mathematics Education},
author = {Elloumi, Lamia and Bossema, Marianne and de Droog, Simone M. and Smakman, Matthijs and van Ginkel, Stan and Ligthart, Mike E.U. and Hoogland, Kees and Hindriks, Koen V. and Ben Allouch, Somaya},
url = {https://research.vu.nl/en/publications/exploring-requirements-and-opportunities-for-social-robots-in-pri},
doi = {10.1109/RO-MAN53752.2022.9900569},
year = {2022},
date = {2022-01-01},
booktitle = {31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE Press},
abstract = {Social robots have been introduced in different fields such as retail, health care and education. Primary education in the Netherlands (and elsewhere) recently faced new challenges because of the COVID-19 pandemic, lockdowns and quarantines including students falling behind and teachers burdened with high workloads. Together with two Dutch municipalities and nine primary schools we are exploring the long-term use of social robots to study how social robots might support teachers in primary education, with a focus on mathematics education. This paper presents an explorative study to define requirements for a social robot math tutor. Multiple focus groups were held with the two main stakeholders, namely teachers and students. During the focus groups the aim was 1) to understand the current situation of mathematics education in the upper primary school level, 2) to identify the problems that teachers and students encounter in mathematics education, and 3) to identify opportunities for deploying a social robot math tutor in primary education from the perspective of both the teachers and students. The results inform the development of social robots and opportunities for pedagogical methods used in math teaching, child-robot interaction and potential support for teachers in the classroom.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Raman C; Quiros J V; Tan S; Islam A; Gedik E; Hung H
ConfLab: A Data Collection Concept, Dataset, and Benchmark for Machine Analysis of Free-Standing Social Interactions in the Wild Proceedings Article
In: Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2022.
@inproceedings{raman2022conflab,
title = {ConfLab: A Data Collection Concept, Dataset, and Benchmark for Machine Analysis of Free-Standing Social Interactions in the Wild},
author = {Chirag Raman and Jose Vargas Quiros and Stephanie Tan and Ashraful Islam and Ekin Gedik and Hayley Hung},
url = {https://openreview.net/forum?id=CNJQKM5cV2o},
year = {2022},
date = {2022-01-01},
booktitle = {Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tsfasman M; Fenech K; Tarvirdians M; Lorincz A; Jonker C; Oertel C
Towards Creating a Conversational Memory for Long-Term Meeting Support: Predicting Memorable Moments in Multi-Party Conversations through Eye-Gaze Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 94–104, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3556613,
title = {Towards Creating a Conversational Memory for Long-Term Meeting Support: Predicting Memorable Moments in Multi-Party Conversations through Eye-Gaze},
author = {Tsfasman, Maria and Fenech, Kristian and Tarvirdians, Morita and Lorincz, Andras and Jonker, Catholijn and Oertel, Catharine},
url = {https://doi.org/10.1145/3536221.3556613},
doi = {10.1145/3536221.3556613},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {94–104},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {When working in a group, it is essential to understand each other’s viewpoints to increase group cohesion and meeting productivity. This can be challenging in teams: participants might be left misunderstood and the discussion could be going around in circles. To tackle this problem, previous research on group interactions has addressed topics such as dominance detection, group engagement, and group creativity. Conversational memory, however, remains a widely unexplored area in the field of multimodal analysis of group interaction. The ability to track what each participant or a group as a whole finds memorable from each meeting would allow a system or agent to continuously optimise its strategy to help a team meet its goals. In the present paper, we therefore investigate what participants take away from each meeting and how it is reflected in group dynamics. As a first step toward such a system, we recorded a multimodal longitudinal meeting corpus (MEMO), which comprises a first-party annotation of what participants remember from a discussion and why they remember it. We investigated whether participants of group interactions encode what they remember non-verbally and whether we can use such non-verbal multimodal features to predict what groups are likely to remember automatically. We devise a coding scheme to cluster participants’ memorisation reasons into higher-level constructs. We find that low-level multimodal cues, such as gaze and speaker activity, can predict conversational memorability. We also find that non-verbal signals can indicate when a memorable moment starts and ends. We could predict four levels of conversational memorability with an average accuracy of 44%. We also showed that reasons related to participants’ personal feelings and experiences are the most frequently mentioned grounds for remembering meeting segments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; Küster D; St-Onge D; Putze F
The 4th Workshop on Modeling Socio-Emotional and Cognitive Processes from Multimodal Data In-the-Wild (MSECP-Wild) Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 803–804, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3564029,
title = {The 4th Workshop on Modeling Socio-Emotional and Cognitive Processes from Multimodal Data In-the-Wild (MSECP-Wild)},
author = {Dudzik, Bernd and Küster, Dennis and St-Onge, David and Putze, Felix},
url = {https://pure.tudelft.nl/admin/files/140622118/3536221.3564029.pdf},
doi = {10.1145/3536221.3564029},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {803–804},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {The ability to automatically infer relevant aspects of human users’ thoughts and feelings is crucial for technologies to adapt their behaviors in complex interactions intelligently (e.g., social robots or tutoring systems). Research on multimodal analysis has demonstrated the potential of technology to provide such estimates for a broad range of internal states and processes. However, constructing robust enough approaches for deployment in real-world applications remains an open problem. The MSECP-Wild workshop series serves as a multidisciplinary forum to present and discuss research addressing this challenge. This 4th iteration focuses on addressing varying contextual conditions (e.g., throughout an interaction or across different situations and environments) in intelligent systems, a crucial barrier to more valid real-world predictions and actions. Submissions to the workshop span efforts relevant to multimodal data collection and context-sensitive modeling. These works provide important impulses for discussions of the state-of-the-art and opportunities for future research on these subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dudzik B; Hung H
Exploring the Detection of Spontaneous Recollections during Video-Viewing In-the-Wild Using Facial Behavior Analysis Proceedings Article
In: Proceedings of the 2022 International Conference on Multimodal Interaction, pp. 236–246, Association for Computing Machinery, Bengaluru, India, 2022, ISBN: 9781450393904.
@inproceedings{10.1145/3536221.3556609,
title = {Exploring the Detection of Spontaneous Recollections during Video-Viewing In-the-Wild Using Facial Behavior Analysis},
author = {Dudzik, Bernd and Hung, Hayley},
url = {https://dl.acm.org/doi/10.1145/3536221.3556609},
doi = {10.1145/3536221.3556609},
isbn = {9781450393904},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
pages = {236–246},
publisher = {Association for Computing Machinery},
address = {Bengaluru, India},
series = {ICMI '22},
abstract = {Intelligent systems might benefit from automatically detecting when a stimulus has triggered a user’s recollection of personal memories, e.g., to identify that a piece of media content holds personal significance for them. While computational research has demonstrated the potential to identify related states based on facial behavior (e.g., mind-wandering), the automatic detection of spontaneous recollections specifically has not been investigated thus far. Motivated by this, we present machine learning experiments exploring the feasibility of detecting whether a video clip has triggered personal memories in a viewer based on the analysis of their Head Rotation, Head Position, Eye Gaze, and Facial Expressions. Concretely, we introduce an approach for automatic detection and evaluate its potential for predictions using in-the-wild webcam recordings. Overall, our findings demonstrate the capacity for above chance detections in both settings, with substantially better performance for the video-independent variant. Beyond this, we investigate the role of person-specific recollection biases for predictions of our video-independent models and the importance of specific modalities of facial behavior. Finally, we discuss the implications of our findings for detecting recollections and user-modeling in adaptive systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Krieken E; Acar E; van Harmelen F
Analyzing differentiable fuzzy logic operators Journal Article
In: Artificial Intelligence, vol. 302, pp. 103602, 2022.
@article{van2022analyzing,
title = {Analyzing differentiable fuzzy logic operators},
author = {van Krieken, Emile and Acar, Erman and van Harmelen, Frank},
url = {https://research.vu.nl/ws/portalfiles/portal/146020254/2002.06100v2.pdf},
year = {2022},
date = {2022-01-01},
journal = {Artificial Intelligence},
volume = {302},
pages = {103602},
publisher = {Elsevier},
abstract = {The AI community is increasingly turning its attention towards combining symbolic and neural approaches, as it is often argued that the strengths and weaknesses of these approaches are complementary. One recent trend in the literature is weakly supervised learning techniques that employ operators from fuzzy logics. In particular, these use prior background knowledge described in such logics to help the training of a neural network from unlabeled and noisy data. By interpreting logical symbols using neural networks, this background knowledge can be added to regular loss functions, hence making reasoning a part of learning. We study, both formally and empirically, how a large collection of logical operators from the fuzzy logic literature behave in a differentiable learning setting. We find that many of these operators, including some of the most well-known, are highly unsuitable in this setting. A further finding concerns the treatment of implication in these fuzzy logics, and shows a strong imbalance between gradients driven by the antecedent and the consequent of the implication. Furthermore, we introduce a new family of fuzzy implications (called sigmoidal implications) to tackle this phenomenon. Finally, we empirically show that it is possible to use Differentiable Fuzzy Logics for semi-supervised learning, and compare how different operators behave in practice. We find that, to achieve the largest performance improvement over a supervised baseline, we have to resort to non-standard combinations of logical operators which perform well in learning, but no longer satisfy the usual logical laws.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
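The gradient imbalance the abstract refers to can be seen directly by differentiating a standard fuzzy implication. The operators in this PyTorch sketch are textbook definitions from the product family, not the paper's proposed sigmoidal implications.

import torch

# Standard fuzzy operators (product family): t-norm, t-conorm, implication
def t_norm(a, b):       return a * b                 # conjunction
def t_conorm(a, b):     return a + b - a * b         # disjunction
def reichenbach(a, c):  return 1.0 - a + a * c       # implication a -> c

# At a point with a mostly-false antecedent, the gradient w.r.t. the
# antecedent (c - 1 = -0.9) dwarfs the one w.r.t. the consequent
# (a = 0.1): learning mainly falsifies antecedents rather than making
# consequents true, the kind of imbalance the paper analyzes.
a = torch.tensor(0.1, requires_grad=True)  # mostly false antecedent
c = torch.tensor(0.1, requires_grad=True)  # mostly false consequent
reichenbach(a, c).backward()
print(a.grad.item(), c.grad.item())  # -0.9 vs 0.1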
Romero D W; Kuzina A; Bekkers E J; Tomczak J M; Hoogendoorn M
CKConv: Continuous Kernel Convolution For Sequential Data Journal Article
In: International Conference on Learning Representations (ICLR), 2022.
@article{DBLP:journals/corr/abs-2102-02611,
title = {CKConv: Continuous Kernel Convolution For Sequential Data},
author = {David W. Romero and Anna Kuzina and Erik J. Bekkers and Jakub M. Tomczak and Mark Hoogendoorn},
url = {https://openreview.net/pdf?id=8FhxBtXSl0},
year = {2022},
date = {2022-01-01},
journal = {International Conference on Learning Representations (ICLR)},
abstract = {Conventional neural architectures for sequential data present important limitations. Recurrent networks suffer from exploding and vanishing gradients, small effective memory horizons, and must be trained sequentially. Convolutional networks are unable to handle sequences of unknown size and their memory horizon must be defined a priori. In this work, we show that all these problems can be solved by formulating convolutional kernels in CNNs as continuous functions. The resulting Continuous Kernel Convolution (CKConv) allows us to model arbitrarily long sequences in a parallel manner, within a single operation, and without relying on any form of recurrence. We show that Continuous Kernel Convolutional Networks (CKCNNs) obtain state-of-the-art results in multiple datasets, e.g., permuted MNIST, and, thanks to their continuous nature, are able to handle non-uniformly sampled datasets and irregularly-sampled data natively. CKCNNs match or perform better than neural ODEs designed for these purposes in a faster and simpler manner.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
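The core construction, a convolution kernel defined as a continuous function of relative position and sampled at whatever length the input has, can be sketched in a few lines of PyTorch. This is an illustrative simplification, not the published model: CKConv uses a more careful kernel network (with sine nonlinearities) and normalization, whereas this version uses a plain ReLU MLP.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ContinuousKernelConv1d(nn.Module):
    def __init__(self, in_ch, out_ch, hidden=32):
        super().__init__()
        # MLP mapping a relative position in [-1, 1] to kernel weights.
        self.kernel_net = nn.Sequential(
            nn.Linear(1, hidden), nn.ReLU(),
            nn.Linear(hidden, out_ch * in_ch),
        )
        self.in_ch, self.out_ch = in_ch, out_ch

    def forward(self, x):  # x: (batch, in_ch, length)
        length = x.shape[-1]
        # Sample the continuous kernel at as many positions as the input has;
        # this is what lets one model handle sequences of arbitrary length.
        pos = torch.linspace(-1.0, 1.0, length).unsqueeze(-1)  # (L, 1)
        weights = self.kernel_net(pos)                         # (L, out*in)
        kernel = weights.t().reshape(self.out_ch, self.in_ch, length)
        return F.conv1d(x, kernel, padding=length - 1)[..., :length]

x = torch.randn(2, 3, 100)
y = ContinuousKernelConv1d(3, 8)(x)
print(y.shape)  # torch.Size([2, 8, 100])
```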
Liscio E; van der Meer M; Siebert L C; Jonker C M; Murukannaiah P K
What values should an agent align with? Journal Article
In: Autonomous Agents and Multi-Agent Systems, vol. 36, no. 23, pp. 32, 2022.
@article{Liscio2022,
title = {What values should an agent align with?},
author = {Liscio, Enrico and van der Meer, Michiel and Siebert, Luciano C. and Jonker, Catholijn M. and Murukannaiah, Pradeep K.},
url = {https://link.springer.com/content/pdf/10.1007/s10458-022-09550-0},
year = {2022},
date = {2022-01-01},
journal = {Autonomous Agents and Multi-Agent Systems},
volume = {36},
number = {23},
pages = {32},
publisher = {Springer US},
abstract = {The pursuit of values drives human behavior and promotes cooperation. Existing research is focused on general values (e.g., Schwartz) that transcend contexts. However, context-specific values are necessary to (1) understand human decisions, and (2) engineer intelligent agents that can elicit and align with human values. We propose Axies, a hybrid (human and AI) methodology to identify context-specific values. Axies simplifies the abstract task of value identification as a guided value annotation process involving human annotators. Axies exploits the growing availability of value-laden text corpora and Natural Language Processing to assist the annotators in systematically identifying context-specific values. We evaluate Axies in a user study involving 80 human subjects. In our study, six annotators generate value lists for two timely and important contexts: Covid-19 measures and sustainable Energy. We employ two policy experts and 72 crowd workers to evaluate Axies value lists and compare them to a list of general (Schwartz) values. We find that Axies yields values that are (1) more context-specific than general values, (2) more suitable for value annotation than general values, and (3) independent of the people applying the methodology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kiseleva J; Li Z; Aliannejadi M; Mohanty S; ter Hoeve M; Burtsev M; Skrynnik A; Zholus A; Panov A; Srinet K; Szlam A; Sun Y; Hofmann K; Côté M; Awadallah A; Abdrazakov L; Churin I; Manggala P; Naszadi K; van der Meer M; Kim T
Interactive Grounded Language Understanding in a Collaborative Environment: IGLU 2021 Journal Article
In: 2022.
@article{IGLU2022,
title = {Interactive Grounded Language Understanding in a Collaborative Environment: IGLU 2021},
author = {Kiseleva, Julia and Li, Ziming and Aliannejadi, Mohammad and Mohanty, Shrestha and ter Hoeve, Maartje and Burtsev, Mikhail and Skrynnik, Alexey and Zholus, Artem and Panov, Aleksandr and Srinet, Kavya and Szlam, Arthur and Sun, Yuxuan and Hofmann, Katja and Côté, Marc-Alexandre and Awadallah, Ahmed and Abdrazakov, Linar and Churin, Igor and Manggala, Putra and Naszadi, Kata and van der Meer, Michiel and Kim, Taewoon},
url = {https://arxiv.org/abs/2205.02388},
doi = {10.48550/ARXIV.2205.02388},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {Human intelligence has the remarkable ability to quickly adapt to new tasks and environments. Starting from a very young age, humans acquire new skills and learn how to solve new tasks either by imitating the behavior of others or by following provided natural language instructions. To facilitate research in this direction, we propose IGLU: Interactive Grounded Language Understanding in a Collaborative Environment. The primary goal of the competition is to approach the problem of how to build interactive agents that learn to solve a task while provided with grounded natural language instructions in a collaborative environment. Understanding the complexity of the challenge, we split it into sub-tasks to make it feasible for participants.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Grossi D
Social Choice Around the Block: On the Computational Social Choice of Blockchain Proceedings Article
In: Faliszewski, Piotr; Mascardi, Viviana; Pelachaud, Catherine; Taylor, Matthew E. (Ed.): 21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022, pp. 1788–1793, International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS), 2022.
@inproceedings{DBLP:conf/atal/Grossi22,
title = {Social Choice Around the Block: On the Computational Social Choice of Blockchain},
author = {Davide Grossi},
editor = {Piotr Faliszewski and Viviana Mascardi and Catherine Pelachaud and Matthew E. Taylor},
url = {https://www.ifaamas.org/Proceedings/aamas2022/pdfs/p1788.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {21st International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2022, Auckland, New Zealand, May 9-13, 2022},
pages = {1788–1793},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
abstract = {One of the most innovative aspects of blockchain technology consists in the introduction of an incentive layer to regulate the behavior of distributed protocols. The designer of a blockchain system therefore faces issues akin to those relevant for the design of economic mechanisms, and faces them in a computational setting. From this perspective the present paper argues for the importance of computational social choice in blockchain research. It identifies a few challenges at the interface of the two fields that illustrate the strong potential for cross-fertilization between them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Verma R; Nalisnick E
Calibrated Learning to Defer with One-vs-All Classifiers Proceedings Article
In: ICML 2022 Workshop on Human-Machine Collaboration and Teaming, 2022.
@inproceedings{Verma-Nalisnick-ICML:2022,
title = {Calibrated Learning to Defer with One-vs-All Classifiers},
author = {Rajeev Verma and Eric Nalisnick},
url = {https://icml.cc/Conferences/2022/ScheduleMultitrack?event=18123},
year = {2022},
date = {2022-01-01},
booktitle = {ICML 2022 Workshop on Human-Machine Collaboration and Teaming},
abstract = {The learning to defer (L2D) framework has the potential to make AI systems safer. For a given input, the system can defer the decision to a human if the human is more likely than the model to take the correct action. We study the calibration of L2D systems, investigating if the probabilities they output are sound. We find that Mozannar & Sontag’s (2020) multiclass framework is not calibrated with respect to expert correctness. Moreover, it is not even guaranteed to produce valid probabilities due to its parameterization being degenerate for this purpose. We propose an L2D system based on one-vs-all classifiers that is able to produce calibrated probabilities of expert correctness. Furthermore, our loss function is also a consistent surrogate for multiclass L2D, like Mozannar & Sontag's (2020). Our experiments verify that not only is our system calibrated, but this benefit comes at no cost to accuracy. Our model's accuracy is always comparable (and often superior) to Mozannar & Sontag's (2020) model's in tasks ranging from hate speech detection to galaxy classification to diagnosis of skin lesions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
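The decision rule of a one-vs-all L2D system can be summarized compactly: each class gets its own sigmoid-calibrated correctness probability, and an additional sigmoid output estimates the probability that the expert is correct; the system defers whenever the latter dominates. The NumPy sketch below is our paraphrase of that rule, not the authors' implementation, and training with the consistent surrogate loss is omitted.

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def l2d_decision(class_logits, expert_logit):
    """Return ('defer', None) or ('predict', class_index)."""
    p_classes = sigmoid(class_logits)  # per-class one-vs-all probabilities
    p_expert = sigmoid(expert_logit)   # calibrated P(expert is correct | x)
    if p_expert > p_classes.max():
        return "defer", None
    return "predict", int(p_classes.argmax())

# The expert looks more reliable than the best class score, so we defer.
print(l2d_decision(np.array([-1.2, 0.4, 2.0]), expert_logit=2.5))
```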
Manggala P; Hoos H H; Nalisnick E
Bayesian Weak Supervision via an Optimal Transport Approach Proceedings Article
In: ICML 2022 Workshop on Human-Machine Collaboration and Teaming, 2022.
@inproceedings{manggala2022optimaltransportweaksupervision,
title = {Bayesian Weak Supervision via an Optimal Transport Approach},
author = {Manggala, Putra and Hoos, Holger H. and Nalisnick, Eric},
url = {https://openreview.net/forum?id=YJkf-6tTFiY},
year = {2022},
date = {2022-01-01},
booktitle = {ICML 2022 Workshop on Human-Machine Collaboration and Teaming},
abstract = {Large-scale machine learning is often impeded by a lack of labeled training data. To address this problem, the paradigm of weak supervision aims to collect and then aggregate multiple noisy labels. We propose a Bayesian probabilistic model that employs a tractable Sinkhorn-based optimal transport formulation to derive a ground-truth label. The translation between true and weak labels is cast as a transport problem with an inferred cost structure. Our approach achieves strong performance on the WRENCH weak supervision benchmark. Moreover, the posterior distribution over cost matrices allows for exploratory analysis of the weak sources.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
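For reference, the tractable computation at the heart of the approach is the standard Sinkhorn fixed-point iteration for entropy-regularized optimal transport, sketched below in NumPy; the Bayesian inference over cost matrices described in the abstract is omitted.

```python
import numpy as np

def sinkhorn(cost, r, c, eps=0.1, n_iters=200):
    """Entropic OT plan between marginals r and c for a given cost matrix."""
    K = np.exp(-cost / eps)             # Gibbs kernel
    u = np.ones_like(r)
    for _ in range(n_iters):
        v = c / (K.T @ u)               # alternating marginal scaling
        u = r / (K @ v)
    return u[:, None] * K * v[None, :]  # transport plan with marginals (r, c)

cost = np.random.default_rng(0).random((3, 4))
plan = sinkhorn(cost, np.full(3, 1 / 3), np.full(4, 1 / 4))
print(plan.sum(axis=1), plan.sum(axis=0))  # ≈ the requested marginals
```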
Dobbe R
System Safety and Artificial Intelligence Book Section
In: Oxford Handbook of AI Governance, vol. To Appear, Oxford, 2022, ISBN: 978-0-19-757932-9.
@incollection{dobbe_system_2022,
title = {System Safety and Artificial Intelligence},
author = {Dobbe, Roel},
url = {https://arxiv.org/abs/2202.09292},
isbn = {978-0-19-757932-9},
year = {2022},
date = {2022-01-01},
booktitle = {Oxford Handbook of AI Governance},
volume = {To Appear},
address = {Oxford},
abstract = {This chapter formulates seven lessons for preventing harm in artificial intelligence (AI) systems based on insights from the field of system safety for software-based automation in safety-critical domains. New applications of AI across societal domains and public organizations and infrastructures come with new hazards, which lead to new forms of harm, both grave and pernicious. The text addresses the lack of consensus for diagnosing and eliminating new AI system hazards. For decades, the field of system safety has dealt with accidents and harm in safety-critical systems governed by varying degrees of software-based automation and decision-making. This field embraces the core assumption of systems and control that AI systems cannot be safeguarded by technical design choices on the model or algorithm alone, instead requiring an end-to-end hazard analysis and design frame that includes the context of use, impacted stakeholders and the formal and informal institutional environment in which the system operates. Safety and other values are then inherently socio-technical and emergent system properties that require design and control measures to instantiate these across the technical, social and institutional components of a system. This chapter honors system safety pioneer Nancy Leveson, by situating her core lessons for today's AI system safety challenges. For every lesson, concrete tools are offered for rethinking and reorganizing the safety management of AI systems, both in design and governance. This history tells us that effective AI safety management requires transdisciplinary approaches and a shared language that allows involvement of all levels of society.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sauter A; Acar E; François-Lavet V
A Meta-Reinforcement Learning Algorithm for Causal Discovery Miscellaneous
2022.
@misc{Sauter22MetaRL,
title = {A Meta-Reinforcement Learning Algorithm for Causal Discovery},
author = {Sauter, Andreas and Acar, Erman and François-Lavet, Vincent},
url = {https://arxiv.org/abs/2207.08457},
doi = {10.48550/ARXIV.2207.08457},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {Causal discovery is a major task with the utmost importance for machine learning since causal structures can enable models to go beyond pure correlation-based inference and significantly boost their performance. However, finding causal structures from data poses a significant challenge both in computational effort and accuracy, let alone its impossibility without interventions in general. In this paper, we develop a meta-reinforcement learning algorithm that performs causal discovery by learning to perform interventions such that it can construct an explicit causal graph. Apart from being useful for possible downstream applications, the estimated causal graph also provides an explanation for the data-generating process. In this article, we show that our algorithm estimates a good graph compared to the SOTA approaches, even in environments whose underlying causal structure is previously unseen. Further, we conduct an ablation study that shows how learning interventions contributes to the overall performance of our approach. We conclude that interventions indeed help boost the performance, efficiently yielding an accurate estimate of the causal structure of a possibly unseen environment.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
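Why do learned interventions help causal discovery? In a structural causal model, clamping a variable with do(·) shifts the distributions of exactly its descendants, so an agent that can intervene gains edge information that observational data alone cannot provide. The toy sketch below (a three-variable chain of our own construction, not the paper's algorithm) makes this visible.

```python
import numpy as np

rng = np.random.default_rng(0)

def sample(n, do=None):
    """Tiny SCM with chain A -> B -> C; `do` optionally clamps one variable."""
    a = rng.normal(size=n) if do != "A" else np.full(n, 3.0)
    b = 2 * a + rng.normal(size=n) if do != "B" else np.full(n, 3.0)
    c = -b + rng.normal(size=n) if do != "C" else np.full(n, 3.0)
    return {"A": a, "B": b, "C": c}

obs = sample(10_000)
for target in ["A", "B", "C"]:
    intervened = sample(10_000, do=target)
    shifted = [v for v in "ABC"
               if v != target
               and abs(intervened[v].mean() - obs[v].mean()) > 0.5]
    print(f"do({target}) shifts: {shifted}")  # exactly the target's descendants
```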
Heuss M; Sarvi F; de Rijke M
Fairness of Exposure in Light of Incomplete Exposure Estimation Proceedings Article
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 759–769, Association for Computing Machinery, Madrid, Spain, 2022.
@inproceedings{heuss-2022-fairness,
title = {Fairness of Exposure in Light of Incomplete Exposure Estimation},
author = {Heuss, Maria and Sarvi, Fatemeh and de Rijke, Maarten},
url = {https://irlab.science.uva.nl/wp-content/papercite-data/pdf/heuss-2022-fairness.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {759–769},
publisher = {Association for Computing Machinery},
address = {Madrid, Spain},
abstract = {Fairness of exposure is a commonly used notion of fairness for ranking systems. It is based on the idea that all items or item groups should get exposure proportional to the merit of the item or the collective merit of the items in the group. Often, stochastic ranking policies are used to ensure fairness of exposure. Previous work unrealistically assumes that we can reliably estimate the expected exposure for all items in each ranking produced by the stochastic policy. In this work, we discuss how to approach fairness of exposure in cases where the policy contains rankings of which, due to inter-item dependencies, we cannot reliably estimate the exposure distribution. In such cases, we cannot determine whether the policy can be considered fair. Our contributions in this paper are twofold. First, we define a method for finding stochastic policies that avoid showing rankings with unknown exposure distribution to the user, without having to compromise user utility or item fairness. Second, we extend the study of fairness of exposure to the top-k setting and assess our method in this setting as well. We find that our method can significantly reduce the number of rankings with unknown exposure distribution without a drop in user utility or fairness compared to existing fair ranking methods, both for full-length and top-k rankings. This is an important first step in developing fair ranking methods for cases where we have incomplete knowledge about the user's behaviour.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
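The bookkeeping behind fairness of exposure is worth making concrete: a stochastic policy is a distribution over rankings, and each item's expected exposure is the policy-weighted sum of position-based examination probabilities. The toy enumeration below is ours, and it computes precisely the position-based estimate whose unreliability under inter-item dependencies motivates the paper.

```python
import numpy as np
from itertools import permutations

items = ["a", "b", "c"]
# DCG-style position-based examination probabilities for ranks 1..n.
exposure_at_rank = 1.0 / np.log2(np.arange(2, len(items) + 2))

# A stochastic policy: one probability per possible ranking.
rankings = list(permutations(items))
policy = np.random.default_rng(1).dirichlet(np.ones(len(rankings)))

expected = {i: 0.0 for i in items}
for p, ranking in zip(policy, rankings):
    for rank, item in enumerate(ranking):
        expected[item] += p * exposure_at_rank[rank]

# Compare each item's expected exposure to its merit to assess fairness.
print(expected)
```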
Ho L; de Boer V; van Riemsdijk M B; Schlobach S; Tielman M
Argumentation for Knowledge Base Inconsistencies in Hybrid Intelligence Scenarios Proceedings Article
In: The 1st International Workshop on Knowledge Representation for Hybrid Intelligence (KR4HI), online, 2022.
@inproceedings{HHAI2022,
title = {Argumentation for Knowledge Base Inconsistencies in Hybrid Intelligence Scenarios},
author = {Ho, Loan and de Boer, Victor and van Riemsdijk, M. Birna and Schlobach, Stefan and Tielman, Myrthe},
url = {https://research.utwente.nl/en/publications/argumentation-for-knowledge-base-inconsistencies-in-hybrid-intell},
year = {2022},
date = {2022-01-01},
publisher = {The 1st International Workshop on Knowledge Representation for Hybrid Intelligence (KR4HI)},
address = {online},
abstract = {Hybrid Intelligence (HI) is the combination of human and machine intelligence, expanding human intellect instead of replacing it. In HI scenarios, inconsistencies in knowledge bases (KBs) can occur for a variety of reasons. These include shifting preferences, users' motivations, and/or external conditions (for example, available resources and the environment can vary over time). Argumentation is a potential method to address such inconsistencies as it provides a mechanism for reasoning with conflicting information, with natural explanations that are understandable to humans. In this paper, we investigate the capabilities of Argumentation in representing and reasoning about knowledge of both human and artificial agents in the presence of inconsistency. Moreover, we show how Argumentation enables Explainability for addressing problems in Decision-Making and Justification of an opinion. In order to investigate the applicability of Argumentation in HI scenarios, we demonstrate a mapping of two specific HI scenarios to Argumentation problems. We analyse to what extent Argumentation is applicable by clarifying the practical inconsistency types of the HI scenarios that Argumentation can address. These include inconsistencies related to recommendations and decision making. We then model, in particular, the presentation of conflicting information for each scenario based on the form of argument representation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ho L; Arch-int S; Acar E; Schlobach S; Arch-int N
An argumentative approach for handling inconsistency in prioritized Datalog± ontologies Journal Article
In: AI Commun., vol. 35, no. 3, pp. 243–267, 2022, ISSN: 0921-7126.
@article{10.3233/AIC-220087,
title = {An argumentative approach for handling inconsistency in prioritized Datalog± ontologies},
author = {Ho, Loan and Arch-int, Somjit and Acar, Erman and Schlobach, Stefan and Arch-int, Ngamnij},
url = {https://research.vu.nl/en/publications/an-argumentative-approach-for-handling-inconsistency-in-prioritiz},
doi = {10.3233/AIC-220087},
issn = {0921-7126},
year = {2022},
date = {2022-01-01},
journal = {AI Commun.},
volume = {35},
number = {3},
pages = {243–267},
publisher = {IOS Press},
address = {NLD},
abstract = {Prioritized Datalog± is a well-studied formalism for modelling ontological knowledge and data, and has a success story in many applications in the (Semantic) Web and in other domains. Since the information content on the Web is both inherently context-dependent and frequently updated, the occurrence of a logical inconsistency is often inevitable. This phenomenon has led the research community to develop various types of inconsistency-tolerant semantics over the last few decades. Although the study of query answering under inconsistency-tolerant semantics is well-understood, the problem of explaining query answering under such semantics has received considerably less attention, especially in the scenario where the facts are prioritized. In this paper, we aim to fill this gap. More specifically, we use Dung's abstract argumentation framework to address the problem of explaining inconsistency-tolerant query answering in Datalog± KBs where facts are prioritized, or preordered. We clarify the relationship between preferred repair semantics and various notions of extensions for argumentation frameworks. The strength of such an argumentation-based approach is its explainability; users can more easily understand why different points of view are conflicting and why the query answer is entailed (or not) under different semantics. To this end we introduce the formal notion of a dialogical explanation, and show how it can be used to explain why query results do or do not hold according to the known semantics in inconsistent Datalog± knowledge bases.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
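The argumentation machinery that this and the previous entry rely on is Dung's abstract framework. As a reference point, its grounded extension (the unique minimal complete extension) can be computed by iterating the characteristic function from the empty set, as in this small sketch of ours.

```python
def grounded_extension(arguments, attacks):
    """arguments: iterable of names; attacks: set of (attacker, target) pairs."""
    attackers = {a: {x for (x, t) in attacks if t == a} for a in arguments}

    def defended(s, a):
        # Every attacker of `a` is itself attacked by some argument in `s`.
        return all(attackers[x] & s for x in attackers[a])

    ext = set()
    while True:  # iterate the characteristic function to its least fixpoint
        nxt = {a for a in arguments if defended(ext, a)}
        if nxt == ext:
            return ext
        ext = nxt

# a attacks b, b attacks c: the grounded extension accepts a and c.
print(grounded_extension({"a", "b", "c"}, {("a", "b"), ("b", "c")}))
```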
Prakken H; Ratsma R
A top-level model of case-based argumentation for explanation: Formalisation and experiments Journal Article
In: Argument and Computation, vol. 13, pp. 159–194, 2022.
@article{p+r22,
title = {A top-level model of case-based argumentation for explanation: Formalisation and experiments},
author = {H. Prakken and R. Ratsma},
url = {https://content.iospress.com/articles/argument-and-computation/aac210009},
year = {2022},
date = {2022-01-01},
journal = {Argument and Computation},
volume = {13},
pages = {159–194},
abstract = {This paper proposes a formal top-level model of explaining the outputs of machine-learning-based decision-making applications and evaluates it experimentally with three data sets. The model draws on AI & law research on argumentation with cases, which models how lawyers draw analogies to past cases and discuss their relevant similarities and differences in terms of relevant factors and dimensions in the problem domain. A case-based approach is natural since the input data of machine-learning applications can be seen as cases. While the approach is motivated by legal decision making, it also applies to other kinds of decision making, such as commercial decisions about loan applications or employee hiring, as long as the outcome is binary and the input conforms to this factor- or dimension-based format. The model is top-level in that it can be extended with more refined accounts of similarities and differences between cases. It is shown to overcome several limitations of similar argumentation-based explanation models, which only have binary features and do not represent the tendency of features towards particular outcomes. The results of the experimental evaluation studies indicate that the model may be feasible in practice, but that further development and experimentation are needed to confirm its usefulness as an explanation model. Main challenges here are selecting from a large number of possible explanations, reducing the number of features in the explanations and adding more meaningful information to them. It also remains to be investigated how suitable our approach is for explaining non-linear models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
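The factor-based case comparison such models build on can be stated as a small a fortiori test, following the theory of precedential constraint: a precedent forces its outcome on a new case that is at least as strong for the winning side. The sketch below is our simplification (Boolean factors only, not the paper's dimensions) and shows the subset checks involved.

```python
def forces(precedent_pro, precedent_con, outcome, new_pro, new_con):
    """A fortiori test over sets of factors favouring each side (1 vs 0)."""
    if outcome == 1:
        # The new case has every pro-1 factor of the precedent and
        # no pro-0 factor beyond those the precedent already had.
        return precedent_pro <= new_pro and new_con <= precedent_con
    return precedent_con <= new_con and new_pro <= precedent_pro

# Precedent decided for side 1 with pro factors {p1, p2} and con factor {c1};
# the new case is strictly stronger for side 1, so the outcome is forced.
print(forces({"p1", "p2"}, {"c1"}, 1,
             new_pro={"p1", "p2", "p3"}, new_con=set()))  # True
```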
Çelikok M M; Oliehoek F A; Kaski S
Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 235–243, International Foundation for Autonomous Agents and Multiagent Systems, Virtual Event, New Zealand, 2022, ISBN: 9781450392136.
@inproceedings{10.5555/3535850.3535878,
title = {Best-Response Bayesian Reinforcement Learning with Bayes-Adaptive POMDPs for Centaurs},
author = {Çelikok, Mustafa Mert and Oliehoek, Frans A. and Kaski, Samuel},
url = {https://ifaamas.org/Proceedings/aamas2022/pdfs/p235.pdf},
isbn = {9781450392136},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {235–243},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Virtual Event, New Zealand},
series = {AAMAS '22},
abstract = {Centaurs are half-human, half-AI decision-makers where the AI's goal is to complement the human. To do so, the AI must be able to recognize the goals and constraints of the human and have the means to help them. We present a novel formulation of the interaction between the human and the AI as a sequential game where the agents are modelled using Bayesian best-response models. We show that in this case the AI's problem of helping bounded-rational humans make better decisions reduces to a Bayes-adaptive POMDP. In our simulated experiments, we consider an instantiation of our framework for humans who are subjectively optimistic about the AI's future behaviour. Our results show that when equipped with a model of the human, the AI can infer the human's bounds and nudge them towards better decisions. We also discuss ways in which the machine can learn, with the help of the human, to improve upon its own limitations. We identify a novel trade-off for centaurs in partially observable tasks: for the AI's actions to be acceptable to the human, the machine must make sure their beliefs are sufficiently aligned, but aligning beliefs might be costly. We present a preliminary theoretical analysis of this trade-off and its dependence on task structure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Prakken H
Formalising an aspect of argument strength: degrees of attackability Book Section
In: Toni, Francesca (Ed.): Computational Models of Argument. Proceedings of COMMA 2022, pp. 296–307, IOS Press, Amsterdam etc, 2022.
@incollection{hp22gradual,
title = {Formalising an aspect of argument strength: degrees of attackability},
author = {H. Prakken},
editor = {Francesca Toni et al.},
url = {https://ebooks.iospress.nl/doi/10.3233/FAIA220161},
doi = {10.3233/FAIA220161},
year = {2022},
date = {2022-01-01},
booktitle = {Computational Models of Argument. Proceedings of COMMA 2022},
pages = {296–307},
publisher = {IOS Press},
address = {Amsterdam etc},
abstract = {This paper formally studies a notion of dialectical argument strength in terms of the number of ways in which an argument can be successfully attacked in expansions of an abstract argumentation framework. The proposed model is abstract but its design is motivated by the wish to avoid overly limiting assumptions that may not hold in particular dialogue contexts or in particular structured accounts of argumentation. It is shown that most principles for gradual argument acceptability proposed in the literature fail to hold for the proposed notion of dialectical strength, which clarifies their rational foundations and highlights the importance of distinguishing between logical, dialectical and rhetorical argument strength.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
van Woerkom W; Grossi D; Prakken H; Verheij B
Justification in Case-Based Reasoning Proceedings Article
In: Čyras, Kristijonas; Kampik, Timotheus; Cocarascu, Oana; Rago, Antonio (Ed.): Proceedings of the First International Workshop on Argumentation for eXplainable AI, pp. 1–13, CEUR Workshop Proceedings, 2022.
@inproceedings{vanwoerkom2022,
title = {Justification in Case-Based Reasoning},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Čyras, Kristijonas and Kampik, Timotheus and Cocarascu, Oana and Rago, Antonio},
url = {https://ceur-ws.org/Vol-3209/5942.pdf},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the First International Workshop on Argumentation for eXplainable AI},
pages = {1–13},
publisher = {CEUR Workshop Proceedings},
abstract = {The explanation and justification of decisions is an important subject in contemporary data-driven automated methods. Case-based argumentation has been proposed as the formal background for the explanation of data-driven automated decision making. In particular, a method was developed in recent work based on the theory of precedential constraint which reasons from a case base, given by the training data of the machine learning system, to produce a justification for the outcome of a focus case. An important role is played in this method by the notions of citability and compensation, and in the present work we develop these in more detail. Special attention is paid to the notion of compensation; we formally specify the notion and identify several of its desirable properties. These considerations reveal a refined formal perspective on the explanation method as an extension of the theory of precedential constraint with a formal notion of justification.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hopner N; Tiddi I; van Hoof H
Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods Proceedings Article
In: Raedt, Lud De (Ed.): Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pp. 3050–3056, International Joint Conferences on Artificial Intelligence Organization, 2022, (Main Track).
@inproceedings{ijcai2022p423,
title = {Leveraging Class Abstraction for Commonsense Reinforcement Learning via Residual Policy Gradient Methods},
author = {Hopner, Niklas and Tiddi, Ilaria and van Hoof, Herke},
editor = {Lud De Raedt},
url = {https://doi.org/10.24963/ijcai.2022/423},
doi = {10.24963/ijcai.2022/423},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22},
pages = {3050–3056},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
abstract = {Enabling reinforcement learning (RL) agents to leverage a knowledge base while learning from experience promises to advance RL in knowledge intensive domains. However, it has proven difficult to leverage knowledge that is not manually tailored to the environment. We propose to use the subclass relationships present in open-source knowledge graphs to abstract away from specific objects. We develop a residual policy gradient method that is able to integrate knowledge across different abstraction levels in the class hierarchy. Our method results in improved sample efficiency and generalisation to unseen objects in commonsense games, but we also investigate failure modes, such as excessive noise in the extracted class knowledge or environments with little class structure.},
note = {Main Track},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
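A minimal sketch of the residual idea, under our own naming and dimensions rather than the paper's: action preferences are computed at the level of knowledge-graph classes and then corrected by an object-level residual, so class knowledge (e.g., "food is edible") transfers to unseen objects while the residual can specialize.

```python
import torch
import torch.nn as nn

class ResidualPolicy(nn.Module):
    def __init__(self, n_classes, n_objects, feat_dim):
        super().__init__()
        self.class_head = nn.Linear(feat_dim, n_classes)   # abstract level
        self.object_head = nn.Linear(feat_dim, n_objects)  # fine-level residual
        # object_to_class[o] = index of object o's class in the hierarchy
        # (assumed mapping, here: objects 0-1 in class 0, objects 2-4 in class 1).
        self.register_buffer("object_to_class", torch.tensor([0, 0, 1, 1, 1]))

    def forward(self, state):
        class_logits = self.class_head(state)   # shared across each class
        residual = self.object_head(state)      # object-specific correction
        # Broadcast each class logit to its objects, then add the residual.
        return class_logits[..., self.object_to_class] + residual

policy = ResidualPolicy(n_classes=2, n_objects=5, feat_dim=8)
logits = policy(torch.randn(3, 8))
print(torch.softmax(logits, dim=-1).shape)  # (3, 5) action distribution
```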
van der Meer M; Reuver M; Khurana U; Krause L; Santamaría S B
Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction Miscellaneous
2022.
@misc{https://doi.org/10.48550/arxiv.2209.08966,
title = {Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction},
author = {van der Meer, Michiel and Reuver, Myrthe and Khurana, Urja and Krause, Lea and Santamaría, Selene Báez},
url = {https://arxiv.org/abs/2209.08966},
doi = {10.48550/ARXIV.2209.08966},
year = {2022},
date = {2022-01-01},
publisher = {arXiv},
abstract = {This paper describes our winning contribution to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
van Woerkom W; Grossi D; Prakken H; Verheij B
Landmarks in Case-Based Reasoning: From Theory to Data Proceedings Article
In: Schlobach, Stefan; Pérez-Ortiz, María; Tielman, Myrthe (Ed.): HHAI2022: Augmenting Human Intellect, pp. 212–224, IOS Press, 2022.
@inproceedings{woerkom2022landmarks,
title = {Landmarks in Case-Based Reasoning: From Theory to Data},
author = {van Woerkom, Wijnand and Grossi, Davide and Prakken, Henry and Verheij, Bart},
editor = {Schlobach, Stefan and Pérez-Ortiz, María and Tielman, Myrthe},
url = {https://ebooks.iospress.nl/volumearticle/60868},
year = {2022},
date = {2022-01-01},
booktitle = {HHAI2022: Augmenting Human Intellect},
volume = {354},
pages = {212–224},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
abstract = {Widespread application of uninterpretable machine learning systems for sensitive purposes has spurred research into elucidating the decision making process of these systems. These efforts have their background in many different disciplines, one of which is the field of AI & law. In particular, recent works have observed that machine learning training data can be interpreted as legal cases. Under this interpretation the formalism developed to study case law, called the theory of precedential constraint, can be used to analyze the way in which machine learning systems draw on training data – or should draw on them – to make decisions. These works predominantly stay on the theoretical level, hence in the present work the formalism is evaluated on a real world dataset. Through this analysis we identify a significant new concept which we call landmark cases, and use it to characterize the types of datasets that are more or less suitable to be described by the theory.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}