2022
Cavalcante Siebert, Luciano; Lupetti, Maria Luce; Aizenberg, Evgeni; Beckers, Niek; Zgonnikov, Arkady; Veluwenkamp, Herman; Abbink, David; Giaccardi, Elisa; Houben, Geert-Jan; Jonker, Catholijn M.; van den Hoven, Jeroen; Forster, Deborah; Lagendijk, Reginald L.
Meaningful human control: actionable properties for AI system development Journal Article
In: AI Ethics, vol. 3, no. 1, pp. 241–255, 2022, ISSN: 2730-5961.
Abstract | Links | BibTeX | Tags: artificial intelligence, autonomy, value:autonomy
@article{CavalcanteSiebert2022,
title = {Meaningful human control: actionable properties for {AI} system development},
author = {Cavalcante Siebert, Luciano and Lupetti, Maria Luce and Aizenberg, Evgeni and Beckers, Niek and Zgonnikov, Arkady and Veluwenkamp, Herman and Abbink, David and Giaccardi, Elisa and Houben, Geert-Jan and Jonker, Catholijn M. and van den Hoven, Jeroen and Forster, Deborah and Lagendijk, Reginald L.},
doi = {10.1007/s43681-022-00167-3},
issn = {2730-5961},
year = {2022},
date = {2022-05-18},
urldate = {2022-05-18},
journal = {AI Ethics},
volume = {3},
number = {1},
pages = {241--255},
publisher = {Springer Science and Business Media LLC},
abstract = {How can humans remain in control of artificial intelligence (AI)-based systems designed to perform tasks autonomously? Such systems are increasingly ubiquitous, creating benefits - but also undesirable situations where moral responsibility for their actions cannot be properly attributed to any particular person or group. The concept of meaningful human control has been proposed to address responsibility gaps and mitigate them by establishing conditions that enable a proper attribution of responsibility for humans; however, clear requirements for researchers, designers, and engineers are yet inexistent, making the development of AI-based systems that remain under meaningful human control challenging. In this paper, we address the gap between philosophical theory and engineering practice by identifying, through an iterative process of abductive thinking, four actionable properties for AI-based systems under meaningful human control, which we discuss making use of two applications scenarios: automated vehicles and AI-based hiring. First, a system in which humans and AI algorithms interact should have an explicitly defined domain of morally loaded situations within which the system ought to operate. Second, humans and AI agents within the system should have appropriate and mutually compatible representations. Third, responsibility attributed to a human should be commensurate with that human’s ability and authority to control the system. Fourth, there should be explicit links between the actions of the AI agents and actions of humans who are aware of their moral responsibility. We argue that these four properties will support practically minded professionals to take concrete steps toward designing and engineering for AI systems that facilitate meaningful human control.},
keywords = {artificial intelligence, autonomy, value:autonomy},
pubstate = {published},
tppubtype = {article}
}
2019
van Wynsberghe, Aimee; Robbins, Scott
Critiquing the Reasons for Making Artificial Moral Agents Journal Article
In: Science and Engineering Ethics, vol. 25, no. 3, pp. 719–735, 2019, ISSN: 1471-5546.
Abstract | Links | BibTeX | Tags: artificial intelligence, artificial moral agents, machine ethics, robot ethics
@article{vanWynsberghe2019,
title = {Critiquing the Reasons for Making Artificial Moral Agents},
author = {van Wynsberghe, Aimee and Robbins, Scott},
doi = {10.1007/s11948-018-0030-8},
issn = {1471-5546},
year = {2019},
date = {2019-06-01},
urldate = {2019-06-01},
journal = {Science and Engineering Ethics},
volume = {25},
number = {3},
pages = {719--735},
abstract = {Many industry leaders and academics from the field of machine ethics would have us believe that the inevitability of robots coming to have a larger role in our lives demands that robots be endowed with moral reasoning capabilities. Robots endowed in this way may be referred to as artificial moral agents (AMA). Reasons often given for developing AMAs are: the prevention of harm, the necessity for public trust, the prevention of immoral use, such machines are better moral reasoners than humans, and building these machines would lead to a better understanding of human morality. Although some scholars have challenged the very initiative to develop AMAs, what is currently missing from the debate is a closer examination of the reasons offered by machine ethicists to justify the development of AMAs. This closer examination is especially needed because of the amount of funding currently being allocated to the development of AMAs (from funders like Elon Musk) coupled with the amount of attention researchers and industry leaders receive in the media for their efforts in this direction. The stakes in this debate are high because moral robots would make demands on society; answers to a host of pending questions about what counts as an AMA and whether they are morally responsible for their behavior or not. This paper shifts the burden of proof back to the machine ethicists demanding that they give good reasons to build AMAs. The paper argues that until this is done, the development of commercially available AMAs should not proceed further.},
keywords = {artificial intelligence, artificial moral agents, machine ethics, robot ethics},
pubstate = {published},
tppubtype = {article}
}
2018
Dignum, Virginia
Responsible artificial intelligence: designing AI for human values Journal Article
In: ITU Journal: ICT Discoveries, vol. 1, no. 1, pp. 1-8, 2018.
Abstract | Links | BibTeX | Tags: artificial intelligence, design for values, ethics, societal impact, value:accountability
@article{Dignum2017,
title = {Responsible artificial intelligence: designing {AI} for human values},
author = {Dignum, Virginia},
url = {https://www.itu.int/en/journal/001/Pages/01.aspx},
year = {2018},
date = {2018-03-01},
urldate = {2018-03-01},
journal = {ITU Journal: ICT Discoveries},
volume = {1},
number = {1},
pages = {1--8},
abstract = {Artificial intelligence (AI) is increasingly affecting our lives in smaller or greater ways. In order to ensure that systems will uphold human values, design methods are needed that incorporate ethical principles and address societal concerns. In this paper, we explore the impact of AI in the case of the expected effects on the European labor market, and propose the accountability, responsibility and transparency (ART) design principles for the development of AI systems that are sensitive to human values.},
keywords = {artificial intelligence, design for values, ethics, societal impact, value:accountability},
pubstate = {published},
tppubtype = {article}
}
Santoni de Sio, Filippo; van den Hoven, Jeroen
Meaningful Human Control over Autonomous Systems: A Philosophical Account Journal Article
In: Frontiers in Robotics and AI, vol. 5, pp. 15, 2018, ISSN: 2296-9144.
Abstract | Links | BibTeX | Tags: artificial intelligence, guidance control, lethal autonomous weapon systems, meaningful human control, moral responsibility, responsible innovation, value sensitive design, value:autonomy
@article{10.3389/frobt.2018.00015,
title = {Meaningful Human Control over Autonomous Systems: A Philosophical Account},
author = {Santoni de Sio, Filippo and van den Hoven, Jeroen},
url = {https://www.frontiersin.org/article/10.3389/frobt.2018.00015},
doi = {10.3389/frobt.2018.00015},
issn = {2296-9144},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {Frontiers in Robotics and AI},
volume = {5},
pages = {15},
abstract = {Debates on lethal autonomous weapon systems have proliferated in the last five years. Ethical concerns have been voiced about a possible raise in the number of wrongs and crimes in military operations and about the creation of a “responsibility gap” for harms caused by these systems. To address these concerns, the principle of “meaningful human control” has been introduced in the legal-political debate; according to this principle humans not computers and their algorithms should ultimately remain in control of, and thus morally responsible for relevant decisions about (lethal) military operations. However, policy-makers and technical designers lack a detailed theory of what “meaningful human control” exactly means. In this paper we lay the foundation of a philosophical account of meaningful human control, based on the concept of “guidance control” as elaborated in the philosophical debate on free will and moral responsibility. Following the ideals of “Responsible Innovation” and “Value-sensitive Design” our account of meaningful human control is cast in the form of design requirements. We identify two general, necessary conditions to be satisfied for an autonomous system to remain under meaningful human control: first, a “tracking” condition, according to which the system should be able to respond to both the relevant moral reasons of the humans designing and deploying the system and the relevant facts in the environment in which the system operates; second, a “tracing” condition, according to which the system should be designed in such a way as to grant the possibility to always trace back the outcome of its operations to at least one human along the chain of design and operation. As we think that meaningful human control can be one of the central notions in ethics of robotics and AI, in the last part of the paper we start exploring the implications of our account for the design and use of non-military autonomous systems, for instance self-driving cars.},
keywords = {artificial intelligence, guidance control, lethal autonomous weapon systems, meaningful human control, moral responsibility, responsible innovation, value sensitive design, value:autonomy},
pubstate = {published},
tppubtype = {article}
}
Bennati, Stefano; Dusparic, Ivana; Shinde, Rhythima; Jonker, Catholijn M
Volunteers in the Smart City: Comparison of Contribution Strategies on Human-Centered Measures Journal Article
In: Sensors, vol. 18, no. 11, 2018, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: artificial intelligence, big data, fairness, participatory sensing, privacy, public good, smart cities, value:justice
@article{s18113707,
title = {Volunteers in the {Smart City}: Comparison of Contribution Strategies on Human-Centered Measures},
author = {Bennati, Stefano and Dusparic, Ivana and Shinde, Rhythima and Jonker, Catholijn M.},
url = {http://www.mdpi.com/1424-8220/18/11/3707},
doi = {10.3390/s18113707},
issn = {1424-8220},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {Sensors},
volume = {18},
number = {11},
pages = {3707},
abstract = {Provision of smart city services often relies on users contribution, e.g., of data, which can be costly for the users in terms of privacy. Privacy risks, as well as unfair distribution of benefits to the users, should be minimized as they undermine user participation, which is crucial for the success of smart city applications. This paper investigates privacy, fairness, and social welfare in smart city applications by means of computer simulations grounded on real-world data, i.e., smart meter readings and participatory sensing. We generalize the use of public good theory as a model for resource management in smart city applications, by proposing a design principle that is applicable across application scenarios, where provision of a service depends on user contributions. We verify its applicability by showing its implementation in two scenarios: smart grid and traffic congestion information system. Following this design principle, we evaluate different classes of algorithms for resource management, with respect to human-centered measures, i.e., privacy, fairness and social welfare, and identify algorithm-specific trade-offs that are scenario independent. These results could be of interest to smart city application designers to choose a suitable algorithm given a scenario-specific set of requirements, and to users to choose a service based on an algorithm that matches their privacy preferences.},
keywords = {artificial intelligence, big data, fairness, participatory sensing, privacy, public good, smart cities, value:justice},
pubstate = {published},
tppubtype = {article}
}
2016
van Wynsberghe, Aimee
Service robots, care ethics, and design Journal Article
In: Ethics and Information Technology, vol. 18, no. 4, pp. 311–321, 2016, ISSN: 1572-8439.
Abstract | Links | BibTeX | Tags: applied ethics, artificial intelligence, care ethics, robot ethics, service robots, value sensitive design, value:health
@article{vanWynsberghe2016,
title = {Service robots, care ethics, and design},
author = {van Wynsberghe, Aimee},
doi = {10.1007/s10676-016-9409-x},
issn = {1572-8439},
year = {2016},
date = {2016-12-01},
urldate = {2016-12-01},
journal = {Ethics and Information Technology},
volume = {18},
number = {4},
pages = {311--321},
abstract = {It should not be a surprise in the near future to encounter either a personal or a professional service robot in our homes and/or our work places: according to the International Federation for Robots, there will be approx 35 million service robots at work by 2018. Given that individuals will interact and even cooperate with these service robots, their design and development demand ethical attention. With this in mind I suggest the use of an approach for incorporating ethics into the design process of robots known as Care Centered Value Sensitive Design (CCVSD). Although this approach was originally and intentionally designed for the healthcare domain, the aim of this paper is to present a preliminary study of how personal and professional service robots might also be evaluated using the CCVSD approach. The normative foundations for CCVSD come from its reliance on the care ethics tradition and in particular the use of care practices for: (1) structuring the analysis and, (2) determining the values of ethical import. To apply CCVSD outside of healthcare one must show that the robot has been integrated into a care practice. Accordingly, the practice into which the robot is to be used must be assessed and shown to meet the conditions of a care practice. By investigating the foundations of the approach I hope to show why it may be applicable for service robots and further to give examples of current robot prototypes that can and cannot be evaluated using CCVSD.},
keywords = {applied ethics, artificial intelligence, care ethics, robot ethics, service robots, value sensitive design, value:health},
pubstate = {published},
tppubtype = {article}
}