@InProceedings{2020:Li:HIL-Model,
AUTHOR = {Li, Nianyu and C\'{a}mara, Javier and Garlan, David and Schmerl, Bradley},
TITLE = {Reasoning about When to Provide Explanation for Human-in-the-loop Self-Adaptive Systems},
YEAR = {2020},
MONTH = {19-23 August},
BOOKTITLE = {Proceedings of the 2020 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)},
ADDRESS = {Washington, D.C.},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ExplanationInLoop2020.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement, where a human operator can provide expertise not available to the system and perform adaptations involving physical changes that cannot be automated.
However, a lack of transparency and intelligibility of system goals and the autonomous behaviors enacted to achieve them may hinder a human operator's effort to make such involvement effective.
Explanation is sometimes helpful to allow the human to understand why the system is making certain decisions. However, explanations come with costs in terms of, e.g., delayed actions.
Hence, it is not always obvious whether explanations will improve the satisfaction of system goals and, if so, when to provide them to the operator.
In this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under which they are warranted. Specifically, we characterize explanations in terms of their impact on a human operator's ability to effectively engage in adaptive actions.
We then present a decision-making approach for planning in self-adaptation that leverages a probabilistic reasoning tool to determine when an explanation should be used in an adaptation strategy in order to improve overall system utility.
We illustrate our approach in a representative scenario involving an adaptive news website subject to potential denial-of-service attacks.},
NOTE = {Presentation Video},
KEYWORDS = {Formal Methods, Human-in-the-loop, Self-adaptation, Stochastic Games} }