%
% GENERATED FROM http://acme.able.cs.cmu.edu
% by : anonymous
% IP : ec2-34-238-143-70.compute-1.amazonaws.com
% at : Thu, 28 Mar 2024 19:05:13 -0400 GMT
%
% Selection : Author: Sridhar_Adepu
%
@inproceedings{Li:2020:SEAMS-Expl,
  author    = {Li, Nianyu and Adepu, Sridhar and Kang, Eunsuk and Garlan, David},
  title     = {Explanations for {Human-on-the-loop}: A Probabilistic Model Checking Approach},
  booktitle = {Proceedings of the 15th International Symposium on Software Engineering for Adaptive and Self-managing Systems ({SEAMS})},
  year      = {2020},
  month     = jun # "--" # jul,
  pdf       = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/SEAMS_CameraReady-8.pdf},
  abstract  = {Many self-adaptive systems benefit from human involvement and
oversight, where a human operator can provide expertise not available
to the system and can detect problems that the system is
unaware of. One way of achieving this is by placing the human
operator on the loop -- i.e., providing supervisory oversight and
intervening in the case of questionable adaptation decisions. To
make such interaction effective, explanation is sometimes helpful to
allow the human to understand why the system is making certain
decisions and calibrate confidence from the human perspective.
However, explanations come with costs in terms of delayed actions
and the possibility that a human may make a bad judgement. Hence,
it is not always obvious whether explanations will improve overall
utility and, if so, what kinds of explanation to provide to the operator.
In this work, we define a formal framework for reasoning
about explanations of adaptive system behaviors and the conditions
under which they are warranted. Specifically, we characterize
explanations in terms of explanation content, effect, and cost. We
then present a dynamic adaptation approach that leverages a probabilistic
reasoning technique to determine when the explanation
should be used in order to improve overall system utility.},
  note      = {Talk. Conference held 29 June -- 3 July 2020},
  keywords  = {Explainable Software, Model Checking, Self-adaptation}
}
@article{Adepu:ExpInd:2022,
  author   = {Adepu, Sridhar and Li, Nianyu and Kang, Eunsuk and Garlan, David},
  title    = {Modeling and Analysis of Explanation for Secure Industrial Control Systems},
  journal  = {{ACM} Transactions on Autonomous and Adaptive Systems},
  year     = {2022},
  month    = jul,
  doi      = {10.1145/3557898},
  pdf      = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Explanation_for_Secure_Industrial_Control_System.pdf},
  abstract = {Many self-adaptive systems benefit from human involvement and oversight, where a human operator can provide expertise not
available to the system and detect problems that the system is unaware of. One way of achieving this synergy is by placing the human
operator on the loop -- i.e., providing supervisory oversight and intervening in the case of questionable adaptation decisions. To make
such interaction effective, an explanation can play an important role in allowing the human operator to understand why the system is
making certain decisions and improve the level of knowledge that the operator has about the system. This, in turn, may improve
the operator's capability to intervene and if necessarily, override the decisions being made by the system. However, explanations
may incur costs, in terms of delay in actions and the possibility that a human may make a bad judgement. Hence, it is not always
obvious whether an explanation will improve overall utility and, if so, what kind of explanation should be provided to the operator. In
this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under
which they are warranted. Specifically, we characterize explanations in terms of explanation content, effect, and cost. We then present a
dynamic system adaptation approach that leverages a probabilistic reasoning technique to determine when an explanation should be
used in order to improve overall system utility. We evaluate our explanation framework in the context of a realistic industrial control
system with adaptive behaviors.},
  keywords = {Explainable Software, Formal Methods, Self-adaptation}
}