%
% GENERATED FROM http://acme.able.cs.cmu.edu
%
% Selection : Author: Nianyu_Li
%
@InProceedings{Li:2020:SEAMS-Expl,
AUTHOR = {Li, Nianyu and Adepu, Sridhar and Kang, Eunsuk and Garlan, David},
TITLE = {Explanations for Human-on-the-loop: A Probabilistic Model Checking Approach},
YEAR = {2020},
MONTH = {29 June - 3 July},
BOOKTITLE = {Proceedings of the 15th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/SEAMS_CameraReady-8.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement and
oversight, where a human operator can provide expertise not available
to the system and can detect problems that the system is
unaware of. One way of achieving this is by placing the human
operator on the loop – i.e., providing supervisory oversight and
intervening in the case of questionable adaptation decisions. To
make such interaction effective, explanation is sometimes helpful to
allow the human to understand why the system is making certain
decisions and calibrate confidence from the human perspective.
However, explanations come with costs in terms of delayed actions
and the possibility that a human may make a bad judgement. Hence,
it is not always obvious whether explanations will improve overall
utility and, if so, what kinds of explanation to provide to the operator.
In this work, we define a formal framework for reasoning
about explanations of adaptive system behaviors and the conditions
under which they are warranted. Specifically, we characterize
explanations in terms of explanation content, effect, and cost. We
then present a dynamic adaptation approach that leverages a probabilistic
reasoning technique to determine when the explanation
should be used in order to improve overall system utility.},
NOTE = {Talk},
KEYWORDS = {Explainable Software, Model Checking, Self-adaptation}
}
@InProceedings{2020:Li:HIL-Model,
AUTHOR = {Li, Nianyu and C\'{a}mara, Javier and Garlan, David and Schmerl, Bradley},
TITLE = {Reasoning about When to Provide Explanation for Human-in-the-loop Self-Adaptive Systems},
YEAR = {2020},
MONTH = {19-23 August},
BOOKTITLE = {Proceedings of the 2020 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)},
ADDRESS = {Washington, D.C.},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ExplanationInLoop2020.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement, where a human operator can provide expertise not available to the system and perform adaptations involving physical changes that cannot be automated.
However, a lack of transparency and intelligibility of system goals and the autonomous behaviors enacted to achieve them may hinder a human operator's effort to make such involvement effective.
Explanation is sometimes helpful to allow the human to understand why the system is making certain decisions. However, explanations come with costs in terms of, e.g., delayed actions.
Hence, it is not always obvious whether explanations will improve the satisfaction of system goals and, if so,
when to provide them to the operator. In this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under which they are warranted. Specifically, we characterize explanations in terms of their impact on a human operator's ability to effectively engage in adaptive actions.
We then present
a decision-making approach for planning in self-adaptation
that leverages a probabilistic reasoning tool to determine when the explanation should be used
in an adaptation strategy in order to improve overall system utility.
We illustrate our approach in a representative scenario for the application of an adaptive news website in the context of potential denial-of-service attacks.},
NOTE = {Presentation Video},
KEYWORDS = {Formal Methods, Human-in-the-loop, Self-adaptation, Stochastic Games}
}
@InProceedings{2021:FASE:Li,
AUTHOR = {Li, Nianyu and Zhang, Mingyue and Kang, Eunsuk and Garlan, David},
TITLE = {Engineering Secure Self-adaptive Systems with Bayesian Games},
YEAR = {2021},
MONTH = {27 March - 1 April},
BOOKTITLE = {Proceedings of the 24th International Conference on Fundamental Approaches to Software Engineering (FASE)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/FASE2021.pdf},
ABSTRACT = {Security attacks present unique challenges to self-adaptive
system design due to the adversarial nature of the environment. Game
theory approaches have been explored in security to model malicious
behaviors and design reliable defense for the system in a mathematically
grounded manner. However, modeling the system as a single player, as
done in prior works, is insufficient for the system under partial compromise
and for the design of fine-grained defensive strategies where the rest of the
system with autonomy can cooperate to mitigate the impact of attacks.
To deal with such issues, we propose a new self-adaptive framework incorporating
Bayesian game theory and model the defender (i.e., the system)
at the granularity of components. Under security attacks, the architecture
model of the system is translated into a Bayesian multi-player game,
where each component is explicitly modeled as an independent player
while security attacks are encoded as variant types for the components.
The optimal defensive strategy for the system is dynamically computed
by solving the pure equilibrium (i.e., adaptation response) to achieve
the best possible system utility, improving the resiliency of the system
against security attacks. We illustrate our approach using an example
involving load balancing and a case study on inter-domain routing.},
KEYWORDS = {Formal Methods, Science of Security, Self-adaptation}
}
@Article{Adepu:ExpInd:2022,
AUTHOR = {Adepu, Sridhar and Li, Nianyu and Kang, Eunsuk and Garlan, David},
TITLE = {Modeling and Analysis of Explanation for Secure Industrial Control Systems},
YEAR = {2022},
MONTH = {July},
JOURNAL = {ACM Transactions on Autonomous and Adaptive Systems},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Explanation_for_Secure_Industrial_Control_System.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement and oversight, where a human operator can provide expertise not
available to the system and detect problems that the system is unaware of. One way of achieving this synergy is by placing the human
operator on the loop – i.e., providing supervisory oversight and intervening in the case of questionable adaptation decisions. To make
such interaction effective, an explanation can play an important role in allowing the human operator to understand why the system is
making certain decisions and improve the level of knowledge that the operator has about the system. This, in turn, may improve
the operator’s capability to intervene and, if necessary, override the decisions being made by the system. However, explanations
may incur costs, in terms of delay in actions and the possibility that a human may make a bad judgement. Hence, it is not always
obvious whether an explanation will improve overall utility and, if so, what kind of explanation should be provided to the operator. In
this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under
which they are warranted. Specifically, we characterize explanations in terms of explanation content, effect, and cost. We then present a
dynamic system adaptation approach that leverages a probabilistic reasoning technique to determine when an explanation should be
used in order to improve overall system utility. We evaluate our explanation framework in the context of a realistic industrial control
system with adaptive behaviors.},
DOI = {10.1145/3557898},
KEYWORDS = {Explainable Software, Formal Methods, Self-adaptation}
}
@InProceedings{2021:Li:HeyPreparatory,
AUTHOR = {Li, Nianyu and C\'{a}mara, Javier and Garlan, David and Schmerl, Bradley and Jin, Zhi},
TITLE = {Hey! Preparing Humans to do Tasks in Self-adaptive Systems},
YEAR = {2021},
MONTH = {18-21 May},
BOOKTITLE = {Proceedings of the 16th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS 2021)},
ADDRESS = {Virtual},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Preparatory_SEAMS.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement, where human operators can complement the capabilities of systems (e.g., by supervising decisions, or performing adaptations and tasks involving physical changes that cannot be automated). However, insufficient preparation (e.g., lack of task context comprehension) may hinder the effectiveness of human involvement, especially when operators are unexpectedly interrupted to perform a new task. Preparatory notification of a task provided in advance can sometimes help human operators focus their attention on the forthcoming task and understand its context before task execution, hence improving effectiveness. Nevertheless, deciding when to use preparatory notification as a tactic is not obvious and entails considering different factors that include uncertainties induced by human operator behavior (who might ignore the notice message), human attributes (e.g., operator training level), and other information that refers to the state of the system and its environment.
In this paper, informed by work in cognitive science on human attention and context management, we introduce a formal framework to reason about
the usage of preparatory notifications
in self-adaptive systems involving human operators. Our framework characterizes the effects of managing attention via task notification in terms of task context comprehension. We also build on our framework to develop an automated probabilistic reasoning technique able to determine when and in what form a preparatory notification tactic should be used to optimize system goals. We illustrate our approach in a representative scenario of human-robot collaborative goods delivery.},
NOTE = {Awarded Best Student Paper for SEAMS 2021},
KEYWORDS = {Explainable Software, Formal Methods, Robot Adaptation, Self-adaptation}
}