%
% GENERATED FROM http://acme.able.cs.cmu.edu
% by : anonymous
% IP : 216.73.216.246
% at : Sun, 22 Jun 2025 00:59:41 -0400 GMT
%
% Selection : Year = 2022
%
@Article{Adepu:ExpInd:2022,
AUTHOR = {Adepu, Sridhar and Li, Nianyu and Kang, Eunsuk and Garlan, David},
TITLE = {Modeling and Analysis of Explanation for Secure Industrial Control Systems},
YEAR = {2022},
MONTH = {July},
JOURNAL = {ACM Transactions on Autonomous and Adaptive Systems},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Explanation_for_Secure_Industrial_Control_System.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement and oversight, where a human operator can provide expertise not
available to the system and detect problems that the system is unaware of. One way of achieving this synergy is by placing the human
operator on the loop – i.e., providing supervisory oversight and intervening in the case of questionable adaptation decisions. To make
such interaction effective, an explanation can play an important role in allowing the human operator to understand why the system is
making certain decisions and improve the level of knowledge that the operator has about the system. This, in turn, may improve
the operator’s capability to intervene and, if necessary, override the decisions being made by the system. However, explanations
may incur costs, in terms of delay in actions and the possibility that a human may make a bad judgement. Hence, it is not always
obvious whether an explanation will improve overall utility and, if so, what kind of explanation should be provided to the operator. In
this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under
which they are warranted. Specifically, we characterize explanations in terms of explanation content, effect, and cost. We then present a
dynamic system adaptation approach that leverages a probabilistic reasoning technique to determine when an explanation should be
used in order to improve overall system utility. We evaluate our explanation framework in the context of a realistic industrial control
system with adaptive behaviors.},
NOTE = {https://dl.acm.org/doi/10.1145/3557898},
KEYWORDS = {Explainable Software, Formal Methods, Self-adaptation}
}
@InProceedings{2022:ICSA:ROSDiscover,
AUTHOR = {Timperley, Christopher Steven and D\"{u}rschmid, Tobias and Schmerl, Bradley and Garlan, David and Le Goues, Claire},
TITLE = {ROSDiscover: Statically Detecting Run-Time Architecture Misconfigurations in Robotics Systems},
YEAR = {2022},
MONTH = {12-15 March},
BOOKTITLE = {Proceedings of the 19th International Conference on Software Architecture (ICSA 2022)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ROSDiscover_Statically_Detecting_Run-Time_Architecture_Misconfigurations_in_Robotics_Systems__Artifact_Paper.pdf},
ABSTRACT = {Robot systems are growing in importance and complexity. Ecosystems for robot software, such as the Robot Operating System (ROS), provide libraries of reusable software components that can be configured and composed into larger systems. To support compositionality, ROS uses late binding and architecture configuration via "launch files" that describe how to initialize the components in a system.
However, late binding often leads to systems failing silently due to misconfiguration, for example by misrouting or dropping messages entirely.
In this paper, we present ROSDiscover, which statically recovers the run-time architecture of ROS systems to find such architecture misconfiguration bugs. First, ROSDiscover constructs component-level architectural models (ports, parameters) from source code. Second, architecture configuration files are analyzed to compose the system from these component models and derive the connections in the system. Finally, the reconstructed architecture is checked against architectural rules described in first-order logic to identify potential misconfigurations.
We present an evaluation of ROSDiscover on real-world, off-the-shelf robotic systems, measuring the accuracy, effectiveness, and practicality of our approach. To that end, we collected the first data set of architecture configuration bugs in ROS from popular open-source systems and measured how effective our approach is for detecting configuration bugs in that set.},
KEYWORDS = {Acme, Architectural Analysis, Publish Subscribe Systems, Software Architecture}
}
@Article{Wohlrab2021negotiation,
AUTHOR = {Wohlrab, Rebekka and Garlan, David},
TITLE = {A Negotiation Support System for Defining Utility Functions for Multi-Stakeholder Self-Adaptive Systems},
YEAR = {2022},
JOURNAL = {Requirements Engineering},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/2021_Wohlrab_utility_negotiation_accepted.pdf},
ABSTRACT = {For realistic self-adaptive systems, multiple quality attributes need to be
considered and traded off against each other. These quality attributes are commonly encoded in a utility function, for instance, a weighted sum of relevant
objectives. Utility functions are typically subject to a set of constraints, i.e.,
hard requirements that should not be violated by the system. The research
agenda for requirements engineering for self-adaptive systems has raised the
need for decision-making techniques that consider the trade-offs and priorities of multiple objectives. Human stakeholders need to be engaged in the
decision-making process so that constraints and the relative importance of
each objective can be correctly elicited. This paper presents a method that
supports multiple stakeholders in eliciting constraints, prioritizing relevant
quality attributes, negotiating priorities, and giving input to define utility
functions for self-adaptive systems. We developed tool support in the form
of a blackboard system that aggregates information from different stakeholders,
detects conflicts, proposes mechanisms to reach an agreement, and generates
a utility function. We performed a think-aloud study with 14 participants to
investigate negotiation processes and assess the approach’s understandability
and user satisfaction. Our study sheds light on how humans reason about and
how they negotiate around quality attributes. The mechanisms for conflict detection and resolution were perceived as very useful. Overall, our approach was
found to make the process of utility function definition more understandable
and transparent.},
NOTE = {https://doi.org/10.1007/s00766-021-00368-y},
KEYWORDS = {Explainable Software, Self-adaptation}
}
@InProceedings{ICSA:Diaz:2022,
AUTHOR = {D\'{i}az-Pace, Andr\'{e}s and Garlan, David},
TITLE = {Making Architecture Optimization Transparent with Tactic-Based Explanations},
YEAR = {2022},
MONTH = {12-15 March},
BOOKTITLE = {Proceedings of the 19th International Conference on Software Architecture (ICSA 2022)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Making Architecture Optimizat….pdf},
ABSTRACT = {Over the past decade, a number of automated techniques
and tools have been developed for optimizing architectural
designs with respect to quality-attribute goals. In these systems,
the optimization process is typically seen as a black box, since it
is not possible for a human to have access to the decisions that led
to a particular solution generated by an optimization tool. Even
when these decisions are available for inspection, the amount of
information can be overwhelming for the architect. As a result,
humans might not completely understand the rationale behind
a given solution or trust that a tool made correct decisions. To
mitigate this problem, we propose a semi-automated approach
for generating textual explanations for any architectural solution
produced by a tool. This kind of explanation provides a summary
of the key architectural tactics that were applied to achieve an
optimized architecture that satisfies a set of quality-attribute objectives.
In this paper, we discuss two procedures for determining
the key tactics to be explained. As an initial experiment, we used a
popular optimization tool to generate solutions and explanations
for a small but non-trivial design space involving performance,
reliability, and cost objectives. We also performed an exploratory
user study to assess the effectiveness of these explanations.},
KEYWORDS = {Explainable Software, Software Architecture}
}
@InProceedings{2022:Wohlrab:SEAMS,
AUTHOR = {Wohlrab, Rebekka and Meira-G\'{o}es, R\^{o}mulo and Vierhauser, Michael},
TITLE = {Run-Time Adaptation of Quality Attributes for Automated Planning},
YEAR = {2022},
MONTH = {18-24 May},
BOOKTITLE = {Proceedings of the 17th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS’22)},
ADDRESS = {Pittsburgh, USA and Virtual},
ABSTRACT = {Self-adaptive systems typically operate in heterogeneous environments and need to optimize their behavior based on a variety of
quality attributes to meet stakeholders’ needs. During adaptation
planning, these quality attributes are considered in the form of constraints, describing requirements that must be fulfilled, and utility
functions, which are used to select an optimal plan among several
alternatives. To date, most automated planning approaches have not
been designed to adapt quality attributes, their priorities, and their
trade-offs at run time. Instead, both utility functions and constraints
are commonly defined at design time. There exists a clear lack of
run-time mechanisms that support their adaptation in response
to changes in the environment or in stakeholders’ preferences. In
this paper, we present initial work that combines automated planning and adaptation of quality attributes to address this gap. The
approach helps to semi-automatically adjust utility functions and
constraints based on changes at run time. We present a preliminary experimental evaluation that indicates that our approach can
provide plans with higher utility values while fulfilling changed
or added constraints. We conclude this paper with our envisioned
research outlook and plans for future empirical studies.},
KEYWORDS = {Self-adaptation}
}
@PhdThesis{2022:Sukkerd:Thesis,
AUTHOR = {Sukkerd, Roykrong},
TITLE = {Improving Transparency and Intelligibility of Multi-Objective Probabilistic Planning},
YEAR = {2022},
NUMBER = {CMU-ISR-22-104},
SCHOOL = {Institute for Software Research, School of Computer Science},
ABSTRACT = {Sequential decision-making problems with multiple objectives are natural to many application domains of AI-enabled systems. As these systems are increasingly used to work with people or to make decisions that impact people, it is important that their reasoning is intelligible to the end-users and stakeholders, to foster trust and effective human-agent collaborations. However, understanding the reasoning behind solving sequential decision problems is difficult for end-users even when white-box decision models such as Markov decision processes (MDPs) are used. This intelligibility challenge is due to the combinatorial explosion of possible strategies for solving long-horizon problems. The multi-objective optimization aspect further complicates the problem, as different objectives may conflict and reasoning about tradeoffs is required. These complexities pose a barrier for end-users to know whether the agent has made the right decisions for a given context, and may prevent them from intervening when the agent is wrong. The goal of this thesis is to develop an explainability framework that enables an agent making sequential decisions to communicate its goals and the rationale for its behavior to the end-users.
We present an explainable planning framework for MDPs, particularly to support problem domains with multiple optimization objectives. We propose consequence-oriented contrastive explanations, in which an argument for an agent's policy is given in terms of its expected consequences on the task objectives, put in the context of the selected viable alternatives to demonstrate the optimization and tradeoff reasoning of the agent. Our modeling framework supports reward decomposition, and augments the MDP representation to ground the components of the reward or cost function in domain-level concepts and semantics, to facilitate explanation generation. Our explanation generation method computes policy-level contrastive foils that describe the inflection points in the agent's decision-making in terms of optimization and trade-off reasoning of the decomposed task objectives. We demonstrate the applicability of our explainable planning framework by applying it to three planning problem domains: waypoint-based navigation, UAV mission planning, and clinic scheduling.
We design and conduct a human subjects experiment to evaluate the effectiveness of explanations based on measurable task performance. The users' task in the experiment is to assess the agent's planning decisions and determine whether they are the best decisions for a given problem context. Our experimental results show that our proposed consequence-oriented contrastive explanation approach significantly improves the users' ability to correctly assess the agent's planning decisions, as well as the users' confidence in their assessment.
Lastly, we investigate the feasibility of a user-guided approach to our consequence-oriented contrastive explanation paradigm. We propose a theoretical framework and approaches to formulate Why Not behavioral questions as state-action constraints and linear temporal logic constraints on the planning problem, and to solve for satisfying policies in order to explain the full impact that the queried behavior has on the subsequent decisions and on the task objectives.},
KEYWORDS = {Explainable Software, Planning, Self-adaptation}
}
@Article{2022:SoSym:UIP,
AUTHOR = {C\'{a}mara, Javier and Troya, Javier and Vallecillo, Antonio and Bencomo, Nelly and Calinescu, Radu and Cheng, Betty H.C. and Garlan, David and Schmerl, Bradley},
TITLE = {The uncertainty interaction problem in self-adaptive systems},
YEAR = {2022},
MONTH = {August},
JOURNAL = {Software and Systems Modeling},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Sosym2022.pdf},
ABSTRACT = {The problem of mitigating uncertainty in self-adaptation has driven much of the research proposed in the area of software
engineering for self-adaptive systems in the last decade. Although many solutions have already been proposed, most of
them tend to tackle specific types, sources, and dimensions of uncertainty (e.g., in goals, resources, adaptation functions)
in isolation. A particular concern is how to model these different aspects of uncertainty in an integrated fashion. Different
uncertainties are rarely independent and often compound, affecting the satisfaction of goals and other system properties in
subtle and often unpredictable ways. Hence, there is still limited understanding about the specific ways in which uncertainties
from various sources interact and ultimately affect the properties of self-adaptive, software-intensive systems. In this SoSym
expert voice, we introduce the Uncertainty Interaction Problem as a way to better qualify the scope of the challenges with
respect to representing different types of uncertainty while capturing their interaction in models employed to reason about
self-adaptation. We contribute a characterization of the problem and discuss its relevance in the context of case studies taken
from two representative application domains. We posit that the Uncertainty Interaction Problem should drive future research
in software engineering for autonomous and self-adaptive systems, and therefore, contribute to evolving uncertainty modeling
towards holistic approaches that would enable the construction of more resilient self-adaptive systems.},
NOTE = {Expert Voice Paper (https://doi.org/10.1007/s10270-022-01037-6)},
KEYWORDS = {Self-adaptation, uncertainty}
}
@InProceedings{Casimiro:ACSOS:2022,
AUTHOR = {Casimiro, Maria and Romano, Paolo and Garlan, David and Rodrigues, Luis},
TITLE = {Towards a Framework for Adapting Machine Learning Components},
YEAR = {2022},
BOOKTITLE = {2022 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ACSOS2022.pdf},
ABSTRACT = {Machine Learning (ML) models are now commonly
used as components in systems. Like any other component, ML
components can produce erroneous outputs that may penalize
system utility. In this context, self-adaptive systems emerge as
a natural approach to cope with ML mispredictions, through
the execution of adaptation tactics such as model retraining. To
synthesize an adaptation strategy, the self-adaptation manager
needs to reason about the cost-benefit tradeoffs of the applicable
tactics, which is a non-trivial task for tactics such as model
retraining, whose benefits are both context- and data-dependent.
To address this challenge, this paper proposes a probabilistic
modeling framework that supports automated reasoning about
the cost/benefit tradeoffs associated with improving ML components of ML-based systems. The key idea of the proposed approach is to decouple the problems of (i) estimating the expected
performance improvement after retraining and (ii) estimating the
impact of the improved ML predictions on overall system utility.
We demonstrate the application of the proposed framework by
using it to self-adapt a state-of-the-art ML-based fraud-detection
system, which we evaluate using a publicly available, real fraud
detection dataset. We show that, by predicting the system utility stemming from retraining an ML component, the probabilistic model
checker can generate adaptation strategies that are significantly
closer to the optimal ones than baselines such as
periodic or reactive retraining.},
NOTE = {Presentation Video},
KEYWORDS = {Machine Learning, Self-adaptation}
}
@Article{2022:JSS:ExpSAS,
AUTHOR = {Wohlrab, Rebekka and C\'{a}mara, Javier and Garlan, David and Schmerl, Bradley},
TITLE = {Explaining quality attribute tradeoffs in automated planning for self-adaptive systems},
YEAR = {2022},
MONTH = {October},
JOURNAL = {Journal of Systems and Software},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/xplanqasas.pdf},
ABSTRACT = {Self-adaptive systems commonly operate in heterogeneous contexts and need to consider multiple quality attributes. Human stakeholders often express their quality preferences by defining utility functions, which are used by self-adaptive systems to automatically generate adaptation plans. However, the adaptation space of realistic systems is large, and it is often unclear how utility functions impact the generated adaptation behavior, as well as structural, behavioral, and quality constraints. Moreover, human stakeholders are often not aware of the underlying tradeoffs between quality attributes. To address this issue, we present an approach that uses machine learning techniques (dimensionality reduction, clustering, and decision tree learning) to explain the reasoning behind automated planning. Our approach focuses on the tradeoffs between quality attributes and how the choice of weights in utility functions results in different plans being generated. We help humans understand quality attribute tradeoffs, identify key decisions in adaptation behavior, and explore how differences in utility functions result in different adaptation alternatives. We present two systems to demonstrate the approach’s applicability and consider its potential application to 24 exemplar self-adaptive systems. Moreover, we describe our assessment of the tradeoff between the information reduction and the amount of explained variance retained by the results obtained with our approach.},
KEYWORDS = {Explainable Software, Planning, Self-adaptation}
}
@Article{2022:JSS:Extra,
AUTHOR = {C\'{a}mara, Javier and Wohlrab, Rebekka and Garlan, David and Schmerl, Bradley},
TITLE = {ExTrA: Explaining architectural design tradeoff spaces via dimensionality reduction},
YEAR = {2022},
MONTH = {December},
JOURNAL = {Journal of Systems and Software},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ExTrA__Explaining_Architectural_Design_Tradeoff_Spaces_via_Dimensionality_Reduction.pdf},
ABSTRACT = {In software design, guaranteeing the correctness of run-time system behavior while achieving an acceptable balance among multiple quality attributes remains a challenging problem. Moreover, providing guarantees about the satisfaction of those requirements when systems are subject to uncertain environments is even more challenging. While recent developments in architectural analysis techniques can assist architects in exploring the satisfaction of quantitative guarantees across the design space, existing approaches are still limited because they do not explicitly link design decisions to satisfaction of quality requirements. Furthermore, the amount of information they yield can be overwhelming to a human designer, making it difficult to see the forest for the trees. In this paper, we present ExTrA (Explaining Tradeoffs of software Architecture design spaces), an approach to analyzing architectural design spaces that addresses these limitations and provides a basis for explaining design tradeoffs. Our approach employs dimensionality reduction techniques used in machine learning pipelines, such as Principal Component Analysis (PCA) and Decision Tree Learning (DTL), to enable architects to understand how design decisions contribute to the satisfaction of extra-functional properties across the design space. Our results show the feasibility of the approach in two case studies and evidence that combining complementary techniques like PCA and DTL is a viable approach to facilitate comprehension of tradeoffs in poorly understood design spaces.},
NOTE = {https://doi.org/10.1016/j.jss.2022.111578},
KEYWORDS = {Architectural Analysis, Explainable Software}
}
@PhdThesis{Dwivedi:Thesis:2022,
AUTHOR = {Dwivedi, Vishal},
TITLE = {Halo: A Framework for End-User Architecting},
YEAR = {2022},
NUMBER = {CMU-S3D-22-110},
SCHOOL = {Software and Societal Systems Department, School of Computer Science},
KEYWORDS = {End-user Architecture}
}