%
% GENERATED FROM http://acme.able.cs.cmu.edu
% by : anonymous
% IP : 216.73.216.171
% at : Wed, 02 Jul 2025 16:56:15 -0400 GMT
%
% Selection : Author: Eunsuk_Kang
%
@InProceedings{ToD:ICSE:2019,
AUTHOR = {D\"{u}rschmid, Tobias and Kang, Eunsuk and Garlan, David},
TITLE = {Trade-off-oriented Development: Making Quality Attribute Trade-offs First-class},
YEAR = {2019},
MONTH = {May},
BOOKTITLE = {Proceedings of the 41st International Conference on Software Engineering: New Ideas and Emerging Results},
ADDRESS = {Montreal, Canada},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/TradeoffOrientedDevelopmentToD_ICSE_2019.pdf},
ABSTRACT = {Implementing a solution for a design decision that
precisely satisfies the trade-off between quality attributes can be
extremely challenging. Further, typically quality attribute tradeoffs
are not represented as first-class entities in development
artifacts. Hence, decisions might be suboptimal and lack requirements
traceability as well as changeability. We propose Trade-off-oriented
Development (ToD), a new concept to automate
the selection and integration of reusable implementations for
a given design decision based on quality attribute trade-offs.
Implementations that vary in quality attributes and that solve
reoccurring design decisions are collected in a design decision
library. Developers declaratively specify the quality attribute
trade-off, which is then used to automatically select the best
fitting implementation. We argue that thereby, software could
satisfy the trade-offs more precisely, requirements are traceable
and changeable, and advances in implementations automatically
improve existing software.},
KEYWORDS = {Software Architecture}
}
@InProceedings{Li:2020:SEAMS-Expl,
AUTHOR = {Li, Nianyu and Adepu, Sridhar and Kang, Eunsuk and Garlan, David},
TITLE = {Explanations for Human-on-the-loop: A Probabilistic Model Checking Approach},
YEAR = {2020},
MONTH = {29 June - 3 July},
BOOKTITLE = {Proceedings of the 15th International Symposium on Software Engineering for Adaptive and Self-managing Systems (SEAMS)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/SEAMS_CameraReady-8.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement and
oversight, where a human operator can provide expertise not available
to the system and can detect problems that the system is
unaware of. One way of achieving this is by placing the human
operator on the loop -- i.e., providing supervisory oversight and
intervening in the case of questionable adaptation decisions. To
make such interaction effective, explanation is sometimes helpful to
allow the human to understand why the system is making certain
decisions and calibrate confidence from the human perspective.
However, explanations come with costs in terms of delayed actions
and the possibility that a human may make a bad judgement. Hence,
it is not always obvious whether explanations will improve overall
utility and, if so, what kinds of explanation to provide to the operator.
In this work, we define a formal framework for reasoning
about explanations of adaptive system behaviors and the conditions
under which they are warranted. Specifically, we characterize
explanations in terms of explanation content, effect, and cost. We
then present a dynamic adaptation approach that leverages a probabilistic
reasoning technique to determine when the explanation
should be used in order to improve overall system utility.},
NOTE = {Talk},
KEYWORDS = {Explainable Software, Model Checking, Self-adaptation}
}
@InProceedings{2021:FASE:Li,
AUTHOR = {Li, Nianyu and Zhang, Mingyue and Kang, Eunsuk and Garlan, David},
TITLE = {Engineering Secure Self-adaptive Systems with Bayesian Games},
YEAR = {2021},
MONTH = {27 March - 1 April},
BOOKTITLE = {Proceedings of the 24th International Conference on Fundamental Approaches to Software Engineering},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/FASE2021.pdf},
ABSTRACT = {Security attacks present unique challenges to self-adaptive
system design due to the adversarial nature of the environment. Game
theory approaches have been explored in security to model malicious
behaviors and design reliable defense for the system in a mathematically
grounded manner. However, modeling the system as a single player, as
done in prior works, is insufficient for the system under partial compromise
and for the design of fine-grained defensive strategies where the rest of the
system with autonomy can cooperate to mitigate the impact of attacks.
To deal with such issues, we propose a new self-adaptive framework incorporating
Bayesian game theory and model the defender (i.e., the system)
at the granularity of components. Under security attacks, the architecture
model of the system is translated into a Bayesian multi-player game,
where each component is explicitly modeled as an independent player
while security attacks are encoded as variant types for the components.
The optimal defensive strategy for the system is dynamically computed
by solving the pure equilibrium (i.e., adaptation response) to achieve
the best possible system utility, improving the resiliency of the system
against security attacks. We illustrate our approach using an example
involving load balancing and a case study on inter-domain routing.},
KEYWORDS = {Formal Methods, Science of Security, Self-adaptation}
}
@Article{Adepu:ExpInd:2022,
AUTHOR = {Adepu, Sridhar and Li, Nianyu and Kang, Eunsuk and Garlan, David},
TITLE = {Modeling and Analysis of Explanation for Secure Industrial Control Systems},
YEAR = {2022},
MONTH = {July},
JOURNAL = {ACM Transactions on Autonomous and Adaptive Systems},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Explanation_for_Secure_Industrial_Control_System.pdf},
ABSTRACT = {Many self-adaptive systems benefit from human involvement and oversight, where a human operator can provide expertise not
available to the system and detect problems that the system is unaware of. One way of achieving this synergy is by placing the human
operator on the loop -- i.e., providing supervisory oversight and intervening in the case of questionable adaptation decisions. To make
such interaction effective, an explanation can play an important role in allowing the human operator to understand why the system is
making certain decisions and improve the level of knowledge that the operator has about the system. This, in turn, may improve
the operator's capability to intervene and, if necessary, override the decisions being made by the system. However, explanations
may incur costs, in terms of delay in actions and the possibility that a human may make a bad judgement. Hence, it is not always
obvious whether an explanation will improve overall utility and, if so, what kind of explanation should be provided to the operator. In
this work, we define a formal framework for reasoning about explanations of adaptive system behaviors and the conditions under
which they are warranted. Specifically, we characterize explanations in terms of explanation content, effect, and cost. We then present a
dynamic system adaptation approach that leverages a probabilistic reasoning technique to determine when an explanation should be
used in order to improve overall system utility. We evaluate our explanation framework in the context of a realistic industrial control
system with adaptive behaviors.},
DOI = {10.1145/3557898},
KEYWORDS = {Explainable Software, Formal Methods, Self-adaptation}
}
@InProceedings{2020:ASE:CPS,
AUTHOR = {Gafford, Benjamin and D\"{u}rschmid, Tobias and Moreno, Gabriel A. and Kang, Eunsuk},
TITLE = {Synthesis-Based Resolution of Feature Interactions in Cyber-Physical Systems},
YEAR = {2020},
MONTH = {21-25 September},
BOOKTITLE = {Proceedings of the IEEE/ACM International Conference on Automated Software Engineering (ASE)},
ADDRESS = {Virtual},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ase20-feature-interactions.pdf},
ABSTRACT = {The feature interaction problem arises when two or more independent features interact with each other in an undesirable manner. Feature interactions remain a challenging and important problem in emerging domains of cyber-physical systems (CPS), such as intelligent vehicles, unmanned aerial vehicles (UAVs) and the Internet of Things (IoT), where the outcome of an unexpected interaction may result in a safety failure. Existing approaches to resolving feature interactions rely on priority lists or fixed strategies, but may not be effective in scenarios where none of the competing feature actions are satisfactory with respect to system requirements. This paper proposes a novel synthesis-based approach to resolution, where a conflict among features is resolved by synthesizing an action that best satisfies the specification of desirable system behaviors in the given environmental context. Unlike existing resolution methods, our approach is capable of producing a desirable system outcome even when none of the conflicting actions are satisfactory. The effectiveness of the proposed approach is demonstrated using a case study involving interactions among safety-critical features in an autonomous drone.},
KEYWORDS = {Cyberphysical Systems, Formal Methods}
}
@InProceedings{2020:FSE:Robustness,
AUTHOR = {Zhang, Changjian and Garlan, David and Kang, Eunsuk},
TITLE = {A Behavioral Notion of Robustness for Software Systems},
YEAR = {2020},
MONTH = {6-16 November},
BOOKTITLE = {Proceedings of the 2020 ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering (ESEC/FSE)},
ADDRESS = {Virtual},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/fse20-robustness.pdf},
ABSTRACT = {Software systems are designed and implemented with assumptions about the environment. However, once the system is deployed, the actual environment may deviate from its expected behavior, possibly undermining desired properties of the system. To enable systematic design of systems that are robust against potential environmental deviations, we propose a rigorous notion of robustness for software systems. In particular, the robustness of a system is defined as the largest set of deviating environmental behaviors under which the system is capable of guaranteeing a desired property. We describe a new set of design analysis problems based on our notion of robustness, and a technique for automatically computing robustness of a system given its behavior description. We demonstrate potential applications of our robustness notion on two case studies involving network protocols and safety-critical interfaces.},
KEYWORDS = {Architectural Analysis, Explainable Software, Model Checking}
}
@InProceedings{2021:FSE:Chang,
AUTHOR = {Zhang, Changjian and Wagner, Ryan and Orvalho, Pedro and Garlan, David and Manquinho, Vasco and Martins, Ruben and Kang, Eunsuk},
TITLE = {AlloyMax: Bringing Maximum Satisfaction to Relational Specifications},
YEAR = {2021},
MONTH = {23-28 August},
BOOKTITLE = {The ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering (ESEC/FSE) 2021},
ADDRESS = {Virtual},
ABSTRACT = {Alloy is a declarative modeling language based on a first-order relational logic. Its constraint-based analysis has enabled a wide range of applications in software engineering, including configuration synthesis, bug finding, test-case generation, and security analysis.
Certain types of analysis tasks in these domains involve finding an optimal solution. For example, in a network configuration problem, instead of finding any valid configuration, it may be desirable to find one that is most permissive (i.e., it permits a maximum number of packets). Due to its dependence on SAT, however, Alloy cannot be used to specify and analyze these types of problems.
We propose AlloyMax, an extension of Alloy with a capability
to express and analyze problems with optimal solutions. AlloyMax introduces (1) a small addition of language constructs that can be used to specify a wide range of problems that involve optimality and (2) a new analysis engine that leverages a Maximum Satisfiability (MaxSAT) solver to generate optimal solutions. To enable this
new type of analysis, we show how a specification in a first-order relational logic can be translated into an input format of MaxSAT solvers—namely, a Boolean formula in weighted conjunctive normal form (WCNF). We demonstrate the applicability and scalability of AlloyMax on a benchmark of problems. To our knowledge, AlloyMax is the first approach to enable analysis with optimality in a relational modeling language, and we believe that AlloyMax has the potential to bring a wide range of new applications to Alloy.},
NOTE = {Distinguished Paper Award},
KEYWORDS = {Formal Methods, Planning}
}
@InProceedings{SAML21,
AUTHOR = {Casimiro, Maria and Romano, Paolo and Garlan, David and Moreno, Gabriel A. and Kang, Eunsuk and Klein, Mark},
TITLE = {Self-Adaptation for Machine Learning Based Systems},
YEAR = {2021},
MONTH = {14 September},
BOOKTITLE = {Proceedings of the 1st International Workshop on Software Architecture and Machine Learning (SAML)},
SERIES = {LNCS},
ADDRESS = {Virtual (originally V{\"a}xj{\"o}, Sweden)},
PUBLISHER = {Springer},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/SAML2021-paper6SAML21.pdf},
ABSTRACT = {Today's world is witnessing a shift from human-written software to machine-learned software, with the rise of systems
that rely on machine learning. These systems typically operate in non-static environments, which are prone to unexpected
changes, as is the case of self-driving cars and enterprise systems. In this context, machine-learned software can misbehave.
Thus, it is paramount that these systems are capable of detecting problems with their machine-learned components and
adapt themselves to maintain desired qualities. For instance, a fraud detection system that cannot adapt its machine-learned
model to efficiently cope with emerging fraud patterns or changes in the volume of transactions is subject to losses of millions
of dollars. In this paper, we take a first step towards the development of a framework aimed to self-adapt systems that rely
on machine-learned components. We describe: (i) a set of causes of machine-learned component misbehavior and a set of
adaptation tactics inspired by the literature on machine learning, motivating them with the aid of a running example; (ii) the
required changes to the MAPE-K loop, a popular control loop for self-adaptive systems; and (iii) the challenges associated
with developing this framework. We conclude the paper with a set of research questions to guide future work.},
KEYWORDS = {Machine Learning, Self-adaptation}
}
@InProceedings{Zhang:ICSE:2023,
AUTHOR = {Zhang, Changjian and Saluja, Taranj and Meira-G\'{o}es, R\^{o}mulo and Bolton, Matthew and Garlan, David and Kang, Eunsuk},
TITLE = {Robustification of Behavioral Designs against Environmental Deviations},
YEAR = {2023},
MONTH = {14-20 May},
BOOKTITLE = {Proceedings of the 45th International Conference on Software Engineering},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Robustification___ICSE_23_Camera_Ready.pdf},
ABSTRACT = {Modern software systems are deployed in a highly dynamic, uncertain environment. Ideally, a system that is robust should be capable of establishing its most critical requirements even in the presence of possible deviations in the environment. We propose a technique called behavioral robustification, which involves systematically and rigorously improving the robustness of a design against potential deviations. Given behavioral models of a system and its environment, along with a set of user-specified deviations, our robustification method produces a redesign that is capable of satisfying a desired property even when the environment exhibits those deviations. In particular, we describe how the robustification problem can be formulated as a multi-objective optimization problem, where the goal is to restrict the deviating environment from causing a violation of a desired property, while maximizing the amount of existing functionality and minimizing the cost of changes to the original design. We demonstrate the effectiveness of our approach on case studies involving the robustness of an electronic voting machine and safety-critical interfaces.},
KEYWORDS = {Formal Methods, Resilience, Resource prediction}
}
@InProceedings{Chu:SEAMS:2023,
AUTHOR = {Chu, Simon and Shedden, Emma and Zhang, Changjian and Meira-G\'{o}es, R\^{o}mulo and Moreno, Gabriel A. and Garlan, David and Kang, Eunsuk},
TITLE = {Runtime Resolution of Feature Interactions through Adaptive Requirement Weakening},
YEAR = {2023},
MONTH = {15-16 May},
BOOKTITLE = {Proceedings of the 18th International Symposium on Software Engineering for Adaptive and Self-Managing Systems},
ABSTRACT = {The feature interaction problem occurs when two or
more independently developed components interact with each
other in unanticipated ways, resulting in undesirable system
behaviors. Feature interaction problems remain a challenge for
emerging domains in cyber-physical systems (CPS), such as the
Internet of Things and autonomous drones. Existing techniques
for resolving feature interactions take a ``winner-takes-all'' approach, where one out of the conflicting features is selected
as the most desirable one, and the rest are disabled. However,
when multiple of the conflicting features fulfill important system
requirements, being forced to select one of them can result in
an undesirable system outcome. In this paper, we propose a new
resolution approach that allows all of the conflicting features
to continue to partially fulfill their requirements during the
resolution process. In particular, our approach leverages the
idea of adaptive requirement weakening, which involves one or
more features temporarily weakening their level of performance
in order to co-exist with the other features in a consistent
manner. Given feature requirements specified in Signal Temporal
Logic (STL), we propose an automated method and a runtime
architecture for automatically weakening the requirements to
resolve a conflict. We demonstrate our approach through case
studies on feature interactions in autonomous drones.},
KEYWORDS = {Formal Methods, Self-adaptation}
}
@InProceedings{FEMCAD:2023,
AUTHOR = {Zhang, Changjian and Dardik, Ian and Meira-G\'{o}es, R\^{o}mulo and Garlan, David and Kang, Eunsuk},
TITLE = {Fortis: A Tool for Analysis and Repair of Robust Software Systems},
YEAR = {2023},
MONTH = {23-27 October},
BOOKTITLE = {Proc. Formal Methods in Computer-Aided Design 2023},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/fortis-fmcad.pdf},
ABSTRACT = {This paper presents Fortis, a tool for automated, formal analysis and repair of robust discrete systems. Given a system model, an environment model, and a safety property, the tool can be used to automatically compute robustness as the amount of deviations in the environment under which the system can continue to guarantee the property. In addition, Fortis enables automated repair of a given system to improve its robustness against a set of intolerable deviations through a process called robustification. With these techniques, Fortis enables a new process for developing robust-by-design systems. The paper presents the overall design of Fortis as well as the key details behind the robustness analysis and robustification techniques. The applicability and performance of Fortis are illustrated through experimental results over a set of case study systems, including a radiation therapy system, an electronic voting machine, network protocols, and a transportation fare system.},
KEYWORDS = {Formal Methods}
}
@InProceedings{Chu:SEAMS:2024,
AUTHOR = {Chu, Simon and Koe, Justin and Garlan, David and Kang, Eunsuk},
TITLE = {Integrating Graceful Degradation and Recovery through Requirement-driven Adaptation},
YEAR = {2024},
MONTH = {15-16 April},
BOOKTITLE = {Proc. the International Conference on Software Engineering for Adaptive and Self-managing Systems (SEAMS)},
ABSTRACT = {Cyber-physical systems (CPS) are subject to environmental uncertainties such as adverse operating conditions, malicious attacks, and hardware degradation. These uncertainties may lead to failures that put the system in a sub-optimal or unsafe state. Systems that are resilient to such uncertainties rely on two types of operations: (1) graceful degradation, for ensuring that the system maintains an acceptable level of safety during unexpected environmental conditions and (2) recovery, to facilitate the resumption of normal system functions. Typically, mechanisms for degradation and recovery are developed independently from each other, and later integrated into a system, requiring the designer to develop an additional, ad-hoc logic for activating and coordinating between the two operations.
In this paper, we propose a self-adaptation approach for improving system resiliency through automated triggering and coordination of graceful degradation and recovery. The key idea behind our approach is to treat degradation and recovery as requirement-driven adaptation tasks: Degradation can be thought of as temporarily weakening original (i.e., ideal) system requirements to be achieved by the system, and recovery as strengthening the weakened requirements when the environment returns within an expected operating boundary. Furthermore, by treating weakening and strengthening as dual operations, we argue that a single requirement-based adaptation method is sufficient to enable coordination between degradation and recovery. Given system requirements specified in signal temporal logic (STL), we propose a run-time adaptation framework that performs degradation and recovery in response to environmental changes. We describe a prototype implementation of our framework and demonstrate the feasibility of the proposed approach using a case study in unmanned underwater vehicles.},
KEYWORDS = {Self-adaptation}
}
@InProceedings{Zhang:FM24,
AUTHOR = {Zhang, Changjian and Kapoor, Parv and Meira-G\'{o}es, R\^{o}mulo and Garlan, David and Kang, Eunsuk and Ganlath, Akila and Mishra, Shatadal and Ammar, Nejib},
TITLE = {Tolerance of Reinforcement Learning Controllers against Deviations in Cyber Physical Systems},
YEAR = {2024},
MONTH = {11-13 September},
BOOKTITLE = {26th International Symposium on Formal Methods (FM24)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/FM%202024.pdf},
ABSTRACT = {Cyber-physical systems (CPS) with reinforcement learning (RL)-based controllers are increasingly being deployed in complex physical environments such as autonomous vehicles, the Internet-of-Things (IoT), and smart cities. An important property of a CPS is tolerance; i.e., its ability to function safely under possible disturbances and uncertainties in the actual operation. In this paper, we introduce a new, expressive notion of tolerance that describes how well a controller is capable of satisfying a desired system requirement, specified using Signal Temporal Logic (STL), under possible deviations in the system. Based on this definition, we propose a novel analysis problem, called the tolerance falsification problem, which involves finding small deviations that result in a violation of the given requirement. We present a novel, two-layer simulation-based analysis framework and a novel search heuristic for finding small tolerance violations. To evaluate our approach, we construct a set of benchmark problems where system parameters can be configured to represent different types of uncertainties and disturbances in the system. Our evaluation shows that our falsification approach and heuristic can effectively find small tolerance violations.
},
KEYWORDS = {Cyberphysical Systems, Formal Methods, Machine Learning}
}