%
% GENERATED FROM http://acme.able.cs.cmu.edu
% by : anonymous
% IP : ec2-18-226-52-49.us-east-2.compute.amazonaws.com
% at : Sat, 20 Jul 2024 13:26:41 -0400 GMT
%
% Selection : Year = 2017
%
@TechReport{CMU-ISR-17-119,
AUTHOR = {Glazier, Thomas J. and Schmerl, Bradley and C\'{a}mara, Javier and Garlan, David},
TITLE = {Utility Theory for Self-Adaptive Systems},
YEAR = {2017},
MONTH = {December},
NUMBER = {CMU-ISR-17-119},
INSTITUTION = {Carnegie Mellon University Institute for Software Research},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/CMU-ISR-17-119.pdf},
ABSTRACT = {Self-adaptive systems choose adaptations to perform on a running system. Typically, the self-adaptive system must choose the "best" adaptation to perform in a given circumstance from a set of adaptations that may apply. Utility functions are typically used to encode some measure of the goodness or badness of the result of choosing a particular adaptation. Within the area of utility theory, there is a set of theories that could be used to help choose the best adaptation, which vary in the assumptions and requirements they make about the system, the environment, and the business context of use. By understanding some of the formalities and advanced concepts in Utility Theory, engineers and administrators of self-adaptive systems can create more effective utility functions for their purposes, enable new forms of analysis, and potentially move into new frontiers in self-adaptive systems. In this report, we survey some of the more interesting topics in Utility Theory relevant to self-adaptive systems, including objective and subjective expected utility, stochastic and fuzzy utility, and changing and state-dependent utility functions.},
NOTE = {http://reports-archive.adm.cs.cmu.edu/anon/isr2017/abstracts/17-119.html},
KEYWORDS = {Self-adaptation}
}
@InBook{Camara/2016/SMGs,
AUTHOR = {C\'{a}mara, Javier and Garlan, David and Moreno, Gabriel A. and Schmerl, Bradley},
TITLE = {Analyzing Self-Adaptation via Model Checking of Stochastic Games},
YEAR = {2017},
BOOKTITLE = {Software Engineering for Self-Adaptive Systems III. Assurances},
NUMBER = {9640},
EDITOR = {de Lemos, Rog\'{e}rio and Garlan, David and Ghezzi, Carlo and Giese, Holger},
PUBLISHER = {Springer},
TYPE = {Lecture Notes in Computer Science},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/SEfSAS3-SMGs.pdf},
ABSTRACT = {Design decisions made during early development stages of self-adaptive systems tend to have a significant impact upon system properties at runtime
(e.g., safety, QoS). However, understanding the implications of these decisions
a priori is difficult due to the different types and degrees of uncertainty that affect such systems (e.g., simplifying assumptions, human-in-the-loop). To provide
some assurances about self-adaptive system designs, evidence can be gathered
from activities such as simulations and prototyping, but these demand a significant effort and do not provide a systematic way of dealing with uncertainty. In this chapter, we describe an approach based on model checking of stochastic multiplayer games (SMGs) that enables developers to approximate the behavioral envelope of a self-adaptive system by analyzing best- and worst-case scenarios of alternative designs for self-adaptation mechanisms. Compared to other sources of evidence, such as simulations or prototypes, our approach provides developers with a preliminary understanding of adaptation behavior with less effort, and without the need to have any specific adaptation algorithms or infrastructure in place. We illustrate our approach by showing how it can be used to mitigate different types of uncertainty in contexts such as self-protecting systems, proactive
latency-aware adaptation, and human-in-the-loop.},
KEYWORDS = {Assurance, Formal Methods, Human-in-the-loop, Model Checking, Science of Security, Self-adaptation, Stochastic Games}
}
@InBook{2017/Weyns/PA,
AUTHOR = {Weyns, Danny and Bencomo, Nelly and Calinescu, Radu and C\'{a}mara, Javier and Ghezzi, Carlo and Grassi, Vincenzo and Grunske, Lars and Inverardi, Paola and J\'{e}z\'{e}quel, Jean-Marc and Malek, Sam and Mirandola, Raffaela and Mori, Marco and Tamburrelli, Giordano},
TITLE = {Perpetual Assurances for Self-Adaptive Systems},
YEAR = {2017},
BOOKTITLE = {Software Engineering for Self-Adaptive Systems III. Assurances},
NUMBER = {9640},
EDITOR = {de Lemos, Rog\'{e}rio and Garlan, David and Ghezzi, Carlo and Giese, Holger},
PUBLISHER = {Springer},
TYPE = {Lecture Notes in Computer Science},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/2016SEfSAS-final-perpetual-assurances.pdf},
ABSTRACT = {Providing assurances for self-adaptive systems is challenging. A
primary underlying problem is uncertainty that may stem from a variety of different
sources, ranging from incomplete knowledge to sensor noise and uncertain
behavior of humans in the loop. Providing assurances that the self-adaptive
system complies with its requirements calls for an enduring process spanning
the whole lifetime of the system. In this process, humans and the system jointly
derive and integrate new evidence and arguments, which we coined perpetual
assurances for self-adaptive systems. In this paper, we provide a background
framework and the foundation for perpetual assurances for self-adaptive systems.
We elaborate on the concrete challenges of offering perpetual assurances,
requirements for solutions, realization techniques and mechanisms to make solutions
suitable. We also present benchmark criteria to compare solutions. We
then present a concrete exemplar that researchers can use to assess and compare
approaches for perpetual assurances for self-adaptation.},
KEYWORDS = {Assurance, Self-adaptation, Self-awareness & Adaptation}
}
@InBook{2017:Schmerl:Decomp,
AUTHOR = {Schmerl, Bradley and Andersson, Jesper and Vogel, Thomas and Cohen, Myra and Rubira, Cecilia M. F. and Brun, Yuriy and Gorla, Alessandra and Zambonelli, Franco and Baresi, Luciano},
TITLE = {Challenges in Composing and Decomposing Assurances for Self-Adaptive Systems},
YEAR = {2017},
BOOKTITLE = {Software Engineering for Self-Adaptive Systems III. Assurances},
NUMBER = {9640},
EDITOR = {de Lemos, Rog\'{e}rio and Garlan, David and Ghezzi, Carlo and Giese, Holger},
PUBLISHER = {Springer},
TYPE = {Lecture Notes in Computer Science},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/decomp-challenges.pdf},
ABSTRACT = {Self-adaptive software systems adapt to changes in the environment,
in the system itself, in their requirements, or in their business objectives. Typically,
these systems attempt to maintain system goals at run time and often
provide assurance that they will meet their goals under dynamic and uncertain
circumstances. While significant research has focused on ways to engineer self-adaptive
capabilities into both new and legacy software systems, less work has
been conducted on how to assure that self-adaptation maintains system goals.
For traditional, especially safety-critical software systems, assurance techniques
decompose assurances into sub-goals and evidence that can be provided by parts
of the system. Existing approaches also exist for composing assurances, in terms
of composing multiple goals and composing assurances in systems of systems.
While some of these techniques may be applied to self-adaptive systems, we argue
that several significant challenges remain in applying them to self-adaptive
systems in this chapter. We discuss how existing assurance techniques can be applied
to composing and decomposing assurances for self-adaptive systems, highlight
the challenges in applying them, summarize existing research to address
some of these challenges, and identify gaps and opportunities to be addressed by
future research.},
KEYWORDS = {Assurance, Self-adaptation}
}
@InProceedings{2017/Lamba/Cluster,
AUTHOR = {Lamba, Hemank and Glazier, Thomas J. and C\'{a}mara, Javier and Schmerl, Bradley and Garlan, David and Pfeffer, J\"{u}rgen},
TITLE = {Model-based cluster analysis for identifying suspicious activity sequences in software},
YEAR = {2017},
MONTH = {24 March},
BOOKTITLE = {Proceedings of the 3rd International Workshop on Security and Privacy Analytics (IWSPA 2017)},
ADDRESS = {Scottsdale, AZ},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/model-based-cluster-submitted.pdf},
ABSTRACT = {Large software systems have to contend with a significant number of
users who interact with different components of the system in various
ways. The sequences of components that are used as part of an interaction
define sets of behaviors that users have with the system. These
can be large in number. Among these users, it is possible that there are
some who exhibit anomalous behaviors – for example, they may have
found back doors into the system and are doing something malicious.
These anomalous behaviors can be hard to distinguish from normal
behavior because of the number of interactions a system may have,
or because traces may deviate only slightly from normal behavior. In
this paper we describe a model-based approach to cluster sequences of
user behaviors within a system and to find suspicious, or anomalous,
sequences. We exploit the underlying software architecture of a system
to define these sequences. We further show that our approach is better at
detecting suspicious activities than other approaches, specifically those
that use unigrams and bigrams for anomaly detection. We show this on
a simulation of a large-scale system based on an Amazon Web application-style
architecture.},
KEYWORDS = {Science of Security}
}
@Article{2017:Camara:Resilience,
AUTHOR = {C\'{a}mara, Javier and de Lemos, Rog\'{e}rio and Laranjeiro, Nuno and Ventura, Rafael and Vieira, Marco},
TITLE = {Robustness-Driven Resilience Evaluation of Self-Adaptive Systems},
YEAR = {2017},
JOURNAL = {IEEE Transactions on Dependable and Secure Computing},
VOLUME = {14},
NUMBER = {1},
PAGES = {50-64},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/RD-RE-SAS-TDSC.pdf},
ABSTRACT = {An increasingly important requirement for certain classes of software-intensive systems is the ability to self-adapt their structure and behavior at run-time when reacting to changes that may occur to the system, its environment, or its goals. A major challenge related to self-adaptive software systems is the ability to provide assurances of their resilience when facing changes. Since in these systems, the components that act as controllers of a target system incorporate highly complex software, there is the need to analyze the impact that controller failures might have on the services delivered by the system. In this paper, we present a novel approach for evaluating the resilience of self-adaptive software systems by applying robustness testing techniques to the controller to uncover failures that can affect system resilience. The approach for evaluating resilience, which is based on probabilistic model checking, quantifies the probability of satisfaction of system properties when the target system is subject to controller failures. The feasibility of the proposed approach is evaluated in the context of an industrial middleware system used to monitor and manage highly populated networks of devices, which was implemented using the Rainbow framework for architecture-based self-adaptation.},
KEYWORDS = {Assurance, Landmark, Rainbow, Resilience, Self-adaptation}
}
@InProceedings{2017:Moreno:CobRAPLA,
AUTHOR = {Moreno, Gabriel A. and Papadopoulos, Alessandro V. and Angelopoulos, Konstantinos and C\'{a}mara, Javier and Schmerl, Bradley},
TITLE = {Comparing Model-Based Predictive Approaches to Self-Adaptation: CobRA and PLA},
YEAR = {2017},
MONTH = {22-23 May},
BOOKTITLE = {Proceedings of the 12th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS 2017)},
ADDRESS = {Buenos Aires, Argentina},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/PLAvsCobRA.pdf},
ABSTRACT = {Modern software-intensive systems must often guarantee
certain quality requirements under changing run-time
conditions and high levels of uncertainty. Self-adaptation has
proven to be an effective way to engineer systems that can
address such challenges, but many of these approaches are purely
reactive and adapt only after a failure has taken place. To
overcome some of the limitations of reactive approaches (e.g.,
lagging behind environment changes and favoring short-term
improvements), recent proactive self-adaptation mechanisms apply
ideas from control theory, such as model predictive control
(MPC), to improve adaptation. When selecting which MPC
approach to apply, the improvement that can be obtained with
each approach is scenario-dependent, and so guidance is needed
to better understand how to choose an approach for a given
situation. In this paper, we compare CobRA and PLA, two
approaches that are inspired by MPC. CobRA is a requirements-based
approach that applies control theory, whereas PLA is
architecture-based and applies stochastic analysis. We compare
the two approaches applied to RUBiS, a benchmark system for
web and cloud application performance, discussing the required
expertise needed to use both approaches and comparing their
run-time performance with respect to different metrics.},
KEYWORDS = {Self-adaptation}
}
@InProceedings{2017:Pandey:HybridPlanningFormalization,
AUTHOR = {Pandey, Ashutosh and Ruchkin, Ivan and Schmerl, Bradley and C\'{a}mara, Javier},
TITLE = {Towards a Formal Framework for Hybrid Planning in Self-Adaptation},
YEAR = {2017},
MONTH = {22-23 May},
BOOKTITLE = {Proceedings of the 12th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS 2017)},
ADDRESS = {Buenos Aires, Argentina},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/hybrid-planning-seams2017.pdf},
ABSTRACT = {Approaches to self-adaptation face a fundamental
trade-off between quality and timeliness in decision-making. Due
to this trade-off, designers of self-adaptive systems often have
to find a fixed and suboptimal compromise between these two
requirements. Recent work has proposed the hybrid planning approach
that can resolve this trade-off dynamically and potentially
in an optimal way. The promise of hybrid planning is to combine
multiple planners at run time to produce adaptation plans of
the highest quality within given time constraints. However, since
decision-making approaches are complex and diverse, the problem
of combining them is even more difficult, and no frameworks for
hybrid planning exist yet. This paper makes an important step in simplifying
the problem of hybrid planning by formalizing it and decomposing
it into four simpler subproblems. These formalizations
will serve as a foundation for creating and evaluating engineering
solutions to the hybrid planning problem.},
KEYWORDS = {Planning, Self-adaptation}
}
@InProceedings{2017:Camara:ArchitectureSynthesis,
AUTHOR = {C\'{a}mara, Javier and Garlan, David and Schmerl, Bradley},
TITLE = {Synthesis and Quantitative Verification of Tradeoff Spaces for Families of Software Systems},
YEAR = {2017},
MONTH = {11-15 September},
BOOKTITLE = {Proceedings of the 11th European Conference on Software Architecture},
ADDRESS = {Canterbury, UK},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/cgsecsa17.pdf},
ABSTRACT = {Designing software in a way that provides guarantees about run-time behavior while achieving an acceptable balance between multiple extra-functional properties is still an open problem. Tools and techniques to inform engineers about poorly-understood design spaces are needed. To tackle this problem, we propose an approach that combines synthesis of spaces of system design alternatives from formal specifications of architectural styles with quantitative verification applied to every point of the generated solution space. The main contribution of this paper is a formal framework for specification-driven synthesis and analysis of design spaces that provides formal guarantees about the correctness of system behaviors and satisfies quantitative properties (e.g., defined over system qualities). We illustrate our proposal by analyzing the configuration space of a Tele Assistance System (TAS) using a prototype implementation of our approach.},
NOTE = {Winner of the ECSA Best Paper Award},
KEYWORDS = {Architectural Style, Formal Methods, Model Checking, Software Architecture}
}
@InProceedings{Dwivedi:EUC:2017,
AUTHOR = {Dwivedi, Vishal and Herbsleb, James and Garlan, David},
TITLE = {What Ails End-User Composition: A Cross-Domain Qualitative Study},
YEAR = {2017},
BOOKTITLE = {End-User Development. IS-EUD 2017},
VOLUME = {10303},
SERIES = {Lecture Notes in Computer Science},
PUBLISHER = {Springer},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/Dwivedi2017_Chapter_WhatAilsEnd-UserCompositionACr.pdf},
ABSTRACT = {Across many domains, end-users need to compose computational elements into novel configurations to perform their day-to-day tasks. End-user composition is a common programming activity performed by such end-users to accomplish this composition task. While there have been many studies on end-user programming, we still need a better understanding of the activities involved in end-user composition and of the environments that support them. In this paper we report a qualitative study of four popular composition environments belonging to diverse application domains, including the Taverna workflow environment for life sciences, Loni Pipeline for brain imaging, SimMan3G for medical simulations, and Kepler for scientific simulations. We interview end-users of these environments to explore their experiences while performing common composition tasks. We use the “Content Analysis” technique to analyze these interviews and explore the barriers to end-user composition in these domains. Furthermore, our findings show that there are some unique differences in the requirements of naive end-users vs. expert programmers. We believe that not only are these findings useful to improve the quality of end-user composition environments, but they can also help toward the development of better end-user composition frameworks.},
KEYWORDS = {End-user Architecture}
}
@InProceedings{2017:Uncertainty:Camara,
AUTHOR = {C\'{a}mara, Javier and Peng, Wenxin and Garlan, David and Schmerl, Bradley},
TITLE = {Reasoning about Sensing Uncertainty in Decision-Making for Self-Adaptation},
YEAR = {2017},
MONTH = {5 September},
BOOKTITLE = {Proceedings of the 15th International Workshop on Foundations of Coordination Languages and Self-Adaptive Systems (FOCLASA 2017)},
VOLUME = {10729},
SERIES = {Lecture Notes in Computer Science},
PUBLISHER = {Springer},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/CPGS-CR-foclasa17.pdf},
ABSTRACT = {Self-Adaptive systems are expected to adapt to unanticipated run-time
events using imperfect information about their environment. This entails handling
the effects of uncertainties in decision-making, which are not always considered
as a first-class concern. This paper contributes a formal analysis technique that
explicitly considers uncertainty in sensing when reasoning about the best way to
adapt. We illustrate our approach on a Denial of Service (DoS) attack scenario
and present some preliminary results that show the benefits of uncertainty-aware
decision-making with respect to using an uncertainty-ignorant approach.},
KEYWORDS = {Self-adaptation, uncertainty}
}
@InProceedings{2017:Pandey:IBLHP,
AUTHOR = {Pandey, Ashutosh and Schmerl, Bradley and Garlan, David},
TITLE = {Instance-based Learning for Hybrid Planning},
YEAR = {2017},
MONTH = {18-22 September},
BOOKTITLE = {Proceedings of the 3rd International Workshop on Data-driven Self-regulating Systems (DSS 2017)},
ADDRESS = {Tucson, AZ, USA},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/IBL-DSS2017.pdf},
ABSTRACT = {Due to the fundamental trade-off between quality and timeliness of planning, designers of self-adaptive systems often have to compromise between an approach that is quick to find an adaptation plan and an approach that is slow but finds a quality adaptation plan. To deal with this trade-off, in our previous work, we proposed a hybrid planning approach that combines a deliberative and a reactive planning approach to find a balance between quality and timeliness of planning.
However, when reactive and deliberative planning are combined to
instantiate a hybrid planner, the key challenge is to decide which
approach(es) should be invoked to solve a planning problem. To
this end, this paper proposes to use data-driven instance-based
learning to find an appropriate combination of the two planning
approaches when solving a planning problem. As an initial proof
of concept, the paper presents results of a small experiment that
demonstrates the potential of the proposed approach to identify a
combination of the two planning approaches to solve a planning
problem.},
KEYWORDS = {Planning, Self-adaptation}
}
@InProceedings{2017:Camara:RTSS,
AUTHOR = {Seetanandi, Gautham Nayak and C\'{a}mara, Javier and Almeida, Luis and {\AA}rz\'{e}n, Karl-Erik and Maggio, Martina},
TITLE = {Event-Driven Bandwidth Allocation with Formal Guarantees for Camera Networks},
YEAR = {2017},
MONTH = {5-8 December},
BOOKTITLE = {Proceedings of the 2017 IEEE Real-Time Systems Symposium},
ADDRESS = {Paris, France},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/RTSS17_Gautham.pdf},
ABSTRACT = {Modern computing systems are often formed by
multiple components that interact with each other through the
use of shared resources (e.g., CPU, network bandwidth, storage).
In this paper, we consider a representative scenario of one such
system in the context of an Internet of Things application. The
system consists of a network of self-adaptive cameras that share
a communication channel, transmitting streams of frames to a
central node. The cameras can modify a quality parameter to
adapt the amount of information encoded and to affect their
bandwidth requirements and usage. A critical design choice for
such a system is scheduling channel access, i.e., how to determine
the amount of channel capacity that should be used by each of the
cameras at any point in time. Two main issues have to be considered
for the choice of a bandwidth allocation scheme: (i) camera
adaptation and network access scheduling may interfere with
one another, and (ii) bandwidth distribution should be triggered only
when necessary, to limit additional overhead. This paper proposes
the first formally verified event-triggered adaptation scheme for
bandwidth allocation, designed to minimize additional overhead
in the network. Desired properties of the system are verified using
model checking. The paper also describes experimental results
obtained with an implementation of the scheme.},
KEYWORDS = {Formal Methods, Self-adaptation}
}
@TechReport{Wagner:APT:2017,
AUTHOR = {Wagner, Ryan and Fredrikson, Matthew and Garlan, David},
TITLE = {An Advanced Persistent Threat Exemplar},
YEAR = {2017},
MONTH = {July},
NUMBER = {CMU-ISR-17-100},
INSTITUTION = {Institute for Software Research, Carnegie Mellon University},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/CMU-ISR-17-100.pdf},
ABSTRACT = {Security researchers do not have sufficient example systems for conducting research on advanced persistent threats,
and companies and agencies that experience attacks in the wild are reluctant to release detailed information that can be
examined. In this paper, we describe an Advanced Persistent Threat Exemplar that is intended to provide a real-world
attack scenario with sufficient complexity for reasoning about defensive system adaptation, while not containing so
much information as to be too complex. It draws from actual published attacks and from the authors'
experiences as security engineers.},
NOTE = {http://reports-archive.adm.cs.cmu.edu/anon/isr2017/abstracts/17-100.html},
KEYWORDS = {Science of Security}
}
@TechReport{Camara/UncertaintyTR/2017,
AUTHOR = {C\'{a}mara, Javier and Garlan, David and Kang, Won Gu and Peng, Wenxin and Schmerl, Bradley},
TITLE = {Uncertainty in Self-Adaptive Systems},
YEAR = {2017},
MONTH = {July},
NUMBER = {CMU-ISR-17-110},
INSTITUTION = {Institute for Software Research, Carnegie Mellon University},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/CMU-ISR-17-110.pdf},
ABSTRACT = {Self-Adaptive systems are expected to adapt to unanticipated run-time events using imperfect information
about their environment. This entails handling the effects of uncertainties in decision-making, which are
not always considered as a first-class concern. This technical report summarizes a set of existing techniques
and insights into addressing uncertainty in self-adaptive systems and outlines a future research agenda on
uncertainty management in self-adaptive systems. The material in this report is strongly informed by our
own research in the area, and is therefore not necessarily representative of other works.},
NOTE = {http://reports-archive.adm.cs.cmu.edu/anon/isr2017/abstracts/17-110.html},
KEYWORDS = {Human-in-the-loop, Self-adaptation, uncertainty}
}
@InProceedings{Pandey:FAS2017:Hybrid,
AUTHOR = {Pandey, Ashutosh},
TITLE = {Hybrid Planning in Self-Adaptive Systems},
YEAR = {2017},
MONTH = {18-22 September},
BOOKTITLE = {FAS* Foundations and Applications of Self* Systems (FAS* 2017) Doctoral Symposium},
ADDRESS = {Tucson, AZ, USA},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/HybridPlanningDoctoralSymposium.pdf},
ABSTRACT = {Self-adaptive software systems make decisions at
run time that seek to change their behavior in response to
faults, changing environments and attacks. Therefore, having an
appropriate planning approach to find an adaptation plan is
critical to successful self-adaptation. For many realistic systems,
ideally one would like to have a planning approach that is both
quick and finds an optimal adaptation plan. However, due to the
fundamental trade-off between quality and timeliness of planning,
often designers have to compromise between an approach that is
quick to find a plan and an approach that is slow but finds an
optimal plan. To deal with this trade-off, this work proposes a
hybrid planning approach that combines more than one planning
approach to bring their benefits together.},
KEYWORDS = {Planning, Self-adaptation}
}
@TechReport{2017:Shonan:CASAS,
AUTHOR = {Garlan, David and D'Ippolito, Nicolas and Tei, Kenji},
TITLE = {The 2nd Controlled Adaptation of Self-Adaptive Systems Workshop (CASaS2017)},
YEAR = {2017},
MONTH = {24-28 July},
NUMBER = {NII-2017-10},
INSTITUTION = {National Institute of Informatics},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/No.2017-10-1.pdf},
ABSTRACT = {Self-adaptive systems are required to adapt their behaviour in the face of
changes in their environment and goals. Such a requirement is typically achieved
by developing a system as a closed-loop system following a Monitor-Analyse-Plan-Act
(MAPE) scheme. MAPE loops are a mechanism that allows systems
to monitor their state and produce changes aiming to guarantee that the goals
are met. In practice it is often the case that to achieve their desired goals,
self-adaptive systems must combine a number of MAPE loops with different
responsibilities and at different abstraction levels.
Higher-level goals require decision-level mechanisms to produce a plan in
terms of the high-level system actions to be performed. Various mechanisms
have been proposed and developed for automatically generating decision-level
plans (e.g., event-based controller synthesis), providing guarantees about the
satisfaction of hard goals (e.g., providing a certain level of service), and supporting
improvements in soft goals (e.g., doing this in an efficient or cost-effective
manner). These decisions are often made at a time scale of seconds to minutes.
Lower-level goals, on the other hand, typically require control mechanisms
that sense the state of the system and environment and react at a fine time
granularity of milliseconds. Solutions to this problem are typically based on
classical control theory techniques such as discrete-time control.
A successful adaptive system, then, must find ways to integrate these multiple
levels of control, leading to an important question of how best to do that, and
which concepts to use. Additionally, concepts from classical control theory (typically
applied at low levels of control) can also be useful in understanding higher-level
control.
Recently the software engineering community has begun to study the application
of control theory and the formal guarantees it provides in the context of
software engineering. For example, the 2014 Dagstuhl Seminar “Control Theory
meets Software Engineering”, is an example of such recent interest. That
seminar discussed a variety of possible applications of control theory to software
engineering problems.
Also, and perhaps more relevant, is the first CASaS Shonan seminar held
in 2016. The seminar focused on formal guarantees that can be provided in
self-adaptive systems via the use of control theory (e.g., event-based controller
synthesis and discrete-time control). The seminar was a success in many respects.
It had over 30 attendees from more than 10 countries. The seminar was
an active gathering of outstanding researchers in both control theory and software
engineering, and provided a forum in which discussions on the connections
between control theory and software engineering for self-adaptive systems could
be held. Most of the attendees expressed their intention to continue studying
and discussing the relation between control theory and software engineering,
which was highlighted as key to addressing the requirements of self-adaptive
systems.
As in the first edition we expected to involve a group of active researchers in
key areas such as Self-Adaptive Systems, Control theory, Game theory, Software
Engineering, and Requirements Engineering, creating an ideal environment to
discuss current and future applications and possibilities of control theory as a
mechanism to provide formal guarantees for self-adaptive systems (e.g., convergence,
safety, stability). Encouraged by the success of the first CASaS, we
expected to have a number of participants from a wide variety of research areas
to further explore the benefits of incorporating the application capabilities and formal framework provided by control theory to self-adaptive systems.
Among the research questions that we expected to discuss are: How to
coordinate multiple levels of adaptive control? What kinds of properties from
classical control theory can be applied at higher levels to guarantee certain
properties? To what extent do the domain and context of use influence the
design of a control regime for adaptation? In what ways can AI techniques of
planning and machine learning be applied to adaptive systems? How can one
deal with uncertainty in a systematic fashion? How can control theory inform
our decisions about ways to incorporate humans into self-adaptive systems?
We envisaged the 5-day meeting to be organised in two main parts. During
the first day, participants presented their backgrounds and interests,
and three lectures were given, on continuous control, discrete-event
control, and hybrid approaches. Then, for the remaining four days, we
identified and discussed the most relevant topics selected by the participants in
working groups. In the end, we decided to discuss two topics: “cooperation
and coordination” and “properties”. The first topic is concerned with ways
to incorporate components with “classical” control implementations into larger
systems, which will typically be a mixture of discrete and continuous control,
and may need to adapt at an architectural level at run time in response to environmental
conditions. The second topic is concerned with ways to formalize
properties that are used in control theory in terms that would be useful for
systems that reason in terms of discrete control. We divided into two groups,
discussed the topics, and created draft reports about the discussion. These reports
were further edited and improved, and now constitute the main body of
this report.},
KEYWORDS = {Control Theory, Self-adaptation}
}
@PhdThesis{Moreno:Thesis:2017,
AUTHOR = {Moreno, Gabriel A.},
TITLE = {Adaptation Timing in Self-Adaptive Systems},
YEAR = {2017},
MONTH = {April},
SCHOOL = {Institute for Software Research, School of Computer Science, Carnegie Mellon University},
URL = {http://reports-archive.adm.cs.cmu.edu/anon/isr2017/abstracts/17-103.html},
ABSTRACT = {Software-intensive systems are increasingly expected to operate under changing
and uncertain conditions, including not only varying user needs and workloads, but
also fluctuating resource capacity. Self-adaptation is an approach that aims to address
this problem, giving systems the ability to change their behavior and structure
to adapt to changes in themselves and their operating environment without human
intervention.
Self-adaptive systems tend to be reactive and myopic, adapting in response to
changes without anticipating what the subsequent adaptation needs will be. Adapting
reactively can result in inefficiencies due to the system performing a suboptimal
sequence of adaptations. Furthermore, some adaptation tactics—atomic adaptation
actions that leave the system in a consistent state—have latency and take some time
to produce their effect. In that case, reactive adaptation causes the system to lag
behind environment changes. What is worse, a long running adaptation action may
prevent the system from performing other adaptations until it completes, further limiting
its ability to effectively deal with the environment changes.
To address these limitations and improve the effectiveness of self-adaptation, we
present proactive latency-aware adaptation, an approach that considers the timing
of adaptation (i) leveraging predictions of the near future state of the environment
to adapt proactively; (ii) considering the latency of adaptation tactics when deciding
how to adapt; and (iii) executing tactics concurrently. We have developed three different
solution approaches embodying these principles. One is based on probabilistic
model checking, making it inherently able to deal with the stochastic behavior
of the environment, and guaranteeing optimal adaptation choices over a finite decision
horizon. The second approach uses stochastic dynamic programming to make
adaptation decisions, and thanks to performing part of the computations required
to make those decisions off-line, it achieves a speedup of an order of magnitude
over the first solution approach without compromising optimality. A third solution
approach makes adaptation decisions based on repertoires of adaptation strategies—
predefined compositions of adaptation tactics. This approach is more scalable than
the other two because the solution space is smaller, allowing an adaptive system to
reap some of the benefits of proactive latency-aware adaptation even if the number
of ways in which it could adapt is too large for the other approaches to consider all
these possibilities.
We evaluate the approach using two different classes of systems with different
adaptation goals, and different repertoires of adaptation strategies. One of them is a
web system, with the adaptation goal of utility maximization. The other is a cyber-physical
system operating in a hostile environment. In that system, self-adaptation
must not only maximize the reward gained, but also keep the probability of surviving
a mission above a threshold. In both cases, our results show that proactive
latency-aware adaptation improves the effectiveness of self-adaptation with respect
to reactive time-agnostic adaptation.},
NOTE = {Technical Report CMU-ISR-17-103},
KEYWORDS = {Latency-aware, Rainbow, Self-adaptation}
}