% GENERATED FROM http://acme.able.cs.cmu.edu
% by : anonymous
% IP : ec2-18-220-137-35.us-east-2.compute.amazonaws.com
% at : Sat, 19 Apr 2025 05:01:50 -0400 GMT
%
% Selection : Author: Shihong_Huang
%
% NOTE(review): accents normalised to BibTeX special-character form ({\'a}, {\"a})
% so classic BibTeX sorts and labels correctly; Unicode quotes/dashes in abstracts
% replaced with ASCII/TeX equivalents. Event date ranges moved from MONTH (which
% must be a month macro) into the biblatex-compatible eventdate field, which
% classic styles simply ignore.

@inproceedings{2020:Control:SEAMS,
  author    = {C{\'a}mara, Javier and Papadopoulos, Alessandro V. and Vogel, Thomas and Weyns, Danny and Garlan, David and Huang, Shihong and Tei, Kenji},
  title     = {Towards Bridging the Gap between Control and Self-Adaptive System Properties},
  booktitle = {Proceedings of the 15th International Symposium on Software Engineering for Adaptive and Self-managing Systems (SEAMS)},
  year      = {2020},
  month     = jun,
  eventdate = {2020-06-29/2020-07-03},
  pdf       = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/2004.118462020_Control_SEAMS.pdf},
  note      = {Talk},
  keywords  = {Control Theory, Self-adaptation},
  abstract  = {Two of the main paradigms used to build adaptive software employ different types of properties to capture relevant aspects of the system's run-time behavior. On the one hand, control systems consider properties that concern static aspects like stability, as well as dynamic properties that capture the transient evolution of variables such as settling time. On the other hand, self-adaptive systems consider mostly non-functional properties that capture concerns such as performance, reliability, and cost. In general, it is not easy to reconcile these two types of properties or identify under which conditions they constitute a good fit to provide run-time guarantees. There is a need of identifying the key properties in the areas of control and self-adaptation, as well as of characterizing and mapping them to better understand how they relate and possibly complement each other. In this paper, we take a first step to tackle this problem by: (1) identifying a set of key properties in control theory, (2) illustrating the formalization of some of these properties employing temporal logic languages commonly used to engineer self-adaptive software systems, and (3) illustrating how to map key properties that characterize self-adaptive software systems into control properties, leveraging their formalization in temporal logics. We illustrate the different steps of the mapping on an exemplar case in the cloud computing domain and conclude with identifying open challenges in the area.},
}

@inproceedings{CASA21,
  author    = {Alharbi, Mohammed and Huang, Shihong and Garlan, David},
  title     = {A Probabilistic Model for Personality Trait Focused Explainability},
  booktitle = {Proceedings of the 4th International Workshop on Context-aware, Autonomous and Smart Architecture (CASA 2021), co-located with the 15th European Conference on Software Architecture},
  year      = {2021},
  month     = sep,
  eventdate = {2021-09-13/2021-09-17},
  address   = {Virtual (originally V{\"a}xj{\"o}, Sweden)},
  pdf       = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/CASA-paper5.pdf},
  keywords  = {Explainable Software},
  abstract  = {Explainability refers to the degree to which a software system's actions or solutions can be understood by humans. Giving humans the right amount of explanation at the right time is an important factor in maximizing the effective collaboration between an adaptive system and humans during interaction. However, explanations come with costs, such as the required time of explanation and humans' response time. Hence it is not always clear whether explanations will improve overall system utility and, if so, how the system should effectively provide explanation to humans, particularly given that different humans may benefit from different amounts and frequency of explanation. To provide a partial basis for making such decisions, this paper defines a formal framework that incorporates human personality traits as one of the important elements in guiding automated decision-making about the proper amount of explanation that should be given to the human to improve the overall system utility. Specifically, we use probabilistic model analysis to determine how to utilize explanations in an effective way. To illustrate our approach, Grid -- a virtual human and system interaction game -- is developed to represent scenarios for human-systems collaboration and to demonstrate how a human's personality traits can be used as a factor to consider for systems in providing appropriate explanations.},
}