@InProceedings{Casimiro:ACSOS:2022,
AUTHOR = {Casimiro, Maria and Romano, Paolo and Garlan, David and Rodrigues, Luis},
TITLE = {Towards a Framework for Adapting Machine Learning Components},
YEAR = {2022},
BOOKTITLE = {2022 IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)},
PDF = {http://acme.able.cs.cmu.edu/pubs/uploads/pdf/ACSOS2022.pdf},
ABSTRACT = {Machine Learning (ML) models are now commonly
used as components in systems. Like any other component, ML
components can produce erroneous outputs that may penalize
system utility. In this context, self-adaptive systems emerge as
a natural approach to cope with ML mispredictions, through
the execution of adaptation tactics such as model retraining. To
synthesize an adaptation strategy, the self-adaptation manager
needs to reason about the cost-benefit tradeoffs of the applicable
tactics, which is a non-trivial task for tactics such as model
retraining, whose benefits are both context- and data-dependent.
To address this challenge, this paper proposes a probabilistic
modeling framework that supports automated reasoning about
the cost-benefit tradeoffs associated with improving ML components
of ML-based systems. The key idea of the proposed approach is to
decouple the problems of (i) estimating the expected performance
improvement after retraining and (ii) estimating the impact of
improved ML predictions on overall system utility.
We demonstrate the application of the proposed framework by
using it to self-adapt a state-of-the-art ML-based fraud-detection
system, which we evaluate using a publicly available, real
fraud-detection dataset. We show that by predicting the system
utility stemming from retraining an ML component, the probabilistic
model checker can generate adaptation strategies that are
significantly closer to optimal than baselines such as periodic
or reactive retraining.},
KEYWORDS = {Machine Learning, Self-adaptation} }