@inproceedings{229c2108e850481e9f548e40d3726880,
title = "A Safety Framework for Critical Systems Utilising Deep Neural Networks",
abstract = "Increasingly sophisticated mathematical modelling processes from Machine Learning are being used to analyse complex data. However, the performance and explainability of these models within practical critical systems require rigorous and continuous verification of their safe utilisation. Working towards addressing this challenge, this paper presents a principled, novel safety argument framework for critical systems that utilise deep neural networks. The approach supports various forms of prediction, e.g., the future reliability of passing some demands, or the confidence in achieving a required reliability level. It is supported by a Bayesian analysis using operational data and recent verification and validation techniques for deep learning. The prediction is conservative: it starts with partial prior knowledge obtained from lifecycle activities and then determines the worst-case prediction. Open challenges are also identified.",
keywords = "Assurance arguments, Bayesian inference, Deep learning verification, Quantitative claims, Reliability claims, Safe AI, Safety cases",
author = "Xingyu Zhao and Alec Banks and James Sharp and Valentin Robu and David Flynn and Michael Fisher and Xiaowei Huang",
year = "2020",
doi = "10.1007/978-3-030-54549-9_16",
language = "English",
isbn = "9783030545482",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "244--259",
editor = "Ant{\'o}nio Casimiro and Pedro Ferreira and Frank Ortmeier and Friedemann Bitsch",
booktitle = "Computer Safety, Reliability, and Security: SAFECOMP 2020",
address = "United States",
note = "39th International Conference on Computer Safety, Reliability and Security, SAFECOMP 2020; Conference date: 16-09-2020 through 18-09-2020",
}