@inproceedings{8e1082a9277a455a9aeae58fd6b7b726,
title = "Relative Robustness of Quantized Neural Networks Against Adversarial Attacks",
abstract = "Neural networks are increasingly being moved to edge computing devices and smart sensors, to reduce latency and save bandwidth. Neural network compression such as quantization is necessary to fit trained neural networks into these resource constrained devices. At the same time, their use in safety-critical applications raises the need to verify properties of neural networks. Adversarial perturbations have potential to be used as an attack mechanism on neural networks, leading to {"}obviously wrong{"} misclassification. SMT solvers have been proposed to formally prove robustness guarantees against such adversarial perturbations. We investigate how well these robustness guarantees are preserved when the precision of a neural network is quantized. We also evaluate how effectively adversarial attacks transfer to quantized neural networks. Our results show that quantized neural networks are generally robust relative to their full precision counterpart (98.6%-99.7%), and the transfer of adversarial attacks decreases to as low as 52.05% when the subtlety of perturbation increases. These results show that quantization introduces resilience against transfer of adversarial attacks whilst causing negligible loss of robustness.",
keywords = "adversarial attack, neural network, verification",
author = "Kirsty Duncan and Ekaterina Komendantskaya and Robert Stewart and Michael Lones",
year = "2020",
month = sep,
day = "28",
doi = "10.1109/IJCNN48605.2020.9207596",
language = "English",
series = "International Joint Conference on Neural Networks",
publisher = "IEEE",
booktitle = "2020 International Joint Conference on Neural Networks (IJCNN)",
address = "United States",
}