@article{7567,
  author   = {Miles Brundage and Shahar Avin and Jasmine Wang and Haydn Belfield and Gretchen Krueger and Gillian Hadfield and Heidy Khlaaf and Jingying Yang and Helen Toner and Ruth Fong and Tegan Maharaj and Pang Wei Koh and Sara Hooker and Jade Leung and Andrew Trask and Emma Bluemke and Jonathan Lebensold and Cullen O'Keefe and Mark Koren and Théo Ryffel and JB Rubinovitz and Tamay Besiroglu and Federica Carugati and Jack Clark and Peter Eckersley and Sarah de Haas and Maritza Johnson and Ben Laurie and Alex Ingerman and Igor Krawczuk and Amanda Askell and Rosario Cammarota and Andrew Lohn and David Krueger and Charlotte Stix and Peter Henderson and Logan Graham and Carina Prunkl and Bianca Martin and Elizabeth Seger and Noa Zilberman and Seán Ó hÉigeartaigh and Frens Kroeger and Girish Sastry and Rebecca Kagan and Adrian Weller and Brian Tse and Elizabeth Barnes and Allan Dafoe and Paul Scharre and Ariel Herbert-Voss and Martijn Rasser and Shagun Sodhani and Carrick Flynn and Thomas Gilbert and Lisa Dyer and Saif Khan and Yoshua Bengio and Markus Anderljung},
  title    = {Toward Trustworthy AI Development: Mechanisms for Supporting Verifiable Claims},
  abstract = {With the recent wave of progress in artificial intelligence (AI) has come a growing awareness of the large-scale impacts of AI systems, and recognition that existing regulations and norms in industry and academia are insufficient to ensure responsible AI development. In order for AI developers to earn trust from system users, customers, civil society, governments, and other stakeholders that they are building AI responsibly, they will need to make verifiable claims to which they can be held accountable. Those outside of a given organization also need effective means of scrutinizing such claims. This report suggests various steps that different stakeholders can take to improve the verifiability of claims made about AI systems and their associated development processes, with a focus on providing evidence about the safety, security, fairness, and privacy protection of AI systems. We analyze ten mechanisms for this purpose -- spanning institutions, software, and hardware -- and make recommendations aimed at implementing, exploring, or improving those mechanisms.},
  year     = {2020},
  pages    = {1--9},
  url      = {http://arxiv.org/abs/2004.07213},
  language = {eng},
}