@phdthesis{ahmadi_mehri_towards_2019,
	address = {Karlskrona},
	type = {Licentiate {Thesis}},
	title = {Towards {Secure} {Collaborative} {AI} {Service} {Chains}},
	url = {http://urn.kb.se/resolve?urn=urn%3Anbn%3Ase%3Abth-18531},
	abstract = {At present, Artificial Intelligence (AI) systems have been adopted in many different domains, such as healthcare, robotics, automotive, telecommunication systems, security, and finance, to integrate intelligence into services and applications. Intelligent personal assistants such as Siri and Alexa are examples of AI systems making an impact on our daily lives. Since many AI systems are data-driven, their development process requires large volumes of data for training and validation, advanced algorithms, computing power, and storage. Collaboration in the AI development process (the AI engineering process) will reduce the cost and time to market for AI applications. However, collaboration introduces concerns about privacy and the piracy of intellectual property, which can be caused by the actors who collaborate in the engineering process. This work investigates the non-functional requirements, such as privacy and security, for enabling collaboration in AI service chains. It proposes an architectural design approach for collaborative AI engineering and explores the concept of the pipeline (service chain) for chaining AI functions. In order to enable controlled collaboration between AI artefacts in a pipeline, this work makes use of virtualisation technology to define and implement Virtual Premises (VPs), which act as protection wrappers for AI pipelines. A VP is a virtual policy enforcement point for a pipeline and requires access permission and authenticity for each element in a pipeline before the pipeline can be used. Furthermore, the proposed architecture is evaluated using a use-case approach that enables quick detection of design flaws during the initial stage of implementation. To evaluate the security level and compliance with the security requirements, threat modelling was used to identify potential threats and vulnerabilities of the system and to analyse their possible effects. The output of the threat modelling was used to define countermeasures to threats related to unauthorised access and execution of AI artefacts.},
	language = {eng},
	urldate = {2020-01-28},
	school = {Blekinge Institute of Technology, Faculty of Computing, Department of Computer Science},
	author = {Ahmadi Mehri, Vida},
	year = {2019},
}