<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://c4d.lias-lab.fr/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Hib</id>
	<title>COMP4DRONES - User contributions [en]</title>
	<link rel="self" type="application/atom+xml" href="https://c4d.lias-lab.fr/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Hib"/>
	<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php/Special:Contributions/Hib"/>
	<updated>2026-04-07T00:46:10Z</updated>
	<subtitle>User contributions</subtitle>
	<generator>MediaWiki 1.37.1</generator>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39-06.png&amp;diff=336</id>
		<title>File:Wp4-39-06.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39-06.png&amp;diff=336"/>
		<updated>2022-05-11T09:23:35Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39-06.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_05.png&amp;diff=335</id>
		<title>File:Wp4-39 05.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_05.png&amp;diff=335"/>
		<updated>2022-05-11T09:22:39Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 05.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=334</id>
		<title>File:Wp4-39 03.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=334"/>
		<updated>2022-05-11T09:20:07Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 03.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=333</id>
		<title>File:Wp4-39 03.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=333"/>
		<updated>2022-05-11T09:18:34Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 03.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=332</id>
		<title>File:Wp4-39 02.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=332"/>
		<updated>2022-05-11T09:16:57Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 02.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=331</id>
		<title>File:Wp4-39 02.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=331"/>
		<updated>2022-05-11T09:16:04Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 02.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=330</id>
		<title>File:Wp4-39 02.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=330"/>
		<updated>2022-05-11T09:14:31Z</updated>

		<summary type="html">&lt;p&gt;Hib: Hib uploaded a new version of File:Wp4-39 02.png&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=329</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=329"/>
		<updated>2022-05-10T12:16:29Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_01.png|frame|center|High-level architecture of the simulator-based data aggregator|150 px]]&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as it is described below:&lt;br /&gt;
&lt;br /&gt;
* Interface of scenery creator, Unreal engine:&lt;br /&gt;
** Input: CAD 3D model signals. &lt;br /&gt;
** Output: Different scenarios of a road construction.&lt;br /&gt;
*AirSim Simulator:&lt;br /&gt;
**Input:&lt;br /&gt;
***Road construction scenery compiled (digital terrain model).&lt;br /&gt;
***AirSim setting file which contains:, the type of vehicle has to be specified, in this case, a drone; the camera settings (Resolution, angle degree), and the LIDAR sensor settings (Points per seconds of the cloud points, and number of channels).&lt;br /&gt;
***Drone fly script: Python file that describes the movement of the drone, the speed, and the images per second taken.&lt;br /&gt;
**Output: RGB images of drone’s fly simulated. Array of LIDAR cloud points in float32 (in case of using a LiDAR camera).&lt;br /&gt;
&lt;br /&gt;
Considering the inputs required as well as the outputs obtained, the data flow of the Simulated Data Aggregator component is as follows:&lt;br /&gt;
&lt;br /&gt;
#Inputs required are: drone configuration parameters, digital terrain model and 3D models of traffic signs (objects to be detected). &lt;br /&gt;
#Creation of a settings file to configure the simulation parameters: simulation mode, view mode, time of day to simulate the sun position, origin geo-point, camera settings (selection of Lidar or RGB), and vehicle settings among others. (*Vehicle settings parameter is the one including the drone configuration parameters taken as input.). &lt;br /&gt;
#The digital terrain model is compiled through an Unreal plug-in provided by Airsim. &lt;br /&gt;
#The settings file together with the digital terrain model and the 3D models of traffic signs to be represented are sent to the AirSim simulator through the AirSim APIs.&lt;br /&gt;
#The scenario is simulated containing the traffic signs placed in the terrain provided and it is recorded from a drone (with an RGB or a Lidar Sensor) considering the parameters indicated in the settings. &lt;br /&gt;
#The simulation is retrieved in the Scenario Adaptor from the base simulator and it provides a Point-Cloud as a flat array of floats along with the timestamp of the capture and position or RGB images (depending on the component needs) to be used as input for the CNN network in the computer vision system developed under WP3.&lt;br /&gt;
&lt;br /&gt;
Following this, the development process of the simulation environments has consisted of the following phases:&lt;br /&gt;
&lt;br /&gt;
# First of all, a study phase of the real environments has been carried out. In this phase,  the images provided of real drone flights captured during the flight campaigns performed in Jaén (Spain) have been analysed. The study includes also the logic about  the signals position which has been evaluated thanks to the analysis of road real works in order to enlarge the variety of real images.&lt;br /&gt;
# Once the study was completed, simulated environments of different realistic construction site scenarios were performed manually. Thanks to these environments, it has been possible to perform different flights in a short period of time, without the need of carrying out and deploying real flights with drones in real pilot sites. These simulated flights have been performed with different degrees of luminosity and so, different levels of variability of the same scenarios in different periods of the day have been obtained. The configuration of the simulations with different luminosity levels has been very relevant since shadows can alter the result of the recognitions producing false positives. Otherwise, with real flights, this would have had to be accomplished with many identical flights at different times of the day and so, with a high time and economic cost. Some examples of simulated environments are:&lt;br /&gt;
[[File:wp4-39_02.png|frame|center|Simulated environment deployed with Unreal Engine|50px]]&lt;br /&gt;
# Once these environment examples have been compiled and executed on the server, the next step has been to use a python client with the AirSim library running in the flight script mentioned above, in order to create a dataset. This script in the development process is essential, as it automatically generates the images to expand the dataset. Following figures show some examples:&lt;br /&gt;
[[File:wp4-39_03.png|frame|center|Flight performed in simulated environment with different levels of luminosity  ]]&lt;br /&gt;
# Finally, the last step of the development process is to label the generated images to retrain the existing model.&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;br /&gt;
&lt;br /&gt;
Particularly, in the scope of UC2 (and in concrete in Demo 1), the simulated data aggregator has contributed to speed up the data collection phase, which has been cancelled for several issues: the COVID-19 pandemic delayed the initial planning on the drones flights, and the withdrawal of the drone and scenario providers partners within the UC2 only allowed to perform one data collection campaign. Then, the component allows to generate a high amount of data from scenario-tailored simulated drone flights, which have supported the training of the computer vision system, without the need to perform multiple data collection campaigns to get training data. In consequence, cost savings and the possibility of continuing with the initial plans and developments envisaged for the project have been accomplished thanks to the data aggregator simulator. &lt;br /&gt;
Beyond the scope of UC2, the data aggregator simulator is motivated by the high costs of performing a drone flight campaign for training a Computer Vision System, and by the time-consuming for collecting a large amount of annotated training data in a variety of real conditions and environments for covering a higher number of scenarios. In consequence, the application of the data aggregator simulator for generating data can be tailored to any domain for the training of IA systems which needs a wide amount of raw data to train the algorithms. &lt;br /&gt;
Considering the work carried out for the development and deployment of the simulated data aggregator, one of the most relevant tasks to carry out in Artificial Intelligence field is the collection and labelling of data that will be used to train the model. &lt;br /&gt;
In this case, the labelled traffic sign datasets available for training AI models for detection and classification are composed of images with frontal perspective, as shown in Figure 6. However, the perspectives of the drone images will range from zenith to swooping depending on the angle at which the camera is positioned. This configuration parameter changes the image perspective and it can impact on how the AI model detects and classifies objects in the image.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_04.png|frame|center|Example of traffic signs dataset with frontal perspective  ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, it is necessary to train the models with sets of frontal perspective images and to test them with images in a top-down perspective to check if it performs the detections and classifications correctly. In this case, the simulator provides a solution to obtain test environments to learn about the behaviour of the model in this type of images, being able to make changes such as the altitude of the drone or the pitch angle of the drone's camera in order to adapt the model as much as possible to the real world. Figure 7 and Figure 8 show some examples of this test.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_05.png|frame|center|Detections in drone vision imagery (simulation flight) with trained model ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, the use of simulated environments enables the inclusion of variations in the different simulated scenarios in a very simple way, which can be translated into advantages and expected improvements over imaging with drone flights in real environments:&lt;br /&gt;
*Adaptation to different environments&lt;br /&gt;
An essential aspect when generating a dataset is the variability in the data. In order to achieve this in an image dataset, it is necessary to change elements such as element arrangement (Figure 3, Figure 4 and Figure 5), luminosity, opacities, meteorological effects such as rain or snow, etc.&lt;br /&gt;
In a simulated environment, it is sufficient to apply changes in the simulator configuration, changes in the layout or creation of new elements to obtain the desired conditions. However, in a real environment several flights have to be performed based on the weather conditions, and manually change the layout by moving objects around again and again which implies high costs in money and time. &lt;br /&gt;
&lt;br /&gt;
*Cost and Time savings &lt;br /&gt;
The ease of generating changes in the simulated environments is translated directly into economic and time savings, since the physical elements required to create the environment are reduced, such as the personnel necessary for the recording of flights, drones renting, personnel specialized in flying these drones, etc. In addition to significantly reducing the cost related to labelling, as described in the following point.&lt;br /&gt;
&lt;br /&gt;
*Automation of labelling&lt;br /&gt;
Automating a laborious task such as labelling, allows to significantly reduce the time required to obtain a sufficient data set for training and dispense with cost of personnel to carry out this task. In the specific case of the AirSim simulator used for this project, it is possible to obtain segmented images of the environment, such as the one represented in Figure 9, which allow labelling of each object.&lt;br /&gt;
[[File:wp4-39-06.png|frame|center|Image with segmented objects obtained in the AirSim simulator]]&lt;br /&gt;
However, since the user is not yet allowed to control the assignment of RGB colour codes to the objects in the environment, this process would be in a &amp;quot;semi-automated&amp;quot; phase. According to the documentation of the simulator itself , work is in progress to fully automate this task.&lt;br /&gt;
Once labelling automation is achieved, the generation of new datasets would be accelerated and both the time and monetary cost would be significantly reduced by avoiding human-generated labelling.&lt;br /&gt;
&lt;br /&gt;
*Transfer Learning&lt;br /&gt;
The adaptation of pre-trained base architectures with data sets very close to the target data is essential to achieve a model fully prepared for the task at hand.&lt;br /&gt;
For this, thousands of labelled images of the same type and composition as the target images are required, a task in which the simulator is of great help in order to generate these images quickly and economically, as well as to accelerate the labelling process as previously indicated.&lt;br /&gt;
&lt;br /&gt;
*Adaptation to future changes&lt;br /&gt;
Once the objective of the AI model has been achieved, it must continue to evolve according to future changes in the real environment, for which it must be retrained with the modifications and variations in the environment. Therefore, in order to represent such changes in the training data sets, the simulator plays a key role.&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=328</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=328"/>
		<updated>2022-05-10T11:56:04Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_01.png|frame|center|High-level architecture of the simulator-based data aggregator]]&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as it is described below:&lt;br /&gt;
&lt;br /&gt;
* Interface of scenery creator, Unreal engine:&lt;br /&gt;
** Input: CAD 3D model signals. &lt;br /&gt;
** Output: Different scenarios of a road construction.&lt;br /&gt;
*AirSim Simulator:&lt;br /&gt;
**Input:&lt;br /&gt;
***Road construction scenery compiled (digital terrain model).&lt;br /&gt;
***AirSim setting file which contains:, the type of vehicle has to be specified, in this case, a drone; the camera settings (Resolution, angle degree), and the LIDAR sensor settings (Points per seconds of the cloud points, and number of channels).&lt;br /&gt;
***Drone fly script: Python file that describes the movement of the drone, the speed, and the images per second taken.&lt;br /&gt;
**Output: RGB images of drone’s fly simulated. Array of LIDAR cloud points in float32 (in case of using a LiDAR camera).&lt;br /&gt;
&lt;br /&gt;
Considering the inputs required as well as the outputs obtained, the data flow of the Simulated Data Aggregator component is as follows:&lt;br /&gt;
&lt;br /&gt;
#Inputs required are: drone configuration parameters, digital terrain model and 3D models of traffic signs (objects to be detected). &lt;br /&gt;
#Creation of a settings file to configure the simulation parameters: simulation mode, view mode, time of day to simulate the sun position, origin geo-point, camera settings (selection of Lidar or RGB), and vehicle settings among others. (*Vehicle settings parameter is the one including the drone configuration parameters taken as input.). &lt;br /&gt;
#The digital terrain model is compiled through an Unreal plug-in provided by Airsim. &lt;br /&gt;
#The settings file together with the digital terrain model and the 3D models of traffic signs to be represented are sent to the AirSim simulator through the AirSim APIs.&lt;br /&gt;
#The scenario is simulated containing the traffic signs placed in the terrain provided and it is recorded from a drone (with an RGB or a Lidar Sensor) considering the parameters indicated in the settings. &lt;br /&gt;
#The simulation is retrieved in the Scenario Adaptor from the base simulator and it provides a Point-Cloud as a flat array of floats along with the timestamp of the capture and position or RGB images (depending on the component needs) to be used as input for the CNN network in the computer vision system developed under WP3.&lt;br /&gt;
&lt;br /&gt;
Following this, the development process of the simulation environments has consisted of the following phases:&lt;br /&gt;
&lt;br /&gt;
# First of all, a study phase of the real environments has been carried out. In this phase,  the images provided of real drone flights captured during the flight campaigns performed in Jaén (Spain) have been analysed. The study includes also the logic about  the signals position which has been evaluated thanks to the analysis of road real works in order to enlarge the variety of real images.&lt;br /&gt;
# Once the study was completed, simulated environments of different realistic construction site scenarios were performed manually. Thanks to these environments, it has been possible to perform different flights in a short period of time, without the need of carrying out and deploying real flights with drones in real pilot sites. These simulated flights have been performed with different degrees of luminosity and so, different levels of variability of the same scenarios in different periods of the day have been obtained. The configuration of the simulations with different luminosity levels has been very relevant since shadows can alter the result of the recognitions producing false positives. Otherwise, with real flights, this would have had to be accomplished with many identical flights at different times of the day, which is time-consuming and entails a high economic cost. Some examples of simulated environments are:&lt;br /&gt;
[[File:wp4-39_02.png|frame|center|Simulated environment deployed with Unreal Engine]]&lt;br /&gt;
# Once these environment examples have been compiled and executed on the server, the next step has been to use a python client with the AirSim library running in the flight script mentioned above, in order to create a dataset. This script in the development process is essential, as it automatically generates the images to expand the dataset. Following figures show some examples:&lt;br /&gt;
[[File:wp4-39_03.png|frame|center|Flight performed in simulated environment with different levels of luminosity  ]]&lt;br /&gt;
# Finally, the last step of the development process is to label the generated images to retrain the existing model.&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;br /&gt;
&lt;br /&gt;
Particularly, in the scope of UC2 (and specifically in Demo 1), the simulated data aggregator has contributed to speed up the data collection phase, which was hampered by several issues: the COVID-19 pandemic delayed the initial planning of the drone flights, and the withdrawal of the drone and scenario provider partners within the UC2 only allowed one data collection campaign to be performed. The component thus allows generating a high amount of data from scenario-tailored simulated drone flights, which has supported the training of the computer vision system, without the need to perform multiple data collection campaigns to get training data. In consequence, cost savings and the possibility of continuing with the initial plans and developments envisaged for the project have been accomplished thanks to the data aggregator simulator. &lt;br /&gt;
Beyond the scope of UC2, the data aggregator simulator is motivated by the high costs of performing a drone flight campaign for training a Computer Vision System, and by the time required to collect a large amount of annotated training data in a variety of real conditions and environments for covering a higher number of scenarios. In consequence, the application of the data aggregator simulator for generating data can be tailored to any domain for the training of AI systems which need a wide amount of raw data to train the algorithms. &lt;br /&gt;
Considering the work carried out for the development and deployment of the simulated data aggregator, one of the most relevant tasks to carry out in Artificial Intelligence field is the collection and labelling of data that will be used to train the model. &lt;br /&gt;
In this case, the labelled traffic sign datasets available for training AI models for detection and classification are composed of images with frontal perspective, as shown in Figure 6. However, the perspectives of the drone images will range from zenith to swooping depending on the angle at which the camera is positioned. This configuration parameter changes the image perspective and it can impact on how the AI model detects and classifies objects in the image.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_04.png|frame|center|Example of traffic signs dataset with frontal perspective  ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, it is necessary to train the models with sets of frontal perspective images and to test them with images in a top-down perspective to check if it performs the detections and classifications correctly. In this case, the simulator provides a solution to obtain test environments to learn about the behaviour of the model in this type of images, being able to make changes such as the altitude of the drone or the pitch angle of the drone's camera in order to adapt the model as much as possible to the real world. Figure 7 and Figure 8 show some examples of this test.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_05.png|frame|center|Detections in drone vision imagery (simulation flight) with trained model ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, the use of simulated environments enables the inclusion of variations in the different simulated scenarios in a very simple way, which can be translated into advantages and expected improvements over imaging with drone flights in real environments:&lt;br /&gt;
*Adaptation to different environments&lt;br /&gt;
An essential aspect when generating a dataset is the variability in the data. In order to achieve this in an image dataset, it is necessary to change elements such as element arrangement (Figure 3, Figure 4 and Figure 5), luminosity, opacities, meteorological effects such as rain or snow, etc.&lt;br /&gt;
In a simulated environment, it is sufficient to apply changes in the simulator configuration, changes in the layout or creation of new elements to obtain the desired conditions. However, in a real environment several flights have to be performed based on the weather conditions, and the layout has to be manually changed by moving objects around again and again, which implies high costs in money and time. &lt;br /&gt;
&lt;br /&gt;
*Cost and Time savings &lt;br /&gt;
The ease of generating changes in the simulated environments is translated directly into economic and time savings, since the physical elements required to create the environment are reduced, such as the personnel necessary for the recording of flights, drones renting, personnel specialized in flying these drones, etc. In addition to significantly reducing the cost related to labelling, as described in the following point.&lt;br /&gt;
&lt;br /&gt;
*Automation of labelling&lt;br /&gt;
Automating a laborious task such as labelling, allows to significantly reduce the time required to obtain a sufficient data set for training and dispense with cost of personnel to carry out this task. In the specific case of the AirSim simulator used for this project, it is possible to obtain segmented images of the environment, such as the one represented in Figure 9, which allow labelling of each object.&lt;br /&gt;
[[File:wp4-39-06.png|frame|center|Image with segmented objects obtained in the AirSim simulator]]&lt;br /&gt;
However, since the user is not yet allowed to control the assignment of RGB colour codes to the objects in the environment, this process would be in a &amp;quot;semi-automated&amp;quot; phase. According to the documentation of the simulator itself, work is in progress to fully automate this task.&lt;br /&gt;
Once labelling automation is achieved, the generation of new datasets would be accelerated and both the time and monetary cost would be significantly reduced by avoiding human-generated labelling.&lt;br /&gt;
&lt;br /&gt;
*Transfer Learning&lt;br /&gt;
The adaptation of pre-trained base architectures with data sets very close to the target data is essential to achieve a model fully prepared for the task at hand.&lt;br /&gt;
For this, thousands of labelled images of the same type and composition as the target images are required, a task in which the simulator is of great help in order to generate these images quickly and economically, as well as to accelerate the labelling process as previously indicated.&lt;br /&gt;
&lt;br /&gt;
*Adaptation to future changes&lt;br /&gt;
Once the objective of the AI model has been achieved, it must continue to evolve according to future changes in the real environment, for which it must be retrained with the modifications and variations in the environment. Therefore, in order to represent such changes in the training data sets, the simulator plays a key role.&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=327</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=327"/>
		<updated>2022-05-10T11:53:14Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_01.png|frame|center|High-level architecture of the simulator-based data aggregator]]&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as it is described below:&lt;br /&gt;
&lt;br /&gt;
* Interface of scenery creator, Unreal engine:&lt;br /&gt;
** Input: CAD 3D model signals. &lt;br /&gt;
** Output: Different scenarios of a road construction.&lt;br /&gt;
*AirSim Simulator:&lt;br /&gt;
**Input:&lt;br /&gt;
***Road construction scenery compiled (digital terrain model).&lt;br /&gt;
***AirSim settings file which contains: the type of vehicle to be simulated, in this case a drone; the camera settings (resolution, angle degree); and the LIDAR sensor settings (points per second of the point cloud, and number of channels).&lt;br /&gt;
***Drone fly script: Python file that describes the movement of the drone, the speed, and the images per second taken.&lt;br /&gt;
**Output: RGB images of the simulated drone flight. Array of LIDAR cloud points in float32 (in case of using a LiDAR camera).&lt;br /&gt;
&lt;br /&gt;
Considering the inputs required as well as the outputs obtained, the data flow of the Simulated Data Aggregator component is as follows:&lt;br /&gt;
&lt;br /&gt;
#Inputs required are: drone configuration parameters, digital terrain model and 3D models of traffic signs (objects to be detected). &lt;br /&gt;
#Creation of a settings file to configure the simulation parameters: simulation mode, view mode, time of day to simulate the sun position, origin geo-point, camera settings (selection of Lidar or RGB), and vehicle settings among others. (*Vehicle settings parameter is the one including the drone configuration parameters taken as input.). &lt;br /&gt;
#The digital terrain model is compiled through an Unreal plug-in provided by Airsim. &lt;br /&gt;
#The settings file together with the digital terrain model and the 3D models of traffic signs to be represented are sent to the AirSim simulator through the AirSim APIs.&lt;br /&gt;
#The scenario is simulated containing the traffic signs placed in the terrain provided and it is recorded from a drone (with an RGB or a Lidar Sensor) considering the parameters indicated in the settings. &lt;br /&gt;
#The simulation is retrieved in the Scenario Adaptor from the base simulator and it provides a Point-Cloud as a flat array of floats along with the timestamp of the capture and position or RGB images (depending on the component needs) to be used as input for the CNN network in the computer vision system developed under WP3.&lt;br /&gt;
&lt;br /&gt;
Following this, the development process of the simulation environments has consisted of the following phases:&lt;br /&gt;
&lt;br /&gt;
# First of all, a study phase of the real environments has been carried out. In this phase,  the images provided of real drone flights captured during the flight campaigns performed in Jaén (Spain) have been analysed. The study includes also the logic about  the signals position which has been evaluated thanks to the analysis of road real works in order to enlarge the variety of real images.&lt;br /&gt;
# Once the study was completed, simulated environments of different realistic construction site scenarios were performed manually. Thanks to these environments, it has been possible to perform different flights in a short period of time, without the need of carrying out and deploying real flights with drones in real pilot sites. These simulated flights have been performed with different degrees of luminosity and so, different levels of variability of the same scenarios in different periods of the day have been obtained. The configuration of the simulations with different luminosity levels has been very relevant since shadows can alter the result of the recognitions producing false positives. Otherwise, with real flights, this would have had to be accomplished with many identical flights at different times of the day, which is time-consuming and entails a high economic cost. Some examples of simulated environments are:&lt;br /&gt;
[[File:wp4-39_02.png|frame|center|Simulated environment deployed with Unreal Engine]]&lt;br /&gt;
# Once these environment examples have been compiled and executed on the server, the next step has been to use a python client with the AirSim library running in the flight script mentioned above, in order to create a dataset. This script in the development process is essential, as it automatically generates the images to expand the dataset. Following figures show some examples:&lt;br /&gt;
[[File:wp4-39_03.png|frame|center|Flight performed in simulated environment with different levels of luminosity  ]]&lt;br /&gt;
# Finally, the last step of the development process is to label the generated images to retrain the existing model.&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;br /&gt;
&lt;br /&gt;
Particularly, in the scope of UC2 (and specifically in Demo 1), the simulated data aggregator has contributed to speed up the data collection phase, which was hampered by several issues: the COVID-19 pandemic delayed the initial planning of the drone flights, and the withdrawal of the drone and scenario provider partners within the UC2 only allowed one data collection campaign to be performed. The component thus allows generating a high amount of data from scenario-tailored simulated drone flights, which has supported the training of the computer vision system, without the need to perform multiple data collection campaigns to get training data. In consequence, cost savings and the possibility of continuing with the initial plans and developments envisaged for the project have been accomplished thanks to the data aggregator simulator. &lt;br /&gt;
Beyond the scope of UC2, the data aggregator simulator is motivated by the high costs of performing a drone flight campaign for training a Computer Vision System, and by the time required to collect a large amount of annotated training data in a variety of real conditions and environments for covering a higher number of scenarios. In consequence, the application of the data aggregator simulator for generating data can be tailored to any domain for the training of AI systems which need a wide amount of raw data to train the algorithms. &lt;br /&gt;
Considering the work carried out for the development and deployment of the simulated data aggregator, one of the most relevant tasks to carry out in Artificial Intelligence field is the collection and labelling of data that will be used to train the model. &lt;br /&gt;
In this case, the labelled traffic sign datasets available for training AI models for detection and classification are composed of images with frontal perspective, as shown in Figure 6. However, the perspectives of the drone images will range from zenith to swooping depending on the angle at which the camera is positioned. This configuration parameter changes the image perspective and it can impact on how the AI model detects and classifies objects in the image.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_04.png|frame|center|Example of traffic signs dataset with frontal perspective  ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, it is necessary to train the models with sets of frontal perspective images and to test them with images in a top-down perspective to check if it performs the detections and classifications correctly. In this case, the simulator provides a solution to obtain test environments to learn about the behaviour of the model in this type of images, being able to make changes such as the altitude of the drone or the pitch angle of the drone's camera in order to adapt the model as much as possible to the real world. Figure 7 and Figure 8 show some examples of this test.&lt;br /&gt;
&lt;br /&gt;
[[File:wp4-39_05.png|frame|center|Detections in drone vision imagery (simulation flight) with trained model ]]&lt;br /&gt;
&lt;br /&gt;
Therefore, the use of simulated environments enables the inclusion of variations in the different simulated scenarios in a very simple way, which can be translated into advantages and expected improvements over imaging with drone flights in real environments:&lt;br /&gt;
*Adaptation to different environments&lt;br /&gt;
An essential aspect when generating a dataset is the variability in the data. In order to achieve this in an image dataset, it is necessary to change elements such as element arrangement (Figure 3, Figure 4 and Figure 5), luminosity, opacities, meteorological effects such as rain or snow, etc.&lt;br /&gt;
In a simulated environment, it is sufficient to apply changes in the simulator configuration, changes in the layout or creation of new elements to obtain the desired conditions. However, in a real environment several flights have to be performed based on the weather conditions, and the layout has to be manually changed by moving objects around again and again, which implies high costs in money and time. &lt;br /&gt;
&lt;br /&gt;
*Cost and Time savings &lt;br /&gt;
The ease of generating changes in the simulated environments is translated directly into economic and time savings, since the physical elements required to create the environment are reduced, such as the personnel necessary for the recording of flights, drones renting, personnel specialized in flying these drones, etc. In addition to significantly reducing the cost related to labelling, as described in the following point.&lt;br /&gt;
&lt;br /&gt;
*Automation of labelling&lt;br /&gt;
Automating a laborious task such as labelling, allows to significantly reduce the time required to obtain a sufficient data set for training and dispense with cost of personnel to carry out this task. In the specific case of the AirSim simulator used for this project, it is possible to obtain segmented images of the environment, such as the one represented in Figure 9, which allow labelling of each object.&lt;br /&gt;
[[File:wp4-39_06.png|frame|center|Image with segmented objects obtained in the AirSim simulator]]&lt;br /&gt;
However, since the user is not yet allowed to control the assignment of RGB colour codes to the objects in the environment, this process would be in a &amp;quot;semi-automated&amp;quot; phase. According to the documentation of the simulator itself, work is in progress to fully automate this task.&lt;br /&gt;
Once labelling automation is achieved, the generation of new datasets would be accelerated and both the time and monetary cost would be significantly reduced by avoiding human-generated labelling.&lt;br /&gt;
&lt;br /&gt;
*Transfer Learning&lt;br /&gt;
The adaptation of pre-trained base architectures with data sets very close to the target data is essential to achieve a model fully prepared for the task at hand.&lt;br /&gt;
For this, thousands of labelled images of the same type and composition as the target images are required, a task in which the simulator is of great help in order to generate these images quickly and economically, as well as to accelerate the labelling process as previously indicated.&lt;br /&gt;
&lt;br /&gt;
*Adaptation to future changes&lt;br /&gt;
Once the objective of the AI model has been achieved, it must continue to evolve according to future changes in the real environment, for which it must be retrained with the modifications and variations in the environment. Therefore, in order to represent such changes in the training data sets, the simulator plays a key role.&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39-06.png&amp;diff=326</id>
		<title>File:Wp4-39-06.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39-06.png&amp;diff=326"/>
		<updated>2022-05-10T11:39:01Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_05.png&amp;diff=325</id>
		<title>File:Wp4-39 05.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_05.png&amp;diff=325"/>
		<updated>2022-05-10T11:38:48Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_04.png&amp;diff=324</id>
		<title>File:Wp4-39 04.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_04.png&amp;diff=324"/>
		<updated>2022-05-10T11:38:36Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=323</id>
		<title>File:Wp4-39 03.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_03.png&amp;diff=323"/>
		<updated>2022-05-10T11:38:14Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=322</id>
		<title>File:Wp4-39 02.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_02.png&amp;diff=322"/>
		<updated>2022-05-10T11:37:50Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_01.png&amp;diff=321</id>
		<title>File:Wp4-39 01.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp4-39_01.png&amp;diff=321"/>
		<updated>2022-05-10T11:37:15Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=320</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=320"/>
		<updated>2022-05-10T11:21:29Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as described below:&lt;br /&gt;
&lt;br /&gt;
* Interface of scenery creator, Unreal engine:&lt;br /&gt;
** Input: CAD 3D model signals. &lt;br /&gt;
** Output: Different scenarios of a road construction.&lt;br /&gt;
*AirSim Simulator:&lt;br /&gt;
**Input:&lt;br /&gt;
***Road construction scenery compiled (digital terrain model).&lt;br /&gt;
***AirSim settings file which contains: the type of vehicle to be specified, in this case, a drone; the camera settings (resolution, angle degree), and the LIDAR sensor settings (points per second of the cloud points, and number of channels).&lt;br /&gt;
***Drone fly script: Python file that describes the movement of the drone, the speed, and the images per second taken.&lt;br /&gt;
**Output: RGB images of drone’s fly simulated. Array of LIDAR cloud points in float32 (in case of using a LiDAR camera).&lt;br /&gt;
&lt;br /&gt;
Considering the inputs required as well as the outputs obtained, the data flow of the Simulated Data Aggregator component is as follows:&lt;br /&gt;
&lt;br /&gt;
#Inputs required are: drone configuration parameters, digital terrain model and 3D models of traffic signs (objects to be detected). &lt;br /&gt;
#Creation of a settings file to configure the simulation parameters: simulation mode, view mode, time of day to simulate the sun position, origin geo-point, camera settings (selection of Lidar or RGB), and vehicle settings among others. (*Vehicle settings parameter is the one including the drone configuration parameters taken as input.). &lt;br /&gt;
#The digital terrain model is compiled through an Unreal plug-in provided by Airsim. &lt;br /&gt;
#The settings file together with the digital terrain model and the 3D models of traffic signs to be represented are sent to the AirSim simulator through the AirSim APIs.&lt;br /&gt;
#The scenario is simulated containing the traffic signs placed in the terrain provided and it is recorded from a drone (with an RGB or a Lidar Sensor) considering the parameters indicated in the settings. &lt;br /&gt;
#The simulation is retrieved in the Scenario Adaptor from the base simulator and it provides a Point-Cloud as a flat array of floats along with the timestamp of the capture and position or RGB images (depending on the component needs) to be used as input for the CNN network in the computer vision system developed under WP3.&lt;br /&gt;
&lt;br /&gt;
Following this, the development process of the simulation environments has consisted of the following phases:&lt;br /&gt;
&lt;br /&gt;
# First of all, a study phase of the real environments has been carried out. In this phase,  the images provided of real drone flights captured during the flight campaigns performed in Jaén (Spain) have been analysed. The study includes also the logic about  the signals position which has been evaluated thanks to the analysis of road real works in order to enlarge the variety of real images.&lt;br /&gt;
# Once the study was completed, simulated environments of different realistic construction site scenarios were performed manually. Thanks to these environments, it has been possible to perform different flights in a short period of time, without the need of carrying out and deploying real flights with drones in real pilot sites. These simulated flights have been performed with different degrees of luminosity and so, different levels of variability of the same scenarios in different periods of the day have been obtained. The configuration of the simulations with different luminosity levels has been very relevant since shadows can alter the result of the recognitions producing false positives. Otherwise, with real flights, this would have had to be accomplished with many identical flights at different times of the day and so, would have been time-consuming and incurred a high economic cost. Some examples of simulated environments are:&lt;br /&gt;
# Once these environment examples have been compiled and executed on the server, the next step has been to use a python client with the AirSim library running in the flight script mentioned above, in order to create a dataset. This script in the development process is essential, as it automatically generates the images to expand the dataset. Following figures show some examples:&lt;br /&gt;
# Finally, the last step of the development process is to label the generated images to retrain the existing model.&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;br /&gt;
&lt;br /&gt;
Particularly, in the scope of UC2 (and concretely in Demo 1), the simulated data aggregator has contributed to speed up the data collection phase, which has been cancelled for several issues: the COVID-19 pandemic delayed the initial planning of the drone flights, and the withdrawal of the drone and scenario provider partners within the UC2 only allowed to perform one data collection campaign. Then, the component allows to generate a high amount of data from scenario-tailored simulated drone flights, which have supported the training of the computer vision system, without the need to perform multiple data collection campaigns to get training data. In consequence, cost savings and the possibility of continuing with the initial plans and developments envisaged for the project have been accomplished thanks to the data aggregator simulator. &lt;br /&gt;
Beyond the scope of UC2, the data aggregator simulator is motivated by the high costs of performing a drone flight campaign for training a Computer Vision System, and by the time-consuming for collecting a large amount of annotated training data in a variety of real conditions and environments for covering a higher number of scenarios. In consequence, the application of the data aggregator simulator for generating data can be tailored to any domain for the training of IA systems which needs a wide amount of raw data to train the algorithms. &lt;br /&gt;
Considering the work carried out for the development and deployment of the simulated data aggregator, one of the most relevant tasks to carry out in Artificial Intelligence field is the collection and labelling of data that will be used to train the model. &lt;br /&gt;
In this case, the labelled traffic sign datasets available for training AI models for detection and classification are composed of images with frontal perspective, as shown in Figure 6. However, the perspectives of the drone images will range from zenith to swooping depending on the angle at which the camera is positioned. This configuration parameter changes the image perspective and it can impact on how the AI model detects and classifies objects in the image.&lt;br /&gt;
&lt;br /&gt;
Therefore, it is necessary to train the models with sets of frontal perspective images and to test them with images in a top-down perspective to check if it performs the detections and classifications correctly. In this case, the simulator provides a solution to obtain test environments to learn about the behaviour of the model in this type of images, being able to make changes such as the altitude of the drone or the pitch angle of the drone's camera in order to adapt the model as much as possible to the real world. Figure 7 and Figure 8 show some examples of this test.&lt;br /&gt;
&lt;br /&gt;
Therefore, the use of simulated environments enables the inclusion of variations in the different simulated scenarios in a very simple way, which can be translated into advantages and expected improvements over imaging with drone flights in real environments:&lt;br /&gt;
*Adaptation to different environments&lt;br /&gt;
An essential aspect when generating a dataset is the variability in the data. In order to achieve this in an image dataset, it is necessary to change elements such as element arrangement (Figure 3, Figure 4 and Figure 5), luminosity, opacities, meteorological effects such as rain or snow, etc.&lt;br /&gt;
In a simulated environment, it is sufficient to apply changes in the simulator configuration, changes in the layout or creation of new elements to obtain the desired conditions. However, in a real environment several flights have to be performed based on the weather conditions, and the layout manually changed by moving objects around again and again, which implies high costs in money and time. &lt;br /&gt;
&lt;br /&gt;
*Cost and Time savings &lt;br /&gt;
The ease of generating changes in the simulated environments is translated directly into economic and time savings, since the physical elements required to create the environment are reduced, such as the personnel necessary for the recording of flights, drones renting, personnel specialized in flying these drones, etc. In addition to significantly reducing the cost related to labelling, as described in the following point.&lt;br /&gt;
&lt;br /&gt;
*Automation of labelling&lt;br /&gt;
Automating a laborious task such as labelling, allows to significantly reduce the time required to obtain a sufficient data set for training and dispense with cost of personnel to carry out this task. In the specific case of the AirSim simulator used for this project, it is possible to obtain segmented images of the environment, such as the one represented in Figure 9, which allow labelling of each object.&lt;br /&gt;
However, since the user is not yet allowed to control the assignment of RGB colour codes to the objects in the environment, this process would be in a &amp;quot;semi-automated&amp;quot; phase. According to the documentation of the simulator itself, work is in progress to fully automate this task.&lt;br /&gt;
Once labelling automation is achieved, the generation of new datasets would be accelerated and both the time and monetary cost would be significantly reduced by avoiding human-generated labelling.&lt;br /&gt;
&lt;br /&gt;
*Transfer Learning&lt;br /&gt;
The adaptation of pre-trained base architectures with data sets very close to the target data is essential to achieve a model fully prepared for the task at hand.&lt;br /&gt;
For this, thousands of labelled images of the same type and composition as the target images are required, a task in which the simulator is of great help in order to generate these images quickly and economically, as well as to accelerate the labelling process as previously indicated.&lt;br /&gt;
&lt;br /&gt;
*Adaptation to future changes&lt;br /&gt;
Once the objective of the AI model has been achieved, it must continue to evolve according to future changes in the real environment, for which it must be retrained with the modifications and variations in the environment. Therefore, in order to represent such changes in the training data sets, the simulator plays a key role.&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=319</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=319"/>
		<updated>2022-05-10T11:17:24Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as described below:&lt;br /&gt;
&lt;br /&gt;
* Interface of scenery creator, Unreal engine:&lt;br /&gt;
** Input: CAD 3D model signals. &lt;br /&gt;
** Output: Different scenarios of a road construction.&lt;br /&gt;
*AirSim Simulator:&lt;br /&gt;
**Input:&lt;br /&gt;
***Road construction scenery compiled (digital terrain model).&lt;br /&gt;
***AirSim settings file which contains: the type of vehicle to be specified, in this case, a drone; the camera settings (resolution, angle degree), and the LIDAR sensor settings (points per second of the cloud points, and number of channels).&lt;br /&gt;
***Drone fly script: Python file that describes the movement of the drone, the speed, and the images per second taken.&lt;br /&gt;
**Output: RGB images of drone’s fly simulated. Array of LIDAR cloud points in float32 (in case of using a LiDAR camera).&lt;br /&gt;
&lt;br /&gt;
Considering the inputs required as well as the outputs obtained, the data flow of the Simulated Data Aggregator component is as follows:&lt;br /&gt;
&lt;br /&gt;
#Inputs required are: drone configuration parameters, digital terrain model and 3D models of traffic signs (objects to be detected). &lt;br /&gt;
#Creation of a settings file to configure the simulation parameters: simulation mode, view mode, time of day to simulate the sun position, origin geo-point, camera settings (selection of Lidar or RGB), and vehicle settings among others. (*Vehicle settings parameter is the one including the drone configuration parameters taken as input.). &lt;br /&gt;
#The digital terrain model is compiled through an Unreal plug-in provided by Airsim. &lt;br /&gt;
#The settings file together with the digital terrain model and the 3D models of traffic signs to be represented are sent to the AirSim simulator through the AirSim APIs.&lt;br /&gt;
#The scenario is simulated containing the traffic signs placed in the terrain provided and it is recorded from a drone (with an RGB or a Lidar Sensor) considering the parameters indicated in the settings. &lt;br /&gt;
#The simulation is retrieved in the Scenario Adaptor from the base simulator and it provides a Point-Cloud as a flat array of floats along with the timestamp of the capture and position or RGB images (depending on the component needs) to be used as input for the CNN network in the computer vision system developed under WP3.&lt;br /&gt;
&lt;br /&gt;
Following this, the development process of the simulation environments has consisted of the following phases:&lt;br /&gt;
&lt;br /&gt;
# First of all, a study phase of the real environments has been carried out. In this phase, the images provided of real drone flights captured during the flight campaigns performed in Jaén (Spain) have been analysed. The study also includes the logic about the position of the signals, which has been evaluated thanks to the analysis of real road works in order to enlarge the variety of real images.&lt;br /&gt;
# Once the study was completed, simulated environments of different realistic construction site scenarios were created manually. Thanks to these environments, it has been possible to perform different flights in a short period of time, without the need of carrying out and deploying real flights with drones in real pilot sites. These simulated flights have been performed with different degrees of luminosity and so, different levels of variability of the same scenarios in different periods of the day have been obtained. The configuration of the simulations with different luminosity levels has been very relevant since shadows can alter the result of the recognitions producing false positives. Otherwise, with real flights, this would have had to be accomplished with many identical flights at different times of the day and so, at a high time and economic cost. Some examples of simulated environments are:&lt;br /&gt;
# Once these environment examples have been compiled and executed on the server, the next step has been to use a python client with the AirSim library running in the flight script mentioned above, in order to create a dataset. This script in the development process is essential, as it automatically generates the images to expand the dataset. Following figures show some examples:&lt;br /&gt;
# Finally, the last step of the development process is to label the generated images to retrain the existing model.&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=318</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=318"/>
		<updated>2022-05-09T14:52:36Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Computer vision is a significant driver in the new era of drone applications, but developing and testing computer vision components for drones in real world is an expensive and time-consuming process. This issue is further exacerbated by the fact that drones are often unsafe and expensive to operate during the training phase. Additionally, one of the key challenges with these techniques is the high sample complexity - the amount of training data needed to learn useful behaviors is often prohibitively high, but in fact, it is needed to collect a large amount of annotated training data in a variety of conditions and environments in order to utilize recent advances in machine intelligence and deep learning. This unfortunately involves not only developing the proposed ML algorithms but also requires vast amounts of time dedicated for the development of an infrastructure able to generate training models in a variety of environments. Consequently, the design, deployment, and evaluation of  computer vision components for drones becomes a complex and costly endeavor for researchers and engineers, since it requires the exploration of multiple conditions and parameters through repeatable and controllable experiments.&lt;br /&gt;
&lt;br /&gt;
In the context of C4D, the proposed component is a simulator-based data aggregator built over AirSim which intends to generate high amounts of training data with the objective of supporting any computer vision component for intelligent decision in drones. This component will allow speeding up the constructive process of a civil infrastructure while saving costs, by reducing the need to perform multiple data collection campaigns to get real training data. Additionally, this component could assess the need of different cameras/sensors (RGB, LIDAR, ...) integrated in the real drone, starting from the simulated clouds of points generated by the component. This would allow detecting which kind of camera could provide more suitable results for the analysis before launching the real drone flight.&lt;br /&gt;
&lt;br /&gt;
In the context of UC2 – Construction, the specific role of the simulated data aggregator is to generate a vast amount of annotated training data which allows us to train the convolutional neural networks which will be implemented in the Computer Vision Component for Drones, which is being developed within the task 3.3. Particularly, this simulated data aggregator will generate a vast amount of RGB images (or point-cloud as a flat array of floats along with the timestamp of the capture and position) which intend to represent the real scenario from scenario-tailored input data, that is, the specific drone configuration parameters, the digital terrain model and the 3D models (CAD files) of the objects to detect, being traffic signs in the particular case of the UC2 - Demo 1. Such RGB images will serve to train the convolutional neural networks, and hence, the DL algorithms of the Computer Vision Component for Drones in an early way.&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the role of the simulator-based data aggregator is clearly referred to the '''''Data acquisition''''' building block in the ''Payload management'' block. Particularly, this component contributes to the reference architecture by simulating the behaviour of the '''''Data acquisition''''' building block, since it allows generating simulated payload data which serves as training payload data for  the ''Data Analytics'' building block in the ''Data Management'' block.&lt;br /&gt;
&lt;br /&gt;
Additionally, the simulator-based data aggregator contributes to the ''UC2-D1-FUN-09-Specialized software for georeferenced point-cloud creation'' as well as to the following requirements of the UC2-DEM1:&lt;br /&gt;
*	UC2-DEM1-FUN-01 - The drone system shall capture a high-density point cloud.&lt;br /&gt;
*	UC2-DEM1-FUN-02 - The drone system shall capture RGB data of the surface.&lt;br /&gt;
*	UC2-DEM1-FUN-03 - The point cloud shall be in an open format such as LAS.&lt;br /&gt;
Therefore, such component also addresses the following KPIs for the UC2-DEM1: &lt;br /&gt;
*	UC2-D1-KPI-T1 - Recognition of work elements through AI: Detection of main work elements position in the road through point cloud&lt;br /&gt;
*	UC2-D1-KPI-T2 - Recognition of work elements through AI: Detection of the total number of elements.&lt;br /&gt;
&lt;br /&gt;
==Technical specification==&lt;br /&gt;
&lt;br /&gt;
From a technical point of view, the simulated data aggregator has been implemented over already existing tools: '''Unreal Engine''' and '''AirSim'''. '''Unreal Engine''' is a complete suite of creation tools for game development, architectural and automotive visualization, linear film and television content creation, broadcast and live event production, training and simulation, and other real-time applications. '''AirSim''' is a simulator for drones, cars and more, built on '''Unreal Engine'''. It is open-source, cross platform, and supports software-in-the-loop simulation with popular flight controllers. It is developed as an Unreal plugin that can simply be dropped into any Unreal environment.&lt;br /&gt;
The recommended hardware requirements specified for the simulation tools are:&lt;br /&gt;
* '''Unreal Engine:'''&lt;br /&gt;
** Operating System: Windows 10 64-bit&lt;br /&gt;
** Processor Quad-core Intel or AMD, 2.5 GHz or faster&lt;br /&gt;
** Memory 8 GB RAM&lt;br /&gt;
** Graphics Card DirectX 11 or 12 compatible graphics card&lt;br /&gt;
**	RHI Version:&lt;br /&gt;
***	DirectX 11: Latest drivers &lt;br /&gt;
*** DirectX 12: Latest drivers &lt;br /&gt;
*** Vulkan: AMD (21.11.3+) and NVIDIA (496.76+)&lt;br /&gt;
* '''AirSim Simulator:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i7&lt;br /&gt;
** GPU: Nvidia GTX 1080&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
However, for the implementation of the Unreal+AirSim simulator server under the Comp4Drones domain, the technical specifications required are the following regarding software and hardware infrastructures: &lt;br /&gt;
*'''Software:'''&lt;br /&gt;
** Windows 10&lt;br /&gt;
** Python 3.7.9&lt;br /&gt;
** Unreal engine Version 4.25&lt;br /&gt;
** AirSim 1.3.0&lt;br /&gt;
*'''Hardware:'''&lt;br /&gt;
** Operating System: Windows 10 64bit&lt;br /&gt;
** CPU: Intel Core i5&lt;br /&gt;
** GPU: Nvidia GTX 980&lt;br /&gt;
** RAM: 32 GB&lt;br /&gt;
Regarding the interfaces, there are two interfaces: the scenery creator based on Unreal Engine and AirSim for the simulation itself. Each of the interfaces requires specific inputs and provides specific outputs as it is described below: &lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=317</id>
		<title>WP4-39</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP4-39&amp;diff=317"/>
		<updated>2022-05-09T14:06:20Z</updated>

		<summary type="html">&lt;p&gt;Hib: Created page with &amp;quot;=Simulated data aggregator supporting intelligent decision in computer vision components=  {|class=&amp;quot;wikitable&amp;quot; |  ID|| WP4-39 |- |   Contributor	|| HI-IBERIA |- |   Levels	|| Functional |- |   Require	|| 	AirSim built on Unreal Engine |- |   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones. |- |   Input	|| 	Simulation scenario par...&amp;quot;&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Simulated data aggregator supporting intelligent decision in computer vision components=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP4-39&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	AirSim built on Unreal Engine&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| Simulator-based data aggregator built over AirSim generating high amounts of training data (RGB images) to support any computer vision component for intelligent decision in drones.&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	Simulation scenario parameters: &lt;br /&gt;
*Digital terrain model imported in Unreal plugin (optional, but recommendable) &lt;br /&gt;
*Objects to be detected in CAD format (Traffic signs in CAD format)&lt;br /&gt;
Drone configuration parameters in JSON format: &lt;br /&gt;
*Vision angle&lt;br /&gt;
*Inclination &lt;br /&gt;
*Viewing depth&lt;br /&gt;
*Flight height &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Simulated RGB images (or point-cloud as a flat array of floats)&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Simulated) Data Acquisition&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 6&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
==Application and Improvements==&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=Component_repository&amp;diff=316</id>
		<title>Component repository</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=Component_repository&amp;diff=316"/>
		<updated>2022-04-29T08:37:16Z</updated>

		<summary type="html">&lt;p&gt;Hib: /* Components list */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;This repository aims at providing common components usable in different application domains, in particular those covered by project use-cases.&lt;br /&gt;
&lt;br /&gt;
The requirements for using a components will be listed, as well as a documentation on how to use it. The component itself will be hosted by the partner who provides it.&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Components list==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|ID &lt;br /&gt;
|Contributor &lt;br /&gt;
|Title&lt;br /&gt;
|-&lt;br /&gt;
|[[WP3-01]]&lt;br /&gt;
|IKERLAN&lt;br /&gt;
|Safety function - Pre-Certified SOM&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-02]] &lt;br /&gt;
|EDI &lt;br /&gt;
|Modular SoC-based embedded reference architecture&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-03]]&lt;br /&gt;
|BUT	&lt;br /&gt;
|Sensor information algorithms&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-04]]	&lt;br /&gt;
|HIB	&lt;br /&gt;
|Computer Vision Components for drones&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-10]]	&lt;br /&gt;
|IFAT	&lt;br /&gt;
|Component for trusted communication&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-13]]	&lt;br /&gt;
|ENAC	&lt;br /&gt;
|Paparazzi UAV&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-14_1]]	&lt;br /&gt;
|ENSMA	&lt;br /&gt;
|Collision avoidance and geo-fencing&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-14_2]]	&lt;br /&gt;
|ENSMA	&lt;br /&gt;
|Distributed control of multi-drone system&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-15_1]]	&lt;br /&gt;
|ACORDE	&lt;br /&gt;
|UWB based indoor positioning&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-15_2]]&lt;br /&gt;
|ACORDE	&lt;br /&gt;
|Multi-antenna GNSS/INS based navigation&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-16]]	&lt;br /&gt;
|SCALIAN	&lt;br /&gt;
|EZ_Chains Fleet Architecture&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-19_1]]	&lt;br /&gt;
|IMEC	&lt;br /&gt;
|Hyperspectral payload&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-19_2]]	&lt;br /&gt;
|IMEC	&lt;br /&gt;
|Hyperspectral image processing&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-20]]	&lt;br /&gt;
|MODIS	&lt;br /&gt;
|Multi-sensor positioning&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-22]]	&lt;br /&gt;
|UNIMORE	&lt;br /&gt;
|Onboard Compute Platform Design Methodology&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-24]]	&lt;br /&gt;
|UNIVAQ	&lt;br /&gt;
|Efficient digital implementation of controllers&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-26]]	&lt;br /&gt;
|UWB	&lt;br /&gt;
|Droneport: an autonomous drone battery management system&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-28]]	&lt;br /&gt;
|UNISS	&lt;br /&gt;
|Accelerator Design Methodology for OOCP&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-36_1]]	&lt;br /&gt;
|UDANET	&lt;br /&gt;
|Smart and predictive energy management system&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-36_2]]&lt;br /&gt;
|UDANET	&lt;br /&gt;
|AI drone system modules&lt;br /&gt;
|- &lt;br /&gt;
|[[WP3-37]]	&lt;br /&gt;
|Aitek	&lt;br /&gt;
|Video and data analytics&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-2]]	&lt;br /&gt;
|SCALIAN	&lt;br /&gt;
|EZ_Land Precision landing&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-5]]	&lt;br /&gt;
|SCALIAN	&lt;br /&gt;
|AI detection for clearance&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-16]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|Enhanced Navigation Software&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-17]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|Anchor&amp;amp;Tag firmware of the Indoor  Positioning System &lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-18_A]]	&lt;br /&gt;
|TEKNE	&lt;br /&gt;
|Drone-Rover Transponder&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-32]]	&lt;br /&gt;
|SHERPA&lt;br /&gt;
|Dynamic control development for navigation and precision landing&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-39]]	&lt;br /&gt;
|HIB&lt;br /&gt;
|Simulated data aggregator supporting intelligent decision in computer vision components&lt;br /&gt;
|- &lt;br /&gt;
|[[WP4-42]]	&lt;br /&gt;
|SCALIAN	&lt;br /&gt;
|AI Stabilization&lt;br /&gt;
|- &lt;br /&gt;
|[[WP5-03]]	&lt;br /&gt;
|SCALIAN	&lt;br /&gt;
|EZ_Com Safe fleet communication&lt;br /&gt;
|- &lt;br /&gt;
&lt;br /&gt;
|[[WP4-33]]	&lt;br /&gt;
|UNIVAQ	&lt;br /&gt;
|Autonomy, cooperation, and awareness&lt;br /&gt;
|- &lt;br /&gt;
|[[WP5-05_A]]	&lt;br /&gt;
|TEKNE	&lt;br /&gt;
|LP-WAN for UAV identification and monitoring&lt;br /&gt;
|- &lt;br /&gt;
|[[WP5-11_ACO]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|Navigation system with anti-jamming and anti-spoofing features&lt;br /&gt;
|- &lt;br /&gt;
|[[WP5-19_ACO]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|Robust communication for an improved Indoor Positioning System&lt;br /&gt;
|- &lt;br /&gt;
|[[WP6-P4R]]	&lt;br /&gt;
|CEA	&lt;br /&gt;
|Model driven engineering&lt;br /&gt;
|- &lt;br /&gt;
|[[WP6-ESDE]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|ESL embedded SW Design Environment (ESDE)&lt;br /&gt;
|- &lt;br /&gt;
|[[WP6-IPS-MAF]]	&lt;br /&gt;
|ACORDE&lt;br /&gt;
|Indoor Positioning System Modelling&amp;amp;Analysis Framework (IPS-MAF)&lt;br /&gt;
|- &lt;br /&gt;
|[[WP6-17]]	&lt;br /&gt;
|UNIVAQ&lt;br /&gt;
|HW/SW CO-DEsign of HEterogeneous Parallel dedicated Systems (HEPSYCODE)&lt;br /&gt;
|- &lt;br /&gt;
|[[WP6-34]]	&lt;br /&gt;
|UNIVAQ&lt;br /&gt;
|HEPSYCODE SystemC SIMulator Version 2.0 (HEPSIM2)&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP3-04&amp;diff=153</id>
		<title>WP3-04</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP3-04&amp;diff=153"/>
		<updated>2022-03-23T12:02:36Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Computer Vision Components for drones=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP3-04&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	Payload data (drone images)&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| DL-based SW to detect and classify objects (road elements, traffic signs) from images captured by drones&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	RGB images captured by drone (Payload data)&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Inventory of road elements with their position in the terrain&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Video) Data Analytics&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 5&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Machine learning and deep learning are an arising approach in dealing with large amount of data gained from drones. For infrastructure planning and design, typical data acquired through drones are images. For construction monitoring, either real time videos or 3D models are needed. Focusing on object recognition and detection in aerial images captured by drones, a major challenge with the integration of artificial intelligence and machine learning with autonomous drones’ operations is that these tasks are not executable in real-time or near-real-time due to the complexities of these tasks and their computational costs. In the last few years, deep convolutional neural networks have shown to be a reliable approach for image object detection and classification due to their relatively high accuracy and speed. Furthermore, a CNN algorithm enables drones to convert object information from the immediate environment into abstract information that can be interpreted by machines without human interference. Thereby, the main advantage of CNN algorithms is that they can detect and classify objects while being computationally less expensive and superior in performance when compared with other machine-learning methods. In the C4D project scope, the computer vision component for drones brings a deep learning-based software which uses a convolutional neural network (CNN) algorithm to detect, and classify objects from raw data in such a way that it brings to the project capabilities of interpreting surroundings and detecting scenarios from data captured with drones.&lt;br /&gt;
&lt;br /&gt;
==Specifications and contribution==&lt;br /&gt;
&lt;br /&gt;
Concretely, the proposed application scenario for the computer vision component for drones is the inspection, assessment and maintenance of civil infrastructures and construction elements, in which many inventories for damage and defects detection are carried out. Such inspections and the corresponding inventories are performed through human visual observations, being a tedious and time-consuming work prone to human errors, and therefore, an expensive work, so accelerating such inspections is a current challenge in the construction industry. Then, the Computer vision component for drones is a post-processing computer vision system based on previously trained CNN algorithms which enables the auto-detection and geo-referencing of different objects from RGB images captured by the drones’ on-board camera. Particularly, this computer vision system intends to improve the digitalization of the state of the constructive process of a Civil Infrastructure by auto-detecting and geo-referencing different road elements which can be found in any civil infrastructure or construction elements. Thereby, one of the main challenges in the construction industry like the road element inventory realization is solved.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_01.png|frame|center|Computer Vision Component in the application scenario]]&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the computer vision component for drones supports the (Video) Data Analytics building block in the Data Management block by performing an offline data analysis over the RGB images (that is, payload data) captured by the UAV in order to provide an auto-detection and geo-referencing of different objects. Although this computer vision component for drones will provide to the Business domain an inventory of the road elements detected and their corresponding position in the terrain to serve as input for the creation of the BIM model, this computer vision component for drones could be extended for other business functions following the generic Building Block defined for the Payload Data Analytics.&lt;br /&gt;
&lt;br /&gt;
==Design and Implementation==&lt;br /&gt;
&lt;br /&gt;
The computer vision component for drones is being developed following the typical steps for a deep-learning-based SW:&lt;br /&gt;
&lt;br /&gt;
# Dataset creation&lt;br /&gt;
# Election of the most accurate object detection model.&lt;br /&gt;
# Training of the object detection model using previously created dataset.&lt;br /&gt;
# Testing of the object detection model already trained.&lt;br /&gt;
&lt;br /&gt;
So, it is possible to state that the computer vision component for drones is built upon two main axes: the dataset and the object detection model. For the dataset creation, several available datasets have been contemplated:&lt;br /&gt;
&lt;br /&gt;
* '''German Traffic Sign Detection Benchmark (GTSDB)''', which is a single-image detection dataset based on images taken in Germany. It contains around 900 images, 600 for training and 300 for test. The traffic sign classes in this dataset are shown below:&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_02.png|frame|center|Traffic sign classes in GTSDB]]&lt;br /&gt;
&lt;br /&gt;
* Images from '''drone flight videos'''. This dataset is built with the images captured during the 1st data acquisition campaign performed in UC2 – Construction in Jaén (Spain). An example is shown below:&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_03.png|frame|center|Example of images from drone flight videos]]&lt;br /&gt;
&lt;br /&gt;
* Dataset built from the videos generated with '''AirSim (drone simulation environment)'''.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_04.png|frame|center|Example of images from drone simulated flight videos]]&lt;br /&gt;
&lt;br /&gt;
* '''Mapillary Traffic Sign Dataset (MTSD)''', which is a dataset with street-level images around the world. These images are annotated with bounding boxes and traffic sign classes. There are 100000 high-resolution images, 52000 of them fully annotated. Also, there are over 300 traffic sign classes, as it is shown below.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_05.png|frame|center|Overview of all traffic sign classes in MTSD]]&lt;br /&gt;
&lt;br /&gt;
Related to the object detection model, Faster R-CNN is the model used to date, and it is an object detection system proposed by Ren et al. in 2015. Its architecture is shown in below figure and it is formed by two components: Region Proposal Network and Fast Region-Convolutional Neural Network.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_06.png|frame|center|Faster R-CNN model general architecture]]&lt;br /&gt;
&lt;br /&gt;
Concretely, Region Proposal Network (RPN) is a deep convolutional network that proposes image regions where an object might be found. RPN component acts as an attention mechanism that accelerates region proposal phase, and in our use case, it creates new regions (bounding boxes) where a traffic signal exists. Complementarily, the Fast Region-Convolutional Neural Network (Fast R-CNN) takes the regions proposed by RPN and classifies them efficiently. In our use case, this component classifies traffic signal regions in ‘Mandatory’, ‘Danger’, ‘Forbidden’.&lt;br /&gt;
&lt;br /&gt;
Below, it is shown an implementation conceptual diagram for the computer vision component for drones.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_07.png|frame|center|Implementation conceptual diagram for computer vision component]]&lt;br /&gt;
&lt;br /&gt;
Based on the above figure, the data flow of computer vision component for drones is detailed as follows:&lt;br /&gt;
&lt;br /&gt;
# Setup of all prerequisites: software libraries are installed and loaded, and dataset and pretrained model are downloaded.&lt;br /&gt;
# The pretrained Faster R-CNN model is loaded into memory using TensorFlow Object Detection API &lt;br /&gt;
# The label map is loaded into memory. This object maps the three category indices to category names for the translation of the model’s predictions.&lt;br /&gt;
# The pretrained model performs detection over test set images.&lt;br /&gt;
## Each image is prepared with Pillow library.&lt;br /&gt;
## Images are transformed into Numpy arrays.&lt;br /&gt;
## Images are preprocessed for the model to make predictions on them.&lt;br /&gt;
## Detection takes place.&lt;br /&gt;
## Predictions are visualized with TensorFlow Object Detection API and Matplotlib’s Pyplot module.&lt;br /&gt;
&lt;br /&gt;
Below, it is possible to see traffic sign detections on images from real Drone Flight Videos by the Faster R-CNN model implemented in the computer vision component for drones.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_08.png|frame|center| Detections performed by Faster R-CNN on a Drone Flight image. (top) The model fails to perform any detection over the whole image. (bottom) After chopping the image, the model correctly detects and identifies the four traffic signs]]&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=WP3-04&amp;diff=152</id>
		<title>WP3-04</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=WP3-04&amp;diff=152"/>
		<updated>2022-03-23T12:01:18Z</updated>

		<summary type="html">&lt;p&gt;Hib: Created page with &amp;quot;=Computer Vision Components for drones=  {|class=&amp;quot;wikitable&amp;quot; |  ID|| WP3-04 |- |   Contributor	|| HI-IBERIA |- |   Levels	|| Functional |- |   Require	|| 	Payload data (drone images) |- |   Provide		|| DL-based SW to detect and classify objects (road elements, traffic signs) from images captured by drones |- |   Input	|| 	RGB images captured by drone (Payload data) |- |   Output		|| Inventory of road elements with their position in the terrain |- |   C4D building block...&amp;quot;&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Computer Vision Components for drones=&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
|  ID|| WP3-04&lt;br /&gt;
|-&lt;br /&gt;
|   Contributor	|| HI-IBERIA&lt;br /&gt;
|-&lt;br /&gt;
|   Levels	|| Functional&lt;br /&gt;
|-&lt;br /&gt;
|   Require	|| 	Payload data (drone images)&lt;br /&gt;
|-&lt;br /&gt;
|   Provide		|| DL-based SW to detect and classify objects (road elements, traffic signs) from images captured by drones&lt;br /&gt;
|-&lt;br /&gt;
|   Input	|| 	RGB images captured by drone (Payload data)&lt;br /&gt;
|-&lt;br /&gt;
|   Output		|| Inventory of road elements with their position in the terrain&lt;br /&gt;
|-&lt;br /&gt;
|   C4D building block		|| (Video) Data Analytics&lt;br /&gt;
|-&lt;br /&gt;
|   TRL		|| 5&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Detailed Description==&lt;br /&gt;
&lt;br /&gt;
Machine learning and deep learning are an arising approach in dealing with large amount of data gained from drones. For infrastructure planning and design, typical data acquired through drones are images. For construction monitoring, either real time videos or 3D models are needed. Focusing on object recognition and detection in aerial images captured by drones, a major challenge with the integration of artificial intelligence and machine learning with autonomous drones’ operations is that these tasks are not executable in real-time or near-real-time due to the complexities of these tasks and their computational costs. In the last few years, deep convolutional neural networks have shown to be a reliable approach for image object detection and classification due to their relatively high accuracy and speed. Furthermore, a CNN algorithm enables drones to convert object information from the immediate environment into abstract information that can be interpreted by machines without human interference. Thereby, the main advantage of CNN algorithms is that they can detect and classify objects while being computationally less expensive and superior in performance when compared with other machine-learning methods. In the C4D project scope, the computer vision component for drones brings a deep learning-based software which uses a convolutional neural network (CNN) algorithm to detect, and classify objects from raw data in such a way that it brings to the project capabilities of interpreting surroundings and detecting scenarios from data captured with drones.&lt;br /&gt;
&lt;br /&gt;
==Specifications and contribution==&lt;br /&gt;
&lt;br /&gt;
Concretely, the proposed application scenario for the computer vision component for drones is the inspection, assessment and maintenance of civil infrastructures and construction elements, in which many inventories for damage and defects detection are carried out. Such inspections and the corresponding inventories are performed through human visual observations, being a tedious and time-consuming work prone to human errors, and therefore, an expensive work, so accelerating such inspections is a current challenge in the construction industry. Then, the Computer vision component for drones is a post-processing computer vision system based on previously trained CNN algorithms which enables the auto-detection and geo-referencing of different objects from RGB images captured by the drones’ on-board camera. Particularly, this computer vision system intends to improve the digitalization of the state of the constructive process of a Civil Infrastructure by auto-detecting and geo-referencing different road elements which can be found in any civil infrastructure or construction elements. Thereby, one of the main challenges in the construction industry like the road element inventory realization is solved.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_01.png|frame|center|Computer Vision Component in the application scenario]]&lt;br /&gt;
&lt;br /&gt;
Considering the C4D Reference Architecture Context, the computer vision component for drones supports the (Video) Data Analytics building block in the Data Management block by performing an offline data analysis over the RGB images (that is, payload data) captured by the UAV in order to provide an auto-detection and geo-referencing of different objects. Although this computer vision component for drones will provide to the Business domain an inventory of the road elements detected and their corresponding position in the terrain to serve as input for the creation of the BIM model, this computer vision component for drones could be extended for other business functions following the generic Building Block defined for the Payload Data Analytics.&lt;br /&gt;
&lt;br /&gt;
==Design and Implementation==&lt;br /&gt;
&lt;br /&gt;
The computer vision component for drones is being developed following the typical steps for a deep-learning-based SW:&lt;br /&gt;
&lt;br /&gt;
# Dataset creation&lt;br /&gt;
# Election of the most accurate object detection model.&lt;br /&gt;
# Training of the object detection model using previously created dataset.&lt;br /&gt;
# Testing of the object detection model already trained.&lt;br /&gt;
&lt;br /&gt;
So, it is possible to state that the computer vision component for drones is built upon two main axes: the dataset and the object detection model. For the dataset creation, several available datasets have been contemplated:&lt;br /&gt;
&lt;br /&gt;
* '''German Traffic Sign Detection Benchmark (GTSDB)''', which is a single-image detection dataset based on images taken in Germany. It contains around 900 images, 600 for training and 300 for testing. The traffic sign classes in this dataset are shown below:&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_02.png|frame|center|Traffic sign classes in GTSDB]]&lt;br /&gt;
&lt;br /&gt;
* Images from '''drone flight videos'''. This dataset is built with the images captured during the 1st data acquisition campaign performed in UC2 – Construction in Jaén (Spain). An example is shown below:&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_03.png|frame|center|Example of images from drone flight videos]]&lt;br /&gt;
&lt;br /&gt;
* Dataset built from the videos generated with '''AirSim (drone simulation environment)'''.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_04.png|frame|center|Example of images from drone simulated flight videos]]&lt;br /&gt;
&lt;br /&gt;
* '''Mapillary Traffic Sign Dataset (MTSD)''', which is a dataset with street-level images from around the world. These images are annotated with bounding boxes and traffic sign classes. There are 100000 high-resolution images, 52000 of them fully annotated. Also, there are over 300 traffic sign classes, as shown below.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_05.png|frame|center|Overview of all traffic sign classes in MTSD]]&lt;br /&gt;
&lt;br /&gt;
Related to the object detection model, Faster R-CNN is the model used to date, and it is an object detection system proposed by Ren et al. in 2015. Its architecture is shown in the figure below and is formed by two components: the Region Proposal Network and the Fast Region-Convolutional Neural Network.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_06.png|frame|center|Faster R-CNN model general architecture]]&lt;br /&gt;
&lt;br /&gt;
Concretely, the Region Proposal Network (RPN) is a deep convolutional network that proposes image regions where an object might be found. The RPN component acts as an attention mechanism that accelerates the region proposal phase, and in our use case, it creates new regions (bounding boxes) where a traffic signal exists. Complementarily, the Fast Region-Convolutional Neural Network (Fast R-CNN) takes the regions proposed by the RPN and classifies them efficiently. In our use case, this component classifies traffic signal regions into ‘Mandatory’, ‘Danger’, or ‘Forbidden’. An implementation conceptual diagram for the computer vision component for drones is shown below.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_07.png|frame|center|Implementation conceptual diagram for computer vision component]]&lt;br /&gt;
&lt;br /&gt;
Based on the above figure, the data flow of computer vision component for drones is detailed as follows:&lt;br /&gt;
&lt;br /&gt;
# Setup of all prerequisites: software libraries are installed and loaded, and dataset and pretrained model are downloaded.&lt;br /&gt;
# The pretrained Faster R-CNN model is loaded into memory using the TensorFlow Object Detection API.&lt;br /&gt;
# The label map is loaded into memory. This object maps the three category indices to category names for the translation of the model’s predictions.&lt;br /&gt;
# The pretrained model performs detection over test set images.&lt;br /&gt;
## Each image is prepared with Pillow library.&lt;br /&gt;
## Images are transformed into Numpy arrays.&lt;br /&gt;
## Images are preprocessed for the model to make predictions on them.&lt;br /&gt;
## Detection takes place.&lt;br /&gt;
## Predictions are visualized with TensorFlow Object Detection API and Matplotlib’s Pyplot module.&lt;br /&gt;
&lt;br /&gt;
Below, it is possible to see traffic sign detections performed by the Faster R-CNN model implemented in the computer vision component for drones on images from real Drone Flight Videos.&lt;br /&gt;
&lt;br /&gt;
[[File:wp3-04_08.png|frame|center| Detections performed by Faster R-CNN on a Drone Flight image. (top) The model fails to perform any detection over the whole image. (bottom) After chopping the image, the model correctly detects and identifies the four traffic signs]]&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_08.png&amp;diff=151</id>
		<title>File:Wp3-04 08.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_08.png&amp;diff=151"/>
		<updated>2022-03-23T11:18:01Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_07.png&amp;diff=150</id>
		<title>File:Wp3-04 07.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_07.png&amp;diff=150"/>
		<updated>2022-03-23T11:17:13Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_06.png&amp;diff=149</id>
		<title>File:Wp3-04 06.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_06.png&amp;diff=149"/>
		<updated>2022-03-23T11:17:02Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_05.png&amp;diff=148</id>
		<title>File:Wp3-04 05.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_05.png&amp;diff=148"/>
		<updated>2022-03-23T11:16:45Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_04.png&amp;diff=147</id>
		<title>File:Wp3-04 04.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_04.png&amp;diff=147"/>
		<updated>2022-03-23T11:16:36Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_03.png&amp;diff=146</id>
		<title>File:Wp3-04 03.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_03.png&amp;diff=146"/>
		<updated>2022-03-23T11:16:20Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_02.png&amp;diff=145</id>
		<title>File:Wp3-04 02.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_02.png&amp;diff=145"/>
		<updated>2022-03-23T11:16:11Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
	<entry>
		<id>https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_01.png&amp;diff=144</id>
		<title>File:Wp3-04 01.png</title>
		<link rel="alternate" type="text/html" href="https://c4d.lias-lab.fr/index.php?title=File:Wp3-04_01.png&amp;diff=144"/>
		<updated>2022-03-23T11:15:57Z</updated>

		<summary type="html">&lt;p&gt;Hib: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Hib</name></author>
	</entry>
</feed>