@conference {paolanti2018agent, title = {An agent-based WCET analysis for Top-View Person Re-Identification}, booktitle = {1st International Workshop on Real-Time Compliant Multi-Agent Systems (RTcMAS)}, year = {2018}, publisher = {CEUR Workshop Proceedings}, organization = {CEUR Workshop Proceedings}, address = {Stockholm}, abstract = {

Person re-identification is a challenging task for improving and personalising the shopping experience in an intelligent retail environment. A new Top View Person Re-Identification (TVPR) dataset of 100 persons has been collected and described in a previous work. This work estimates the Worst Case Execution Time (WCET) for the feature extraction and classification steps. Such tasks should not exceed the WCET in order to ensure the effectiveness of the proposed application. In fact, after feature extraction, the classification process is performed by selecting the first passage under the camera for training and using the others as the testing set. Furthermore, gender classification is exploited to improve retail applications. We tested all feature sets using k-Nearest Neighbors, Support Vector Machine, Decision Tree and Random Forest classifiers. Experimental results prove the effectiveness of the proposed approach, achieving good performance in terms of Precision, Recall and F1-score.

}, keywords = {Person re-identification, Real-time, Retail, RGB-D camera, WCET}, url = {http://ceur-ws.org/Vol-2156/paper4.pdf}, author = {Marina Paolanti and Valerio Placidi and Michele Bernardini and Andrea Felicetti and Rocco Pietrini and Emanuele Frontoni} } @conference {Paolanti2016, title = {Accurate modeling of the microwave treatment in reverberating chamber. Sanitation of agro food material}, booktitle = {Mediterranean Microwave Symposium}, volume = {2016-January}, year = {2016}, note = {cited By 0; Conference of 15th IEEE Mediterranean Microwave Symposium, MMS 2015; Conference Date: 30 November 2015 Through 2 December 2015; Conference Code: 119001}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, abstract = {Microwave heating is useful for drying foodstuffs, disinfestation of works of art, phytosanitary treatment and disinfection of packaging according to current international guidelines. Computer simulation allows predicting and monitoring the heating process. The microwave treatment can nevertheless present some problems such as the presence of highly heated areas (hot spots) or areas with poor radiation due to particular shapes. Simulation of complex systems has evolved into a research discovery tool: such models and simulations, drawing upon the dramatic scale-up of computational power and associated architectures and algorithmic innovation, can address complex systems with many degrees of freedom and with multiple length and time scales of interest. Using specific programs, the distribution of heating power in objects to be treated, even those of complex shape, can be predicted so as to define the feasibility, the time necessary for the processing, the power to be transmitted into the chamber and any repair or protection to cover the most sensitive areas. It can also predict the behavior of irradiation in the presence of other entities such as nails or pests. In order to perform the simulation, the important data are the geometry of the object (or objects, in the case of multiple loading) and their dielectric characteristics. As a result, we obtain the distribution of heating power.
{\textcopyright} 2015 IEEE.}, keywords = {Computational power, Degrees of freedom (mechanics), Dielectric characteristics, Electromagnetic heating, Electromagnetics, Heating, Microwave heating, Microwave ovens, Microwave treatment, Microwaves, Rail-to-rail input, Reverberating chamber, Shape, Temperature measurement}, isbn = {9781467376020}, issn = {21579822}, doi = {10.1109/MMS.2015.7375447}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962766321\&partnerID=40\&md5=baeea86f228c70b1b41ce30e1d94e5a3}, author = {Marina Paolanti and Bacchiani, R. and Emanuele Frontoni and Adriano Mancini and De Leo, R. and Primo Zingaretti and Bisceglia, B.} } @conference {frontoni2016analysing, title = {Analysing human movements at mass events: A novel mobile-based management system based on active beacons and AVM}, booktitle = {Control and Automation (MED), 2016 24th Mediterranean Conference on}, year = {2016}, pages = {605{\textendash}610}, publisher = {IEEE}, organization = {IEEE}, author = {Emanuele Frontoni and Adriano Mancini and Roberto Pierdicca and Mirco Sturari and Primo Zingaretti} } @conference {naspetti2016automatic, title = {Automatic analysis of eye-tracking data for augmented reality applications: A prospective outlook}, booktitle = {International Conference on Augmented Reality, Virtual Reality and Computer Graphics}, year = {2016}, pages = {217{\textendash}230}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, author = {Naspetti, Simona and Roberto Pierdicca and Mandolesi, Serena and Marina Paolanti and Emanuele Frontoni and Zanoli, Raffaele} } @article {Pierdicca201538, title = {Advanced interaction with paintings by augmented reality and high resolution visualization: A real case exhibition}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {9254}, year = {2015}, note = {cited By 0}, pages = {38-50}, doi = {10.1007/978-3-319-22888-4_4}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84944686566\&partnerID=40\&md5=8c678ed32702d02aaa50edbf34433ca2}, author = {Roberto Pierdicca and Emanuele Frontoni and Primo Zingaretti and Mirco Sturari and Paolo Clini and Quattrini, R.} } @conference {paolanti2015automatic, title = {Automatic Classification for Anti Mixup Events in Advanced Manufacturing System}, booktitle = {ASME 2015 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference}, year = {2015}, pages = {V009T07A061{\textendash}V009T07A061}, publisher = {American Society of Mechanical Engineers}, organization = {American Society of Mechanical Engineers}, author = {Marina Paolanti and Emanuele Frontoni and Adriano Mancini and Roberto Pierdicca and Primo Zingaretti} } @conference {Liciotti2014, title = {Advanced integration of multimedia assistive technologies: A prospective outlook}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {In recent years, several studies on population ageing in the most advanced countries have argued that the share of people older than 65 years is steadily increasing. In order to tackle this phenomenon, a significant effort has been devoted to the development of advanced technologies for supervising domestic environments and their inhabitants to provide them assistance in their own home.
In this context, the present paper aims to delineate a novel, highly-integrated system for advanced analysis of human behaviours. It is based on the fusion of the audio and vision frameworks, developed at the Multimedia Assistive Technology Laboratory (MATeLab) of the Universit{\`a} Politecnica delle Marche, in order to operate in the ambient assisted living context exploiting audio-visual domain features. The existing video framework exploits vertical RGB-D sensors for people tracking, interaction analysis and users' activity detection in domestic scenarios. The depth information has been used to remove the effect of appearance variation and to evaluate users' activities inside the home and in front of the fixtures. In addition, group interactions are monitored and analysed. On the other hand, the audio framework recognises voice commands by continuously monitoring the acoustic home environment. In addition, hands-free communication to a relative or to a healthcare centre is automatically triggered when a distress call is detected. Echo and interference cancellation algorithms guarantee high-quality communication and reliable speech recognition, respectively. The system we intend to delineate, thus, exploits multi-domain information, gathered from both the audio and video frameworks, and stores it in a remote cloud for instant processing and analysis of the scene. Related actions are consequently performed.}, doi = {10.1109/MESA.2014.6935629}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911977133\&partnerID=40\&md5=3a8fad94ccf6268631dbf553e9360956}, author = {Daniele Liciotti and Ferroni, G. and Emanuele Frontoni and Squartini, S. and Principi, E. and Bonfigli, R. and Primo Zingaretti and Francesco Piazza} } @article {Clini2014, title = {Augmented reality experience: From high-resolution acquisition to real time augmented contents}, journal = {Advances in Multimedia}, volume = {2014}, year = {2014}, note = {cited By 0}, abstract = {This paper presents the results of the research project "dUcale", which experiments with ICT solutions for the museum of Palazzo Ducale (Urbino). In this project, the famed painting the "Citt{\`a} Ideale" becomes a case to exemplify a specific approach to the digital mediation of cultural heritage. An augmented reality (AR) mobile application, able to enhance the museum visit experience, is presented. The computing technologies involved in the project (websites, desktop and social applications, mobile software, and AR) constitute a persuasive environment for knowledge of the artwork. The overall goal of our research is to provide cultural institutions with best practices efficiently on low budgets. Therefore, we present a low-cost method for high-resolution acquisition of paintings; the image is used as a base in the AR approach. The proposed methodology consists of an improved SIFT extractor for real-time images. The other novelty of this work is the multipoint probabilistic layer. Experimental results demonstrated the robustness of the proposed approach with extensive use of the AR application in front of the "Citt{\`a} Ideale" painting. To prove the usability of the application and to ensure a good user experience, we also carried out several user tests in the real scenario.}, doi = {10.1155/2014/597476}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84919754522\&partnerID=40\&md5=7e1c1da868b9c0927bb555d4151577ac}, author = {Paolo Clini and Emanuele Frontoni and Quattrini, R.
and Roberto Pierdicca} } @conference {Mancini2012281, title = {Automatic road object extraction from Mobile Mapping Systems}, booktitle = {Proceedings of 2012 8th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2012}, year = {2012}, note = {cited By 3}, pages = {281-286}, abstract = {Mobile Mapping Systems (MMSs) often represent the best choice to provide an accurate 3D modeling of the environment, especially in urban streets where aerial/satellite surveys do not provide accurate data. MMSs are equipped with many kinds of sensors and, in particular, laser scanners that allow 2D/3D environment modeling from very dense point clouds. Usually an operator manually explores the point cloud to discover and mark a particular feature of interest (e.g., road line, cross-walk). Obviously this procedure is tedious and expensive. One of the greatest challenges is to automatically extract objects/features from co-registered data coming from LiDAR, optical and positioning sensors. This paper presents an automatic feature/object approach to extract and then georeference, with high accuracy/precision, horizontal road signs, mainly lanes and crosswalks. The proposed approach exploits image processing techniques and methods for the 3D to 2D re-projection of data. The results obtained demonstrate that it is possible to achieve accuracy and precision in the range of one centimeter. {\textcopyright} 2012 IEEE.}, doi = {10.1109/MESA.2012.6275575}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84867444429\&partnerID=40\&md5=bbdfcd587f93e0fdf60be30447684e89}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti} } @conference {Cesetti2010125, title = {Autonomous safe landing of a vision guided helicopter}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 2}, pages = {125-130}, abstract = {In this paper a vision-based system for safe autonomous landing of a helicopter-based Unmanned Aerial Vehicle (UAV) is presented. The remote user selects target areas from high-resolution aerial or satellite images. These areas are tracked by a feature-based image matching algorithm that identifies natural landmarks and gives feedback for control purposes. {\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5552081}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957363095\&partnerID=40\&md5=105d5126c8256e58ab2a67b66bd8c271}, author = {Cesetti, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Zingaretti2008227, title = {Autonomous helicopter for surveillance and security}, booktitle = {2007 Proceedings of the ASME International Design Engineering Technical Conferences and Computers and Information in Engineering Conference, DETC2007}, volume = {4}, year = {2008}, note = {cited By 0}, pages = {227-234}, abstract = {Unmanned Aerial Vehicles today represent an advanced and complex robotics platform for novel tasks. For example, UAVs can be used in applications for traffic monitoring and surveillance, emergency services assistance, photogrammetry and surveying. Generally, a UAV must be fully autonomous; autonomy is accomplished by a complex interconnection of systems related to a wide range of topics, e.g., low-level flight control, navigation and task-based planning, elaboration of sensor signals, software architecture for reactive behaviours, communication.
Today the challenge is the ability to insert UAVs in a cooperative network of autonomous agents, such as UAVs and UGVs (Unmanned Ground Vehicles), to accomplish a specific task defined a priori. In this paper we introduce a prototype of an autonomous aerial vehicle, the Helibot helicopter, specifically designed for applications such as surveillance and security. Copyright {\textcopyright} 2007 by ASME.}, doi = {10.1115/DETC2007-35427}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-44849089281\&partnerID=40\&md5=722c3bbead7b4107ed7b860dd3c492b3}, author = {Primo Zingaretti and Adriano Mancini and Emanuele Frontoni and Monteri{\`u}, A. and Sauro Longhi} } @conference {Frontoni2007117, title = {Adaptive and fast scale invariant feature extraction}, booktitle = {Proceedings of the 1st International Workshop on Robot Vision; In Conjunction with VISAPP 2007}, year = {2007}, note = {cited By 6}, pages = {117-125}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot vision, object recognition, motion estimation, etc. Still, the parameter settings are not fully investigated, especially when dealing with variable lighting conditions. In this work, we propose a SIFT improvement that allows feature extraction and matching between images taken under different illumination. An interesting approach to reduce the SIFT computational time is also presented. Finally, results of robot vision-based localization experiments using the proposed approach are presented.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-58149132302\&partnerID=40\&md5=a3a9d9ab504182d6ad9d69f9dfbe0344}, author = {Emanuele Frontoni and Primo Zingaretti} } @conference {Zingaretti2007273, title = {Automatic extraction of LIDAR data classification rules}, booktitle = {Proceedings - 14th International Conference on Image Analysis and Processing, ICIAP 2007}, year = {2007}, note = {cited By 4}, pages = {273-278}, abstract = {LIDAR (Light Detection And Ranging) data are a primary data source for digital terrain model (DTM) generation and 3D city models. This paper presents an AdaBoost algorithm for the identification of rules for the classification of raw LIDAR data mainly as buildings, ground and vegetation. First, raw data are filtered, interpolated over a grid and segmented. Then, geometric and topological relationships among regions resulting from segmentation constitute the input to the tree-structured classification algorithm. Results obtained on data sets gathered over the town of Pavia (Italy) are compared with those obtained by a rule-based approach previously presented by the authors for the classification of the regions. {\textcopyright} 2007 IEEE.}, doi = {10.1109/ICIAP.2007.4362791}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-48149101919\&partnerID=40\&md5=6578b3da6a7d08ffa58b7e9b97f98d95}, author = {Primo Zingaretti and Emanuele Frontoni and G. Forlani and C. Nardinocchi} } @article {Frontoni2006855, title = {Aliasing maps for robot global localization}, journal = {Frontiers in Artificial Intelligence and Applications}, volume = {141}, year = {2006}, note = {cited By 0}, pages = {855-856}, abstract = {In this paper we present a mobile robot localization system that integrates Monte-Carlo localization with an active action-selection approach based on an aliasing map. The main novelty of the approach is in the off-line evaluation of the perceptual aliasing of the environment and in the use of this knowledge to perform the localization process faster and better.
Preliminary results show improved performance compared with the classic Monte-Carlo localization approach. {\textcopyright} 2006 The authors.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84886063653\&partnerID=40\&md5=cd93b50250c277363610aeec65edcaec}, author = {Emanuele Frontoni and Primo Zingaretti} } @article {Zingaretti200659, title = {Appearance-based robotics}, journal = {IEEE Robotics and Automation Magazine}, volume = {13}, number = {1}, year = {2006}, note = {cited By 22}, pages = {59-68}, abstract = {A novel appearance-based framework for active robot localization in partially explored environments is introduced. It provides qualitative measurements of the position of the robot, thus monitoring the progress of the overall task. The chromatic and spatial attributes of the color sets extracted from snapshots of the environment are used together with a stochastic evaluator based on a partially observable Markov decision process (POMDP). Also, robot localization is performed without using explicit object models. The robustness of the appearance-based framework is demonstrated by a long series of experiments in each of three environments with different characteristics and with different percentages of knowledge acquired during the visual tour. The proposed metric, based on color sets and weighted walkthrough, easily allows several variations of the similarity evaluation.}, doi = {10.1109/MRA.2006.1598054}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-33644674351\&partnerID=40\&md5=0399cae997333eacc85cce4d0f987671}, author = {Primo Zingaretti and Emanuele Frontoni} }