@article {Sturari2016, title = {Robust and affordable retail customer profiling by vision and radio beacon sensor fusion}, journal = {Pattern Recognition Letters}, year = {2016}, pages = {-}, abstract = {The development of reliable and precise indoor localization systems would considerably improve the ability to investigate shopper movements and behaviour inside retail environments. Previous approaches used either computer vision technologies or the analysis of signals emitted by communication devices (beacons). While computer vision approaches provide a higher level of accuracy, beacons cover a wider operational area. In this paper, we propose a sensor fusion approach between active radio beacons and RGB-D cameras. This system, used in an intelligent retail environment where cameras are already installed for other purposes, allows an affordable environment set-up and low operational costs for customer indoor localization and tracking. We adopted a Kalman filter to fuse the localization data: radio signals emitted by beacons are used to track users{\textquoteright} mobile devices, and RGB-D cameras are used to refine the position estimations. By combining coarse localization datasets from active beacons and RGB-D data from sparse cameras, we demonstrate that indoor position estimation is strongly enhanced. The aim of this general framework is to provide retailers with useful information by analysing consumer activities inside the store. To prove the robustness of our approach, several tests were conducted in a real indoor showroom by analysing real customers{\textquoteright} behaviour, with encouraging results.}, issn = {0167-8655}, doi = {http://dx.doi.org/10.1016/j.patrec.2016.02.010}, url = {http://www.sciencedirect.com/science/article/pii/S016786551600057X}, author = {Mirco Sturari and Daniele Liciotti and Roberto Pierdicca and Emanuele Frontoni and Adriano Mancini and Marco Contigiani and Primo Zingaretti} } @conference {Frontoni2014, title = {Real time out of shelf detection using embedded sensor network}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {The out-of-shelf problem is important for retail stores to solve, since the absence of products on the shelf can lead to a significant reduction in shoppers and a consequent drop in sales. For this purpose, it is necessary to study and introduce approaches able to establish the lack of products on the shelves and thereby promptly ensure their repositioning. In this context, the paper investigates the use of artificial intelligence techniques in detecting out-of-shelf products. In particular, having the sales data, ordering information and product assortment of the store available, we study the development of a low-cost shelf detector that is based on a wireless sensor network and that can automatically discover out-of-shelf situations on a daily basis for all the stores of a retail chain. An automatic method for detecting products that are not available on the shelf based on sales data would offer an accurate view of shelf availability, both to retailers and to product suppliers. The tool presented is the first to have been installed for a long time in a large number of stores and for many products, demonstrating the ability to gather data from them and extract interesting insights.
This paper aims to present the hardware infrastructure of an embedded sensor network devoted to real-time shelf out-of-stock management and to demonstrate the feasibility and scalability of the system in providing a wealth of data and interesting insights for the store team and the brand{\textquoteright}s marketing team.}, doi = {10.1109/MESA.2014.6935614}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911958905\&partnerID=40\&md5=8aa2443592d484a806a041db808efd3e}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Massi2014, title = {A real-time reliability and durability testing framework}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {This paper presents a methodological framework for designing testing and measurement systems fully integrated with the enterprise information system. In comparison with the most common solutions for designing embedded testing platforms, the proposed framework sets itself at a higher level of abstraction. The proposed framework makes it possible to obtain different, programmable test benches that can run in parallel, and it does not restrict the choice of hardware, sensors and actuators, as happens with commercial development systems for the same kind of machines. The framework is conceived to be used on embedded boards equipped with the GNU/Linux operating system and with at least one network interface. By using open data formats, the framework provides an easy way to exchange data with enterprise information systems, thus assuring interoperability with different IT solutions. The paper includes the description of a cooker hood testing system designed and implemented with this framework, which highlights the advantages of the proposed development method.}, doi = {10.1109/MESA.2014.6935592}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911979615\&partnerID=40\&md5=9e44e4e4151cf14b313983a811b7c3f1}, author = {Gionata Massi and Morganti, G. and Claudi, A. and Primo Zingaretti} } @conference {Mancini20131589, title = {Road pavement crack automatic detection by MMS images}, booktitle = {2013 21st Mediterranean Conference on Control and Automation, MED 2013 - Conference Proceedings}, year = {2013}, note = {cited By 0}, pages = {1589-1596}, abstract = {The research objective was to test different feature extraction methods to localize road pavement cracks, useful for constructing a spatial database for pavement distress monitoring. Several images were acquired by means of a line scan camera that, assembled in a Mobile Mapping System (MMS), allows the position of the images to be tracked directly by a GPS-INS system. Afterwards, automatic digital image processing was performed by means of several algorithms based on different approaches (edge detection and fuzzy set theory). The detected cracks were described by parameters related to shape characteristics (dimension, typology, direction), which are necessary to recognize the severity of the road pavement conditions. The edge detection techniques tested in this research allowed the identification of fatigue (alligator) cracking and also of thin linear cracks in images with strong radiometric jumps, by applying filters, gradient functions and morphological operators. The snake approach was one of them, in particular the variant called Gradient Vector Flow (GVF). Another approach was based on fuzzy theory.
The advantage of this method is that it exploits the fact that the pixels identifying cracks in the road pavement are darker than their surroundings in the image. The last stage was the collection of the pavement distress spatial database. The Mobile Mapping System (MMS) allowed the localization of the raster data and consequently of the vector features of the detected cracks, also associating their attributes in a table. The proposed approaches allow the kind of road pavement crack to be automatically localized and classified. {\textcopyright} 2013 IEEE.}, doi = {10.1109/MED.2013.6608934}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84885199881\&partnerID=40\&md5=bf9cd82e8356555a25eb7cc1fd02b547}, author = {Adriano Mancini and Eva Savina Malinverni and Emanuele Frontoni and Primo Zingaretti} } @conference {Mancini2010448, title = {Road change detection from multi-spectral aerial data}, booktitle = {Proceedings - International Conference on Pattern Recognition}, year = {2010}, note = {cited By 2}, pages = {448-451}, abstract = {The paper presents a novel approach to automate the Change Detection (CD) problem for the specific task of road extraction. Manual approaches to CD fail in terms of the time needed to release updated maps; on the contrary, automatic approaches, based on machine learning and image processing techniques, allow large areas to be updated in a short time with accuracy and precision comparable to those obtained by human operators. This work is focused on the road-graph update starting from aerial, multi-spectral data. Georeferenced ground data, acquired by a GPS and an inertial sensor, are integrated with aerial data to speed up the change detector. After road extraction by means of a binary AdaBoost classifier, the old road-graph is updated by exploiting a particle filter. In particular, this filter proves very useful for linking (tracking) parts of roads not extracted by the classifier due to the presence of occlusions (e.g., shadows, trees). {\textcopyright} 2010 IEEE.}, doi = {10.1109/ICPR.2010.118}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78149486517\&partnerID=40\&md5=6fcdcfdf8b76c0c7c79a8cd6f6eaef25}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti} } @conference {Frontoni2010428, title = {Robot localization in urban environments using omnidirectional vision sensors and partial heterogeneous apriori knowledge}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 3}, pages = {428-433}, abstract = {This paper addresses the problem of long-term mobile robot localization in large urban environments using partial a priori knowledge made up of different kinds of images. Typically, GPS is the preferred sensor for outdoor operation. However, using GPS-only localization methods leads to significant performance degradation in urban areas where tall nearby structures obstruct the view of the satellites. In our work, we use omnidirectional vision-based sensors to complement GPS and odometry and provide accurate localization. We also present some novel Monte Carlo Localization optimizations and introduce the concept of online knowledge acquisition and integration, presenting a framework able to perform long-term robot localization in real environments.
The vision system identifies prominent features in the scene and matches them against a database of geo-referenced features that are either already known (with partial coverage of the environment, using both directional and omnidirectional images at different resolutions) or learned and integrated during the localization process (omnidirectional images only). Results of successful robot localization in the old town of Fermo are presented. The whole architecture also behaves well in long-term experiments, proving to be a suitable system for real-life robot applications, with a particular focus on the integration of different knowledge sources. {\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5551994}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957367131\&partnerID=40\&md5=619cedb309b8e6e9bf546721e565a793}, author = {Emanuele Frontoni and Ascani, A. and Adriano Mancini and Primo Zingaretti} } @conference {Mancini20092544, title = {RoboBuntu: A linux distribution for mobile robotics}, booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}, year = {2009}, note = {cited By 2}, pages = {2544-2549}, abstract = {In recent years Linux has started to climb the operating system (OS) market, and Ubuntu, derived from Debian, has become a good alternative to common OSs like Windows XP or Vista. The mobile robotics scientific community makes use of Linux-based OSs to avoid the lack of stability that affects Microsoft OSs, especially when real-time conditions must be satisfied. In this paper we present the Linux distribution RoboBuntu, an acronym formed by the union of ROBOt and uBUNTU, to overcome the fragmentation of the almost totally independent robotic software platforms existing today. The key idea behind RoboBuntu is the integration of different tools for mobile robotics into an embedded Ubuntu distribution. Another important characteristic of RoboBuntu is that every "hard step", like the installation and configuration of the OS and tools, is hidden from common users. In particular, RoboBuntu can be used by both students and researchers, as a LiveCD, as a permanent installation on a standard hard drive or, more interestingly, on a USB flash drive. {\textcopyright} 2009 IEEE.}, doi = {10.1109/ROBOT.2009.5152548}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-70350399144\&partnerID=40\&md5=7ad3e306e93a56d841f7c4fdaf250a03}, author = {Adriano Mancini and Emanuele Frontoni and Ascani, A. and Primo Zingaretti} } @conference {Ascani2008576, title = {Robot localization using omnidirectional vision in large and dynamic outdoor environments}, booktitle = {2008 IEEE/ASME International Conference on Mechatronics and Embedded Systems and Applications, MESA 2008}, year = {2008}, note = {cited By 3}, pages = {576-581}, abstract = {Local feature matching has become a commonly used method to compare images. For mobile robots, a reliable method for comparing images can constitute a key component for localization tasks. In this paper we present a mobile robot localization system based on local feature matching of omnidirectional images. In particular, we address the issue of appearance-based topological localization by comparing common feature-extraction methods (SIFT and SURF) to select robust features for matching the current robot view with reference images. Our datasets, each consisting of a large number of omnidirectional images, were acquired at different times of day (different lighting conditions) and with dynamic content in large outdoor environments (over 80,000 m2).
Two different approaches (WTA and MCL) were used to evaluate performance, which, in general, is satisfactory. In particular, the use of Monte Carlo particle filtering improves topological localization results for all datasets with all algorithms. {\textcopyright} 2008 IEEE.}, doi = {10.1109/MESA.2008.4735695}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-60749133774\&partnerID=40\&md5=71ab60c63c96799064ca0bbb23fb7656}, author = {Ascani, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Frontoni2006249, title = {Retrieval by visual content in image databases}, booktitle = {SEBD 2006 - Proceedings of the 14th Italian Symposium on Advanced Databases Systems}, year = {2006}, note = {cited By 0}, pages = {249-256}, abstract = {Large numbers of pictures and videos are created, published, transmitted and accessed every day by corporations and the general public for different uses, from entertainment to mobile robotics. Multimedia content has become a vital enterprise asset and visual content management has emerged as a strategic necessity. Content-based indexing of visual databases is a key technology for representing huge databases of images. A novel image retrieval technique, based on the Polar Weighted Walkthrough (PWW) representation, is presented and encouraging results of searching by content in image databases are shown. The proposed metric has very important properties: it is reflexive, invariant with respect to image scaling, compositional and, in particular, invariant to rotations.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84893300087\&partnerID=40\&md5=63338b15b124ee548b38cd8c308dab03}, author = {Emanuele Frontoni and Primo Zingaretti} } @article {Zingaretti1998257, title = {Robust real-time detection of an underwater pipeline}, journal = {Engineering Applications of Artificial Intelligence}, volume = {11}, number = {2}, year = {1998}, note = {cited By 14}, pages = {257-268}, abstract = {Currently, the methods of inspection of underwater structures employ remotely operated vehicles, guided from a support vessel by human operators. The risk of losing concentration calls for the development of an intelligent vision, guidance and control system to support the human activity. The paper presents a robust system for the detection and real-time tracking of submarine pipelines. An active vision system is proposed to predict changes in the scene, and to direct computational resources to confirm expectations by adapting the processing mode dynamically. The system originates from an image-processing algorithm that was previously developed by the authors to recognise the pipeline in the image plane. The accuracy of this algorithm has been enhanced by exploiting the temporal context in the image sequence. The disturbances on the acquired images caused by motion are partially removed by a Kalman filter. The filter proves advantageous in supporting the guidance and control of the ROV, and in making the image-processing module itself more robust. Sequences of underwater images, acquired at a constant sampling frequency from TV cameras, are used together with synchronised navigation data to demonstrate the effectiveness of the system. {\textcopyright} 1998 Elsevier Science Ltd. All rights reserved.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032041493\&partnerID=40\&md5=188347e23a7078459b6de61720f8fff2}, author = {Primo Zingaretti and S.M.
Zanoli} } @article {Zingaretti1998177, title = {Route following based on adaptive visual landmark matching}, journal = {Robotics and Autonomous Systems}, volume = {25}, number = {3-4}, year = {1998}, note = {cited By 3}, pages = {177-184}, abstract = {Route following based on visual landmark matching may require many models to cover all the different situations. This paper describes a system that is able to adapt the templates{\textquoteright} modelling parameters to environmental conditions (lighting, shadows, etc.) by means of a genetic learning technique. In addition, mobile robot self-localisation is obtained by a stereo approach that uses the centres of the matches in the two images to solve the correspondence problem in 3D position estimation in a simple way. The experimental results show that tracking robustness is improved while using a small set of templates. {\textcopyright} 1998 Elsevier Science B.V. All rights reserved.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032208707\&partnerID=40\&md5=8655435896f671afeeb70009f9086bee}, author = {Primo Zingaretti and A. Carbonaro} } @article {Tascini1996432, title = {Real-time inspection by submarine images}, journal = {Journal of Electronic Imaging}, volume = {5}, number = {4}, year = {1996}, note = {cited By 5}, pages = {432-442}, abstract = {A real-time application of computer vision concerning the tracking and inspection of a submarine pipeline is described. The objective is to develop automatic procedures for supporting human operators in the real-time analysis of images acquired by means of cameras mounted on underwater remotely operated vehicles (ROVs). Implementation of such procedures gives rise to a human-machine system for underwater pipeline inspection that can automatically detect and signal the presence of the pipe, of its structural or accessory elements, and of dangerous or alien objects in its neighborhood. The possibility of modifying the image acquisition rate in simulations performed on video-recorded images is used to prove that the system performs all the necessary processing with acceptable robustness, working in real time up to a speed of about 2.5 kn, far greater than that allowed by actual ROVs and by safety requirements. {\textcopyright} 1996 SPIE and IS\&T.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0009091679\&partnerID=40\&md5=0be09172d776524c453eb82b24820faf}, author = {Guido Tascini and Primo Zingaretti and Conte, G.} } @conference {Tascini1993322, title = {Retina vascular network recognition}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {1898}, year = {1993}, note = {cited By 5}, pages = {322-329}, abstract = {The analysis of morphological and structural modifications of the retina vascular network is an interesting investigation method in the study of diabetes and hypertension. Normally this analysis is carried out by qualitative evaluations, according to standardized criteria, though medical research attaches great importance to the quantitative analysis of vessel color, shape and dimensions. The paper describes a system which automatically segments and recognizes the ocular fundus circulation and microcirculation network, and extracts a set of features related to morphometric aspects of the vessels. For this class of images, classical segmentation methods seem weak. We propose a computer vision system in which the segmentation and recognition phases are closely connected. The system is hierarchically organized into four modules.
Firstly, the Image Enhancement Module (IEM) performs a set of custom image enhancements to remove blur and to prepare the data for the subsequent segmentation and recognition processes. Secondly, the Papilla Border Analysis Module (PBAM) automatically recognizes the number, position and local diameter of the blood vessels departing from the optical papilla. Then the Vessel Tracking Module (VTM) analyses the vessels by comparing the results of body and edge tracking, and detects branches and crossings. Finally, the Feature Extraction Module evaluates the PBAM and VTM output data and extracts some numerical indexes. The algorithms used appear to be robust and have been successfully tested on various ocular fundus images.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0027873308\&partnerID=40\&md5=fe51e921b3d6ecbbbd0483a977269acd}, author = {Guido Tascini and Passerini, G. and Paolo Puliti and Primo Zingaretti} }