@inproceedings{galdelli2019cloud,
  title    = {A Cloud Computing Architecture to Map Trawling Activities Using Positioning Data},
  year     = {2019},
  abstract = {Descriptive and spatially-explicit information on fisheries plays a key role for an efficient integrated management of the maritime activities and the sustainable use of marine resources. However, this information is today still hard to obtain and, consequently, is a major issue for implementing Marine Spatial Planning (MSP). Since 2002, the Automatic Identification System (AIS) has been undergoing a major development allowing now for a real time geo-tracking and identification of equipped vessels of more than 15m in length overall (LOA) and, if properly processed, for the production of adequate information for MSP. Such monitoring systems or other low-cost and low-burden solutions are still missing for small vessels (LOA \< 12m), whose catches and fishing effort remain spatially unassessed and, hence, unregulated. In this context, we propose an architecture to process vessel tracking data, understand the behaviour of trawling fleets and map related fishing activities. It could be used to process not only AIS data but also positioning data from other low cost systems as IoT sensors that share their position over LoRa and 2G/3G/4G links. Analysis gives back important and verified data (overall accuracy of 92\% for trawlers) and opens up development perspectives for monitoring small scale fisheries, helping hence to fill fishery data gaps and obtain a clearer picture of the fishing grounds as a whole.},
  doi      = {10.1115/DETC2019-97779},
  url      = {https://doi.org/10.1115/DETC2019-97779},
  author   = {Alessandro Galdelli and Adriano Mancini and Anna Nora Tassetti and Carmen Ferra Vega and Enrico Armelloni and Giuseppe Scarcella and Gianna Fabi and Primo Zingaretti}
}
@comment{NOTE(review): entry above had an empty citation key and is missing the required booktitle for an @inproceedings — confirm venue (the DOI prefix suggests an ASME conference).}

@inproceedings{8545397,
  title     = {Convolutional Networks for Semantic Heads Segmentation using Top-View Depth Data in Crowded Environment},
  booktitle = {2018 24th International Conference on Pattern Recognition (ICPR)},
  year      = {2018},
  month     = aug,
  pages     = {1384--1389},
  abstract  = {Detecting and tracking people is a challenging task in a persistent crowded environment (i.e. retail, airport, station, etc.) for human behaviour analysis of security purposes. This paper introduces an approach to track and detect people in cases of heavy occlusions based on CNNs for semantic segmentation using top-view depth visual data. The purpose is the design of a novel U-Net architecture, U-Net3, that has been modified compared to the previous ones at the end of each layer. In particular, a batch normalization is added after the first ReLU activation function and after each max-pooling and up-sampling functions. The approach was applied and tested on a new and public available dataset, TVHeads Dataset, consisting of depth images of people recorded from an RGB-D camera installed in top-view configuration. Our variant outperforms baseline architectures while remaining computationally efficient at inference time. Results show high accuracy, demonstrating the effectiveness and suitability of our approach.},
  keywords  = {Cameras, Computer architecture, Fractals, Head, Image segmentation, Semantics, Training},
  issn      = {1051-4651},
  doi       = {10.1109/ICPR.2018.8545397},
  author    = {Daniele Liciotti and Marina Paolanti and R. Pietrini and Emanuele Frontoni and Primo Zingaretti}
}

@article{Paolanti2018,
  title     = {Person Re-Identification with RGB-D Camera in Top-View Configuration through Multiple Nearest Neighbor Classifiers and Neighborhood Component Features Selection},
  journal   = {Sensors},
  volume    = {18},
  number    = {10},
  year      = {2018},
  month     = oct,
  pages     = {3471},
  publisher = {{MDPI} {AG}},
  doi       = {10.3390/s18103471},
  url       = {https://doi.org/10.3390/s18103471},
  author    = {Marina Paolanti and Luca Romeo and Daniele Liciotti and Annalisa Cenci and Emanuele Frontoni and Primo Zingaretti}
}

@inproceedings{galdelli2018synergic,
  title     = {A Synergic Photometric Stereo and Super Resolution Approach for Optical Inspection},
  booktitle = {2018 14th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications (MESA)},
  year      = {2018},
  pages     = {1--8},
  abstract  = {Three-dimensional reconstruction is a very important technique of Computer Vision that produces a three-dimensional (3D) model of a real scene. For this purpose, we use Photometric Stereo that allows you to shape estimation from several images under different lighting conditions. In the last few years, this technology has been used in many areas both working and scientific. An important sector where Photometric Stereo is used is quality control inside productive cycle. In some case, the model 3D is used to identify defects or to make measurements of the order of hundredths or even thousandths of millimeters. To obtain an accurate 3D model, it is necessary to use an cost-effective reconstruction system with a performance hardware. Alternatively, we propose a method that combine Photometric Stereo with Super Resolution that produces a very accurate model using cheap and lower performance hardware that is suitable for optical inspection. Indeed, the Super Resolution allows to overcome the hardware constraint and produce one high resolution image from a set of low resolution images. With this novel method, we obtain a very accurate 3D model that could be in a decision support system for optical inspection.},
  doi       = {10.1109/MESA.2018.8449206},
  url       = {https://doi.org/10.1109/MESA.2018.8449206},
  author    = {Alessandro Galdelli and Adriano Mancini and Emanuele Frontoni and Primo Zingaretti}
}

@inproceedings{liciotti2017hmm,
  title     = {HMM-based activity recognition with a ceiling RGB-D camera},
  booktitle = {ICPRAM (International Conference on Pattern Recognition Applications and Methods)},
  year      = {2017},
  month     = feb,
  abstract  = {Automated recognition of Activities of Daily Living allows to identify possible health problems and apply corrective strategies in Ambient Assisted Living (AAL). Activities of Daily Living analysis can provide very useful information for elder care and long-term care services. This paper presents an automated RGB-D video analysis system that recognises human ADLs activities, related to classical daily actions. The main goal is to predict the probability of an analysed subject action. Thus, the abnormal behaviour can be detected. The activity detection and recognition is performed using an affordable RGB-D camera. Human activities, despite their unstructured nature, tend to have a natural hierarchical structure; for instance, generally making a coffee involves a three-step process of turning on the coffee machine, putting sugar in cup and opening the fridge for milk. Action sequence recognition is then handled using a discriminative Hidden Markov Model (HMM). RADiaL, a dataset with RGB-D images and 3D position of each person for training as well as evaluating the HMM, has been built and made publicly available.},
  author    = {Daniele Liciotti and Emanuele Frontoni and Primo Zingaretti and Bellotto, Nicola and Duckett, Tom}
}

@article{sturari2017integrating,
  title     = {Integrating elevation data and multispectral high-resolution images for an improved hybrid Land Use/Land Cover mapping},
  journal   = {European Journal of Remote Sensing},
  volume    = {50},
  number    = {1},
  year      = {2017},
  pages     = {1--17},
  publisher = {Taylor \& Francis},
  author    = {Mirco Sturari and Emanuele Frontoni and Roberto Pierdicca and Adriano Mancini and Eva Savina Malinverni and Anna Nora Tassetti and Primo Zingaretti}
}

@inbook{Liciotti2017,
  title        = {An Intelligent RGB-D Video System for Bus Passenger Counting},
  booktitle    = {Intelligent Autonomous Systems 14: Proceedings of the 14th International Conference IAS-14},
  year         = {2017},
  pages        = {473--484},
  publisher    = {Springer International Publishing},
  organization = {Springer International Publishing},
  address      = {Cham},
  isbn         = {978-3-319-48036-7},
  doi          = {10.1007/978-3-319-48036-7_34},
  url          = {http://dx.doi.org/10.1007/978-3-319-48036-7_34},
  author       = {Daniele Liciotti and Annalisa Cenci and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti},
  editor       = {Chen, Weidong and Hosoda, Koh and Menegatti, Emanuele and Shimizu, Masahiro and Wang, Hesheng}
}

@inproceedings{Cenci:2017:MAP:3109761.3109773,
  title        = {Movements Analysis of Preterm Infants by Using Depth Sensor},
  booktitle    = {Proceedings of the 1st International Conference on Internet of Things and Machine Learning},
  series       = {IML {\textquoteright}17},
  year         = {2017},
  month        = oct,
  pages        = {12:1--12:9},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Qualitative assessment of general movements in preterm infants is widely used in clinical practice. It can enable early detection of neurological dysfunctions and consequent neuromotor impairments in high risk infants. However, the outcome of these assessments is not standardized and it is influenced by examiner{\textquoteright}s subjective interpretation. For this reason, there is an increasing interest in the use of automated movement recognition technologies being applied in this field. In this work, we use a video-based system for preterm infant{\textquoteright}s movements assessment to provide a 3D\ motion analysis method able to extract some important indicators from the sequence of depth images collected by using an RGB-D sensor placed over the infant lying on the crib. The advantage of the proposed method is that it is objective, contactless, non-invasive, easy to install, affordable and suitable to be used in an indoor environment with poor lighting, as might be rooms in the Neonatal Intensive Care Unit, where these infants are taken into care. Experimental results show that the proposed method is able to derive from statistical analysis of depth data some key performance indicators, each of which describes different characteristics of the infant{\textquoteright}s spontaneous movements. Preliminary tests are conducted in the experimental phase on a preterm infant hospitalized in a women{\textquoteright}s and children{\textquoteright}s hospital. The project can be used to investigate the relationship between the characteristics of spontaneous movements and the presence of pathologies as cerebral palsy or other minor neurological dysfunctions.},
  keywords     = {3D tracking, clustering, modelling, preterm infant{\textquoteright}s movement analysis},
  isbn         = {978-1-4503-5243-7},
  doi          = {10.1145/3109761.3109773},
  url          = {http://doi.acm.org/10.1145/3109761.3109773},
  author       = {Annalisa Cenci and Daniele Liciotti and Emanuele Frontoni and Primo Zingaretti and Virgilio Paolo Carnielli}
}

@inbook{liciotti2017person,
  title     = {Person Re-identification Dataset with RGB-D Camera in a Top-View Configuration},
  booktitle = {Video Analytics. Face and Facial Expression Recognition and Audience Measurement},
  volume    = {1},
  year      = {2017},
  author    = {Daniele Liciotti and Marina Paolanti and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@inbook{liciotti2017pervasive,
  title     = {Pervasive System for Consumer Behaviour Analysis in Retail Environments},
  booktitle = {Video Analytics. Face and Facial Expression Recognition and Audience Measurement},
  volume    = {2},
  year      = {2017},
  author    = {Daniele Liciotti and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@article{frontoni2017shelf,
  title     = {Shelf space re-allocation for out of stock reduction},
  journal   = {Computers \& Industrial Engineering},
  year      = {2017},
  publisher = {Elsevier},
  abstract  = {A planogram is a detailed visual map that establishes product positions over a shelf in a retail store. It is designed to support an innovative merchandising approach, to increase sales and profits, to supply the best location of a product for suppliers and to better manage the shelves. Product selection and the shelf space reserved to each product is a central activity for retailers and Shelf Out of Stock (SOOS) events are often strongly related to planogram design. In this paper we present a solution to optimally re-allocate shelf space to minimize Out of Stock (OOS) events. The approach uses SOOS data coming in real time from a sensor network technology, named Shelf Detector System, and an Integer Linear Programming model that integrates a space elastic demand function. Experimental results, based on a real scenario in the diaper category in Belgium, have proved that the system can efficiently calculate a proper solution able to re-allocate space and reduce OOS events.}
}
@comment{NOTE(review): the source file was corrupted here — frontoni2017shelf's closing fields (author, doi) are missing, and the following entry's header (key, title, journal, year) was lost, its abstract having been fused into the abstract above. The entry below keeps only the fields that were actually present in the source; reconstruct its title/journal from doi 10.1007/s10846-015-0249-4 and confirm.}
@article{Fulvio2016,
  abstract = {The paper focuses on the design of a fast and reliable multi-point vision-based measurement system able to be flexible, low cost and accurate for quality control in industrial robotics applications. The paper discusses a new method for integrating visual quality control in highly dynamic manufacturing lines, where products are added or removed from production. The structure of the vision-based measurement system is described. In particular, the stereo system is created by the movement of a single camera mounted on a six-axis manipulator. The visual software is structured in three phases: single camera and stereo calibration, addition of the visual inspection tasks for the object to be measured, stereo measurement. The feasibility of the proposed solution has been tested in a real industrial application with strong requirements on robot speed. The comparison of experimental results on a target with a simple and well-defined shape has shown that the proposed solution provides better results, in terms of accuracy and measurement speed, when compared to commercial libraries and RGBD vision systems. Besides, the robot motion policy adopted by the solution proposed here guarantees a continuous movement without the need for stop\&go phases. {\textcopyright} 2015, Springer Science+Business Media Dordrecht.},
  doi      = {10.1007/s10846-015-0249-4},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84955089994\&partnerID=40\&md5=9c4abfb9d88e7dff178bbd704c850bbb},
  author   = {Fulvio, G.D. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@inproceedings{frontoni2016optimal,
  title        = {Optimal production planning by reusing components},
  booktitle    = {Control and Automation (MED), 2016 24th Mediterranean Conference on},
  year         = {2016},
  pages        = {1272--1277},
  publisher    = {IEEE},
  organization = {IEEE},
  author       = {Emanuele Frontoni and Fabrizio Marinelli and Marina Paolanti and Roberto Rosetti and Primo Zingaretti}
}

@article{Sturari2016,
  title    = {Robust and affordable retail customer profiling by vision and radio beacon sensor fusion},
  journal  = {Pattern Recognition Letters},
  year     = {2016},
  abstract = {The development of reliable and precise indoor localization systems would considerably improve the ability to investigate shopper movements and behaviour inside retail environments. Previous approaches used either computer vision technologies or the analysis of signals emitted by communication devices (beacons). While computer vision approaches provide higher level of accuracy, beacons cover a wider operational area. In this paper, we propose a sensor fusion approach between active radio beacons and RGB-D cameras. This system, used in an intelligent retail environment where cameras are already installed for other purposes, allows an affordable environment set-up and a low operational costs for customer indoor localization and tracking. We adopted a Kalman filter to fuse localization data from radio signals emitted by beacons are used to track users{\textquoteright} mobile devices and RGB-D cameras used to refine position estimations. By combing coarse localization datasets from active beacons and RGB-D data from sparse cameras, we demonstrate that the indoor position estimation is strongly enhanced. The aim of this general framework is to provide retailers with useful information by analysing consumer activities inside the store. To prove the robustness of our approach, several tests were conducted into a real indoor showroom by analysing real customers behaviour with encouraging results.},
  issn     = {0167-8655},
  doi      = {10.1016/j.patrec.2016.02.010},
  url      = {http://www.sciencedirect.com/science/article/pii/S016786551600057X},
  author   = {Mirco Sturari and Daniele Liciotti and Roberto Pierdicca and Emanuele Frontoni and Adriano Mancini and Marco Contigiani and Primo Zingaretti}
}

@article{pierdicca2016smart,
  title     = {Smart maintenance of riverbanks using a standard data layer and Augmented Reality},
  journal   = {Computers \& Geosciences},
  volume    = {95},
  year      = {2016},
  pages     = {67--74},
  publisher = {Pergamon},
  author    = {Roberto Pierdicca and Emanuele Frontoni and Primo Zingaretti and Adriano Mancini and Eva Savina Malinverni and Anna Nora Tassetti and Marcheggiani, Ernesto and Galli, Andrea}
}

@article{cocchioni2016visual,
  title     = {Visual Based Landing for an Unmanned Quadrotor},
  journal   = {Journal of Intelligent \& Robotic Systems},
  volume    = {84},
  number    = {1-4},
  year      = {2016},
  pages     = {511--528},
  publisher = {Springer Netherlands},
  author    = {Francesco Cocchioni and Emanuele Frontoni and Ippoliti, Gianluca and Sauro Longhi and Adriano Mancini and Primo Zingaretti}
}

@article{Pierdicca201538,
  title   = {Advanced interaction with paintings by augmented reality and high resolution visualization: A real case exhibition},
  journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume  = {9254},
  year    = {2015},
  note    = {cited By 0},
  pages   = {38--50},
  doi     = {10.1007/978-3-319-22888-4_4},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84944686566\&partnerID=40\&md5=8c678ed32702d02aaa50edbf34433ca2},
  author  = {Roberto Pierdicca and Emanuele Frontoni and Primo Zingaretti and Mirco Sturari and Paolo Clini and Quattrini, R.}
}

@inproceedings{paolanti2015automatic,
  title        = {Automatic Classification for Anti Mixup Events in Advanced Manufacturing System},
  booktitle    = {ASME 2015 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference},
  year         = {2015},
  pages        = {V009T07A061},
  publisher    = {American Society of Mechanical Engineers},
  organization = {American Society of Mechanical Engineers},
  author       = {Marina Paolanti and Emanuele Frontoni and Adriano Mancini and Roberto Pierdicca and Primo Zingaretti}
}

@article{Callegari2015,
  title   = {Current Developments in Robotics and Mobile Machines},
  journal = {Journal of Intelligent and Robotic Systems: Theory and Applications},
  year    = {2015},
  note    = {cited By 0; Article in Press},
  doi     = {10.1007/s10846-015-0281-4},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84944707292\&partnerID=40\&md5=4ebe2e97ab2603b9e7503d4b595f438b},
  author  = {Callegari, M. and Primo Zingaretti}
}

@article{Mancini2015,
  title   = {Embedded Multisensor System for Safe Point-to-Point Navigation of Impaired Users},
  journal = {IEEE Transactions on Intelligent Transportation Systems},
  year    = {2015},
  note    = {cited By 0; Article in Press},
  doi     = {10.1109/TITS.2015.2489261},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84946761743\&partnerID=40\&md5=239f3356a998bb85416f5e2f91c5d623},
  author  = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti}
}

@article{Frontoni201521114,
  title   = {Embedded vision sensor network for planogram maintenance in retail environments},
  journal = {Sensors (Switzerland)},
  volume  = {15},
  number  = {9},
  year    = {2015},
  note    = {cited By 0},
  pages   = {21114--21133},
  doi     = {10.3390/s150921114},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84940689195\&partnerID=40\&md5=a67474231cc9c9f78f87d71734c24a53},
  author  = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@inproceedings{Mancini2015534,
  title     = {High-resolution mapping of river and estuary areas by using unmanned aerial and surface platforms},
  booktitle = {2015 International Conference on Unmanned Aircraft Systems, ICUAS 2015},
  year      = {2015},
  note      = {cited By 0},
  pages     = {534--542},
  doi       = {10.1109/ICUAS.2015.7152333},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84941111909\&partnerID=40\&md5=994c13558511f25e91843621dde17a5c},
  author    = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi}
}

@inproceedings{liciotti2015human,
  title        = {Human activity analysis for in-home fall risk assessment},
  booktitle    = {Communication Workshop (ICCW), 2015 IEEE International Conference on},
  year         = {2015},
  pages        = {284--289},
  publisher    = {IEEE},
  organization = {IEEE},
  author       = {Daniele Liciotti and Gionata Massi and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@inproceedings{pierdicca2015low,
  title        = {Low cost embedded system for increasing retail environment intelligence},
  booktitle    = {Multimedia \& Expo Workshops (ICMEW), 2015 IEEE International Conference on},
  year         = {2015},
  pages        = {1--6},
  publisher    = {IEEE},
  organization = {IEEE},
  author       = {Roberto Pierdicca and Daniele Liciotti and Marco Contigiani and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@article{Pierdicca201525,
  title   = {Making visible the invisible. Augmented reality visualization for 3D reconstructions of archaeological sites},
  journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume  = {9254},
  year    = {2015},
  note    = {cited By 0},
  pages   = {25--37},
  doi     = {10.1007/978-3-319-22888-4_3},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84944688602\&partnerID=40\&md5=5988b5e6786fdbcd931f64ac7f8483f5},
  author  = {Roberto Pierdicca and Emanuele Frontoni and Primo Zingaretti and Eva Savina Malinverni and F. Colosi and R. Orazi}
}

@inproceedings{cenci2015non,
  title        = {Non-Contact Monitoring of Preterm Infants Using RGB-D Camera},
  booktitle    = {ASME 2015 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference},
  year         = {2015},
  pages        = {V009T07A003},
  publisher    = {American Society of Mechanical Engineers},
  organization = {American Society of Mechanical Engineers},
  author       = {Annalisa Cenci and Daniele Liciotti and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@article{Cocchioni2015,
  title   = {Visual Based Landing for an Unmanned Quadrotor},
  journal = {Journal of Intelligent and Robotic Systems: Theory and Applications},
  year    = {2015},
  note    = {cited By 0; Article in Press},
  doi     = {10.1007/s10846-015-0271-6},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84941686223\&partnerID=40\&md5=9ccfc812909e8a74b64d123902639160},
  author  = {Francesco Cocchioni and Emanuele Frontoni and Ippoliti, G. and Sauro Longhi and Adriano Mancini and Primo Zingaretti}
}

@inproceedings{Liciotti2014,
  title     = {Advanced integration of multimedia assistive technologies: A prospective outlook},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {In the recent years several studies on population ageing in the most advanced countries argued that the share of people older than 65 years is steadily increasing. In order to tackle this phenomena, a significant effort has been devoted to the development of advanced technologies for supervising the domestic environments and their inhabitants to provide them assistance in their own home. In this context, the present paper aims to delineate a novel, highly-integrated system for advanced analysis of human behaviours. It is based on the fusion of the audio and vision frameworks, developed at the Multimedia Assistive Technology Laboratory (MATeLab) of the Universit{\`a} Politecnica delle Marche, in order to operate in the ambient assisted living context exploiting audio-visual domain features. The existing video framework exploits vertical RGB-D sensors for people tracking, interaction analysis and users activities detection in domestic scenarios. The depth information has been used to remove the affect of the appearance variation and to evaluate users activities inside the home and in front of the fixtures. In addition, group interactions are monitored and analysed. On the other side, the audio framework recognises voice commands by continuously monitoring the acoustic home environment. In addition, a hands-free communication to a relative or to a healthcare centre is automatically triggered when a distress call is detected. Echo and interference cancellation algorithms guarantee the high-quality communication and reliable speech recognition, respectively. The system we intend to delineate, thus, exploits multi-domain information, gathered from audio and video frameworks each, and stores them in a remote cloud for instant processing and analysis of the scene. Related actions are consequently performed.},
  doi       = {10.1109/MESA.2014.6935629},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911977133\&partnerID=40\&md5=3a8fad94ccf6268631dbf553e9360956},
  author    = {Daniele Liciotti and Ferroni, G. and Emanuele Frontoni and Squartini, S. and Principi, E. and Bonfigli, R. and Primo Zingaretti and Francesco Piazza}
}

@comment{NOTE(review): key renamed from the duplicate "Liciotti2014" to "Liciotti2014a"; update any citations of this paper.}
@inproceedings{Liciotti2014a,
  title     = {An automatic analysis of shoppers behaviour using a distributed RGB-D cameras system},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {The aim of this work is to propose an integrated system consisted of an RGB-D camera and software able to monitor shoppers in intelligent retail environments. We want to propose an innovative low cost intelligent system that can evaluate not only the shopper behaviour, but also detect their interactions with the products in the shelves, by developing automatic RGB-D techniques for video analysis. The system of cameras, located in strategic locations within the store, detects the presence of a person by identifying the blob and the centre of mass. The camera detects the person as an object in moving. Through the video frames, the system detects the interactions of the shoppers with the products on the shelf and establishes also the type of interaction: if a product is picked up, if the product is taken and then repositioned and finally, if there is not contact with the products. To understand the shopper behaviour is very important for the marketing strategies of a retail store. The proposed architecture monitors this aspect, that is low cost, easy to install and able to ensure very satisfactory results also in real environments.},
  doi       = {10.1109/MESA.2014.6935617},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911973081\&partnerID=40\&md5=57f2b74e422e1e28066bf350b02a97e2},
  author    = {Daniele Liciotti and Primo Zingaretti and Valerio Placidi}
}

@article{Frontoni2014,
  title   = {Feature group matching: A novel method to filter out incorrect local feature matchings},
  journal = {International Journal of Pattern Recognition and Artificial Intelligence},
  volume  = {28},
  number  = {5},
  year    = {2014},
  note    = {cited By 4},
  abstract = {The importance of finding correct correspondences between two images is the major aspect in problems such as appearance-based robot localization and content-based image retrieval. Local feature matching has become a commonly used method to compare images, despite being highly probable that at least some of the matchings/correspondences it detects are incorrect. In this paper, we describe a novel approach to local feature matching, named Feature Group Matching (FGM), to select stable features and obtain a more reliable similarity value between two images. The proposed technique is demonstrated to be translational, rotational and scaling invariant. Experimental evaluation was performed on large and heterogeneous datasets of images using SIFT and SURF, the actual state-of-the-art feature extractors. Results show that FGM avoids almost 95\% of incorrect matchings, reduces the visual aliasing (number of images considered similar) and increases both robotic localization and image retrieval accuracy on the average of 13\%. {\textcopyright} 2014 World Scientific Publishing Company.},
  doi     = {10.1142/S0218001414500128},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84905462697\&partnerID=40\&md5=e778e6ea38958157d1df890fc014a6e6},
  author  = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti}
}

@inproceedings{Marinelli2014,
  title     = {GPU acceleration of feature extraction and matching algorithms},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {During the last years the applications of Computer Vision have increased greatly in many different contexts, owing to the availability of more and more powerful hardware. However, in some situations, the problem of algorithms with a high computational time still continues to limit their growth. One of the causes is that the progress from the point of view of software was much lower, despite very efficient algorithms have been discovered. This paper is focused on a way to accelerate some computer vision algorithms. In particular, they will be described and tested the benefits of running on a Graphical Processing Unit (GPU) the Feature Group Matching (FGM) algorithm, a novel approach to local feature matching to select stable features and obtain a more reliable similarity value between two images. Being FGM based on the state of the art algorithms Scale-Invariant Feature Transform (SIFT) and Speeded Up Robust Features (SURF), also the performances of these algorithms on a GPU implementation using the Compute Unified Device Architecture (CUDA) will be described.},
  doi       = {10.1109/MESA.2014.6935620},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911974006\&partnerID=40\&md5=662fc68375fbb7536a27d28f5e8987df},
  author    = {M. Marinelli and Adriano Mancini and Primo Zingaretti}
}

@article{Frontoni2014255,
  title   = {Information management for intelligent retail environment: The shelf detector system},
  journal = {Information (Switzerland)},
  volume  = {5},
  number  = {2},
  year    = {2014},
  note    = {cited By 3},
  pages   = {255--271},
  abstract = {Shelf-out-of-stock is one of the leading motivations of technology innovation in the shelf of the future. The Shelf Detector project described in this paper aims to solve the problem of data knowledge in the shelf-out-of-stock problem. This paper is mainly focused on the information layer of the system and main novelties illustrated in this work are in the information field demonstrating the huge number of insights that can be derived from the use of such a tool able to gather data in real time from the store. The tool presented is the first being installed for a long time in a high number of stores and products, demonstrating the ability to gather data and extract interesting insights. This paper aims to demonstrate the feasibility and the scalability of our system in providing a high number of data and interesting insights for store and marketing teams. The cloud based architecture developed and tested in this project is a key feature of our system together with the ability to collect data from a distributed sensor network. {\textcopyright} 2014 by the authors.},
  doi     = {10.3390/info5020255},
  url     = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84902662564\&partnerID=40\&md5=53f917be9988ce992278dc033771abf7},
  author  = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Valerio Placidi}
}

@inproceedings{Rossi2014,
  title     = {Interoperability issues among smart home technological frameworks},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {Population aging may be seen both as a human success story, the triumph of public health, medical advancements and economic development over diseases and injures, and as one of the most challenging phenomena that society faces in this century. Assistive technology in all its possible implementations (from Telemedicine to Ambient Assisted Living, and Ambient Intelligence) represents an emerging answer to the needs of the new generation of older adults whose desire is to live longer with a higher quality of life. Objective of this paper is to present the results of a public financed action for the development and implementation of an {\textquoteright}integration platform{\textquoteright} for Ambient Assisted Living that includes features of home automation (energy management, safety, comfort, etc.) and introduces {\textquoteright}smart objects{\textquoteright}, to monitor activities of daily living and detect any abnormal behavior that may represent a danger, or highlight symptoms of some incipient disease.},
  doi       = {10.1109/MESA.2014.6935626},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911958222\&partnerID=40\&md5=969da407b2d8bad8fa3e16dbc7d254a0},
  author    = {Rossi, L. and Belli, A. and De Santis, A. and Diamantini, C. and Emanuele Frontoni and Gambi, E. and Palma, L. and Pernini, L. and Pierleoni, P. and Potena, D. and Raffaeli, L. and Spinsante, S. and Primo Zingaretti and Cacciagrano, D. and Corradini, F. and Culmone, R. and De Angelis, F. and Merelli, E. and Re, B.}
}

@inproceedings{Mancini2014,
  title     = {Point to point navigation for people with mobility impairments},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {The wide availability of high resolution city maps has radically changed our life during the last years. In particular, the routing services offer the capability to plan a travel by taking into account different transportation modalities, e.g., by car, train, public transport, bike, foot. Actually these routing services are mainly oriented to the majority of end-users and does not take into account particular constraints. This paper introduces a novel framework to assist people with mobility impairments in point to point navigation. The proposed framework is based on the creation of a road graph with different levels of detail (LoD) that can be modified at run-time by users if an obstacle obstructs the pre-planned path. Sidewalks, crosswalks and dynamic obstacles are also considered during the planning due to their importance for the navigation of mobility-impaired users. The proposed framework is based on OpenStreetMap (OSM) to manage and model the road graph, OpenTripPlanner (OTP) to plan a dedicated path considering different constraints, and mobile platforms for real time navigation and obstacle detection.},
  doi       = {10.1109/MESA.2014.6935622},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911952948\&partnerID=40\&md5=a4464d155dc00c67ea2fbb6b0e214263},
  author    = {Adriano Mancini and Primo Zingaretti}
}

@comment{NOTE(review): key renamed from the duplicate "Frontoni2014" to "Frontoni2014a"; update any citations. Entry continues past the end of this chunk.}
@inproceedings{Frontoni2014a,
  title     = {Real time out of shelf detection using embedded sensor network},
  booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings},
  year      = {2014},
  note      = {cited By 0},
  abstract  = {Out-of-shelf problem is important to solve for retail store since the absence of products on the shelf can lead to a significant reduction of shoppers and a consequent drop on sales. For this purpose, it is necessary to study and to introduce approaches able to establish the lack of products on the shelves and thereby promptly ensuring their repositioning. In this context, the paper investigates the use of artificial intelligence techniques in detecting the out-of-shelf products. Particularly, having sales data, ordering info and product assortment of the store available, we study the development of low cost shelf detector that is based on wireless sensor network, and that can automatically discover out-of-shelf situations on a daily basis for all the stores of a retail chain. The use of an automatic method for detecting products that are not available on the shelf based on sales data would offer an accurate view of the shelf availability, both to retailers and to product suppliers. The tool presented is the first being installed for a long time in a high number of stores and products demonstrating the ability to gather data from there and extract interesting insights. 
This paper aims to present the hardware infrastructure of an embedded sensor network devoted to real time shelf out-of-stock management and to demonstrate the feasibility and the scalability of the system in providing a lot of data and interesting insights for store team and brand{\textquoteright}s marketing team.}, doi = {10.1109/MESA.2014.6935614}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911958905\&partnerID=40\&md5=8aa2443592d484a806a041db808efd3e}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Massi2014, title = {A real-time reliability and durability testing framework}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {This paper presents a methodological framework for designing testing and measurement systems fully integrated with the enterprise information system. In comparison with the most common solutions for designing embedded testing platforms the proposed framework sets itself at a higher level of abstraction. The proposed framework allows getting different, programmable test benches that can run in parallel, and it does not restrict the choice of hardware, sensors and actuators, as it happens with commercial development systems for the same kind of machines. The framework is conceived to be used on embedded boards equipped with the GNU/Linux operating system and with at least one network interface. By using open data formats, the framework provides an easy way to exchange data with enterprise information systems, thus assuring interoperability with different IT solutions. 
The paper includes the description of a cooker hood testing system designed and implemented with this framework, and which highlights the advantages of the proposed development method.}, doi = {10.1109/MESA.2014.6935592}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911979615\&partnerID=40\&md5=9e44e4e4151cf14b313983a811b7c3f1}, author = {Gionata Massi and Morganti, G. and Claudi, A. and Primo Zingaretti} } @conference {frontoni2014security, title = {Security issues for data sharing and service interoperability in eHealth systems: the Nu. Sa. test bed}, booktitle = {Security Technology (ICCST), 2014 International Carnahan Conference on}, year = {2014}, pages = {1{\textendash}6}, publisher = {IEEE}, organization = {IEEE}, author = {Emanuele Frontoni and Baldi, Marco and Primo Zingaretti and Landro, Vincenzo and Misericordia, Paolo} } @article {Liciotti2014, title = {Shopper analytics: A customer activity recognition system using a distributed rgb-d camera network}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {8811}, year = {2014}, note = {cited By 0}, abstract = {The aim of this paper is to present an integrated system consisted of a RGB-D camera and a software able to monitor shoppers in intelligent retail environments. We propose an innovative low cost smart system that can understand the shoppers{\textquoteright} behavior and, in particular, their interactions with the products in the shelves, with the aim to develop an automatic RGB-D technique for video analysis. The system of cameras detects the presence of people and univocally identifies them. Through the depth frames, the system detects the interactions of the shoppers with the products on the shelf and determines if a product is picked up or if the product is taken and then put back and finally, if there is not contact with the products. 
The system is low cost and easy to install, and experimental results demonstrated that its performances are satisfactory also in real environments.}, doi = {10.1007/978-3-319-12811-5_11}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84921652873\&partnerID=40\&md5=44bc641a712fa9af6ffd341f41b0aec0}, author = {Daniele Liciotti and Marco Contigiani and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Valerio Placidi} } @article {Frontoni2014853, title = {SIT-REM: An interoperable and interactive web geographic information system for fauna, flora and plant landscape data management}, journal = {ISPRS International Journal of Geo-Information}, volume = {3}, number = {2}, year = {2014}, note = {cited By 0}, pages = {853--867}, publisher = {MDPI AG}, abstract = {The main goal of the SIT-REM project is the design and the development of an interoperable web-GIS environment for the information retrieval and data editing/updating of the geobotanical and wildlife map of Marche Region. The vegetation, plant landscape and faunistic analysis allow the realization of a regional information system for wildlife-geobotanical data. A main characteristic of the SIT-REM is its flexibility and interoperability, in particular, its ability to be easily updated with the insertion of new types of environmental, faunal or socio-economic data and to generate analyses at any geographical (from regional to local) or quantitative level of detail. Different query levels obtain the latter: spatial queries, hybrid query builder and WMSs usable by means of a GIS. SIT-REM has been available online for more than a year and its use over this period has produced extensive data about users{\textquoteright} experiences.
{\textcopyright} 2014 by the authors; licensee MDPI, Basel, Switzerland.}, issn = {2220-9964}, doi = {10.3390/ijgi3020853}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84948991558\&partnerID=40\&md5=e554cb57d885eec6bb72afa03e616523}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Eva Savina Malinverni and Pesaresi, S. and Biondi, E. and Pandolfi, M. and Marseglia, M. and Mirco Sturari and Zabaglia, C.} } @conference {DiFulvio2014, title = {A stereovision system for dimensional measurements in industrial robotics applications}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {The great variety of artificial vision applications, ranging from industrial automation and robotics to analysis and classification of documents, from bioinformatics and medicine to data mining, is due to the reinforcement of information systems by the realization of algorithms that improved stability and efficiency of the results, also from the computational complexity point of view. The purpose of this work is to propose a stereoscopic vision system for dimensional measurements in an application of industrial robotics. The paper describes how a camera mounted on a six-axis robot manipulator can calculate the real dimension of a target object. So, in the application the vision system is not composed of two physical cameras, but the stereo system is created by the movement of a single camera. The work provides a calibration procedure of the camera preliminary to the stage of actual measurement.
Experimental results on a target with a simple and well-defined shape have shown that the accuracy of the measurement is strongly influenced by a correct calibration.}, doi = {10.1109/MESA.2014.6935618}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911977431\&partnerID=40\&md5=53653d90297d7f3e5afba6aee6a53c3c}, author = {Di Fulvio, G. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Cocchioni2014374, title = {Unmanned ground and aerial vehicles in extended range indoor and outdoor missions}, booktitle = {2014 International Conference on Unmanned Aircraft Systems, ICUAS 2014 - Conference Proceedings}, year = {2014}, note = {cited By 1}, pages = {374--382}, abstract = {The capability to instantiate a cooperation among heterogeneous agents is a fundamental feature in mobile robotics. In this paper we focus on the interaction between Unmanned Ground Vehicle (UGV) and Unmanned Aerial Vehicle (UAV) to extend the endurance of UAV, thanks to a novel landing/recharging platform. The UGV acts as a docking station and hosts the UAV during the indoor/outdoor transition and vice-versa. We designed a platform and a robust landing target to automate the fast recharge of UAV. The synchronization and coordination of cooperation is managed by a Ground Control Station (GCS) developed using a versatile software toolchain based on the integration of Stateflow, auto-generation of C-code and ROS. All the software components of UAV, UGV and GCS have been developed using ROS. The obtained results show that the UAV is able to land over the UGV with high accuracy (\<5cm for both x and y axis) thanks to a visual position estimation algorithm, also in presence of wind (with gust up to 20-25km/h), recharging its batteries in a short time to extend its endurance.
{\textcopyright} 2014 IEEE.}, doi = {10.1109/ICUAS.2014.6842276}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84904582437\&partnerID=40\&md5=f57cb12575d6f9ff24ff152e95b48c80}, author = {Francesco Cocchioni and Pierfelice, V. and Benini, A. and Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Ippoliti, G. and Sauro Longhi} } @conference {Gao2014, title = {Welcome message}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, doi = {10.1109/MESA.2014.6935511}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911971399\&partnerID=40\&md5=276da3fdd39f1fd66d31f47d0f38e04e}, author = {Gao, Y. and Primo Zingaretti and Koo, J.C. and Emanuele Frontoni} } @article {Frontoni2013509, title = {Customers{\textquoteright} activity recognition in intelligent retail environments}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {8158 LNCS}, year = {2013}, note = {cited By 6}, pages = {509-516}, abstract = {This paper aims to propose a novel idea of an embedded intelligent system where low cost embedded vision systems can analyze human behaviors to obtain interactivity and statistical data, mainly devoted to customer behavior analysis. In this project we addressed the need for new services into the shop, involving consumers more directly and instigating them to increase their satisfaction and, as a consequence, their purchases. To do this, technology is very important and allows making interactions between costumers and products and between customers and the environment of the shop a rich source of marketing analysis. 
We construct a novel system that uses vertical RGBD sensor for people counting and shelf interaction analysis, where the depth information is used to remove the effect of the appearance variation and to evaluate customers{\textquoteright} activities inside the store and in front of the shelf, with products. Also group interactions are monitored and analyzed with the main goal of having a better knowledge of the customers{\textquoteright} activities, using real data in real time. Even if preliminary, results are convincing and most of all the general architecture is affordable in this specific application, robust, easy to install and maintain and low cost. {\textcopyright} 2013 Springer-Verlag.}, doi = {10.1007/978-3-642-41190-8_55}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84887121570\&partnerID=40\&md5=8a6e9b3442d8d4dbd47b404bd8248a7b}, author = {Emanuele Frontoni and Raspa, P. and Adriano Mancini and Primo Zingaretti and Valerio Placidi} } @conference {Luchetti20131199, title = {Design and test of a precise mobile GPS tracker}, booktitle = {2013 21st Mediterranean Conference on Control and Automation, MED 2013 - Conference Proceedings}, year = {2013}, note = {cited By 2}, pages = {1199--1207}, abstract = {In the last years the widespread diffusion of smartphones with sensing capabilities paved the way to smart pervasive applications. The tracking of users activities aided by the set of sensors installed on board of smartphones represents a really interesting market for users which today demand reliable, smart and tailored services. The target group of sportsmen was of particular concern in this study. We aim at offering them a tool to record their training sessions along with a web community to share their activities with similar users. To achieve this we have to deal with different issues concerning localization and modelling of the training session by using the GPS, as well as synchronization with the web service to upload the training data.
{\textcopyright} 2013 IEEE.}, doi = {10.1109/MED.2013.6608872}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84885209325\&partnerID=40\&md5=cd710f4031bf159dc90b4ee6ada8a129}, author = {Luchetti, G. and Servici, G. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Frontoni2013, title = {Energy harvesting for smart shoes: A real life application}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {4}, year = {2013}, note = {cited By 1}, abstract = {Advanced technical developments have increased the efficiency of devices in capturing trace amounts of energy from the environment (such as from human movements) and transforming them into electrical energy (e.g., to instantly charge mobile devices). In addition, advancements in microprocessor technology have increased power efficiency, effectively reducing power consumption requirements. In combination, these developments have sparked interest in the engineering community to develop more and more applications that utilize energy harvesting for power. The approach here described aims to designing and manufacturing an innovative easy-to-use and general-purpose device for energy harvesting in general purpose shoes. The novelty of this device is the integration of polymer and ceramic piezomaterials accomplished by injection molding. In this spirit, this paper examines different devices that can be built into a shoe, (where excess energy is readily harvested) and used for generating electrical power while walking. A Main purpose is the development of an indoor localization system embedded in shoes that periodically broadcasts a digital RFID as the bearer walks. Results are encouraging and real life test are conducted on the first series of prototypes. 
Copyright {\textcopyright} 2013 by ASME.}, doi = {10.1115/DETC2013-12310}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84899124826\&partnerID=40\&md5=0a32a61f326043d4dcaa963a226ec167}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and A. Gatto} } @article {Mancini2013409, title = {A novel method for fast processing of large remote sensed image}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {8157 LNCS}, number = {PART 2}, year = {2013}, note = {cited By 1}, pages = {409-418}, abstract = {In this paper we present a novel approach to reduce the computational load of a CFAR detector. The proposed approach is based on the use of integral images to directly manage the presence of masked pixels or invalid data and reduce the computational time. The approach goes through the challenging problem of ship detection from remote sensed data. The capability of fast image processing allows to monitor the marine traffic and identify possible threats. The approach allows to significantly boost the performance up to 50x working with very high resolution image and large kernels. {\textcopyright} 2013 Springer-Verlag.}, doi = {10.1007/978-3-642-41184-7_42}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84884709393\&partnerID=40\&md5=335f37537f59723ffcfa04255b21821b}, author = {Adriano Mancini and Anna Nora Tassetti and Cinnirella, A. and Emanuele Frontoni and Primo Zingaretti} } @conference {Mancini20131589, title = {Road pavement crack automatic detection by MMS images}, booktitle = {2013 21st Mediterranean Conference on Control and Automation, MED 2013 - Conference Proceedings}, year = {2013}, note = {cited By 0}, pages = {1589-1596}, abstract = {The research topic was to test different feature extraction methods to localize road pavement cracks useful to construct a spatial database for the pavement distress monitoring. 
Several images were acquired by means of a line scan camera that assembled in a Mobile Mapping System (MMS) allows tracking directly the position of the images by a GPS-INS system. Following an automatic digital image processing was performed by means of several algorithms based on different approaches (edge detection and fuzzy set theory). The detected cracks were described with some parameters in relation to some shape characteristics (dimension, typology, direction), which are necessary to recognize the gravity of the road pavement conditions. The edge detection techniques tested in this research allowed identifying fatigue cracking or alligator cracking and also thin linear cracks in images with strong radiometric jumps by applying filters, gradient functions and morphological operators. The snake approach was one of them, in particular the type called Gradient Vector Flow (GVF). Another approach was based on the fuzzy theory. The advantage of this method is that the pixels, necessary to identify the cracks in road pavement, are darker than their surroundings in an image. The last stage was the pavement distress spatial database collection. The Mobile Mapping System (MMS) has allowed localizing the raster data and consequently the vector features of the detected cracks, associating into the table their attributes too. The proposed approaches allow to automatically localize and classify the kind of road pavement crack. 
{\textcopyright} 2013 IEEE.}, doi = {10.1109/MED.2013.6608934}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84885199881\&partnerID=40\&md5=bf9cd82e8356555a25eb7cc1fd02b547}, author = {Adriano Mancini and Eva Savina Malinverni and Emanuele Frontoni and Primo Zingaretti} } @conference {Mancini2013, title = {Smart vision system for shelf analysis in intelligent retail environments}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {4}, year = {2013}, note = {cited By 6}, abstract = {This paper aims to propose an innovative idea of an embedded intelligent, multimedia and interactive shop system where embedded vision systems can analyse human behaviours around shelves for interactivity and statistical purposes, mostly devoted to customer behaviour analysis, planogram maintenance and out of stock detection. We discuss the need for new services into the shop, involving consumers more directly and instigating them to increase their satisfaction and, as a consequence, their purchases. To do this, technology is very important and allows making interactions between costumers and products and between customers and the environment of the shop a rich source of marketing analysis. In particular we focus on concepts of monitoring and interactivity, introducing several emerging technologies in the field of retail environments. The main novelty of the paper is the general architecture of the system together with the introduction of a series of intelligent embedded systems, yet implemented and tested in a dataset recorded during the Euro Shop trade fair, in cooperation with Grottini group, a leading company in Retail Design industry based in Italy. Results are convincing and most of all the general architecture is affordable in this specific application. 
Copyright {\textcopyright} 2013 by ASME.}, doi = {10.1115/DETC2013-12317}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84899129851\&partnerID=40\&md5=9468fe9c8ec144cde0a360886f9aae83}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Valerio Placidi} } @conference {Mancini2012281, title = {Automatic road object extraction from Mobile Mapping Systems}, booktitle = {Proceedings of 2012 8th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2012}, year = {2012}, note = {cited By 3}, pages = {281-286}, abstract = {Mobile Mapping Systems (MMSs) often represent the best choice to provide an accurate 3D modeling of the environment, especially in urban streets where the aerial/satellite surveys do not provide accurate data. MMSs are equipped with many kinds of sensors, and, in particular, laser scanners that allow 2D/3D environment modeling from very dense point clouds. Usually an operator manually explores the point cloud to discover and mark a particular feature of interest (e.g., road line, cross-walk). Obviously this procedure is tedious and expensive. One of the greater challenges is to automatically extract objects/features from co-registered data coming from LiDAR, optical and positioning sensors. This paper presents an automatic feature/object approach to extract and then to georeference with high accuracy/precision horizontal road signs, mainly lanes and crosswalks. The proposed approach exploits image processing techniques and methods for the 3D to 2D re-projection of data. The results obtained demonstrate that is possible to achieve accuracy and precision in the range of one centimeter. 
{\textcopyright} 2012 IEEE.}, doi = {10.1109/MESA.2012.6275575}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84867444429\&partnerID=40\&md5=bbdfcd587f93e0fdf60be30447684e89}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti} } @conference {Bucchi2012207, title = {Summarization of echo-Doppler videos for computer-aided diagnosis}, booktitle = {Proceedings of 2012 8th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2012}, year = {2012}, note = {cited By 0}, pages = {207-210}, abstract = {Summarization of echo-Doppler videos allows reducing diagnosis time, improving comparison between videos and making a more efficient storing of them. This paper aims at providing a solution to the static summarization of echo-Doppler videos. A static summary of a video is a collection of keyframes together with a description of them. The selection of keyframes discussed in this paper is based on the analysis of properties of red blobs resulting from Power Doppler technique. The properties of red blobs are extracted by a robust thresholding algorithm in the HSL colour model and via a connected-component labelling algorithm. Keyframes extracted from the echo-Doppler video satisfy specific properties for the red blobs. The work is still in progress and we are now collecting data for the construction of a decision support system to help doctors in their diagnoses. First results are encouraging and future works will bring to an interesting computer-aided system. {\textcopyright} 2012 IEEE.}, doi = {10.1109/MESA.2012.6275563}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84867455278\&partnerID=40\&md5=2f62875baeba4f7c9118f7ebd26e06af}, author = {Bucchi, M. 
and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Mancini2011745, title = {Coalition formation for unmanned quadrotors}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {3}, number = {PARTS A AND B}, year = {2011}, note = {cited By 1}, pages = {745--752}, abstract = {Today Unmanned Aerial Vehicles (UAVs) and in particular quad-rotors represent novel platforms to accomplish a wide set of missions as surveillance, Search \& Rescue, inspection, photogrammetry. The main limitation of these vehicles is represented by the restricted operating area. The area is mainly limited by power supplies (batteries or fuel). A strategy to overcome this limitation is to increase the number of vehicles forming a coalition. The main benefits of coalition formation are the extended mission range and the capability to increase the sensorial set. Each vehicle is a part of a dynamic network that must be properly coordinated in order to optimize all the available resources. In this paper a new framework for simulation of unmanned vehicles in cooperative scenarios is first presented. The framework is based on the interaction of a physics-engine, which simulates the dynamics of vehicles and their interaction with world increasing the realism of simulation, and a simulation environment where the high-level strategy is designed/developed. A Model Predictive Control (MPC) is then introduced to solve the problem of leader-follower applied to quad-rotors. Using the developed framework and the MPC technique is possible to easily instantiate the coalition minimizing also a cost function. The obtained results from the control strategy point of view show that positioning error at steady state is equal to zero. The MPC allows also the modelling of different conflicting constraints as the control actions, positioning error, and fuel/energy consumption.
{\textcopyright} 2011 by ASME.}, doi = {10.1115/DETC2011-48904}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84863584753\&partnerID=40\&md5=9d10818fcd0cd5219ab5df1c9eb26502}, author = {Adriano Mancini and Benini, A. and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} } @conference {Catani2011889, title = {Efficient traffic simulation using busses as active sensor network}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {3}, number = {PARTS A AND B}, year = {2011}, note = {cited By 4}, pages = {889-894}, abstract = {In this paper we present a traffic simulation system using traffic data gathered from readings by GPS devices installed on board of busses, i.e., busses running on the road are used as a mobile sensor network. Traffic data are inferred from the comparison of data about normal speed and current speed in a certain position. The simulation system was developed to invent mechanisms that use such information for traffic optimization. In particular, simulations allow to manage traffic lights in real time and to control the traffic system in order to give priority to public transportation systems. {\textcopyright} 2011 by ASME.}, doi = {10.1115/DETC2011-49013}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84863576021\&partnerID=40\&md5=b5122447bb6ce4e611ea9e196886e59e}, author = {Ludovico Catani and Emanuele Frontoni and Primo Zingaretti and Di Pasquale, G.} } @article {Malinverni20111025, title = {Hybrid object-based approach for land use/land cover mapping using high spatial resolution imagery}, journal = {International Journal of Geographical Information Science}, volume = {25}, number = {6}, year = {2011}, note = {cited By 13}, pages = {1025-1043}, abstract = {Traditionally, remote sensing has employed pixel-based classification techniques to deal with land use/land cover (LULC) studies. 
Generally, pixel-based approaches have been proven to work well with low spatial resolution imagery (e.g. Landsat or System Pour L{\textquoteright}Observation de la Terre sensors). Now, however, commercially available high spatial resolution images (e.g. aerial Leica ADS40 and Vexcel UltraCam sensors, and satellite IKONOS, Quickbird, GeoEye and WorldView sensors) can be problematic for pixel-based analysis due to their tendency to oversample the scene. This is driving research towards object-based approaches. This article proposes a hybrid classification method with the aim of incorporating the advantages of supervised pixel-based classification into object-based approaches. The method has been developed for medium- scale (1:10,000) LULC mapping using ADS40 imagery with 1 m ground sampling distance. First, spatial information is incorporated into a pixel-based classification (AdaBoost classifier) by means of additional texture features (Haralick, Gabor, Law features), which can be selected {\textquoteright}ad hoc{\textquoteright} according to optimal training samples ({\textquoteright}Relief-F{\textquoteright} pproach,Mahalanobis distances). Then a rule-based approach sorts segmented regions into thematic CORINE Land Cover classes in terms of membership class percentages (a modified Winner-Takes-All approach) and shape parameters. Finally, ancillary data (roads, rivers, etc.) are exploited to increase classification accuracy. The experimental results show that the proposed hybrid approach allows the extraction of more LULC classes than conventional pixel-based methods, while improving classification accuracy considerably. A second contribution of this article is the assessment of classification reliability by implementing a stability map, in addition to confusion matrices. 
{\textcopyright} 2011 Taylor \& Francis.}, doi = {10.1080/13658816.2011.566569}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960685342\&partnerID=40\&md5=18767c8a88bf2abff53ef96ec138f0aa}, author = {Eva Savina Malinverni and Anna Nora Tassetti and Adriano Mancini and Primo Zingaretti and Emanuele Frontoni and A. Bernardini} } @conference {Benini2011406, title = {A simulation framework for coalition formation of Unmanned Aerial Vehicles}, booktitle = {2011 19th Mediterranean Conference on Control and Automation, MED 2011}, year = {2011}, note = {cited By 3}, pages = {406-411}, abstract = {Unmanned Aerial Vehicles (UAVs) and in particular quad-rotors are gaining an increasing interest owing to their flexibility and versatility. Today the challenge is to integrate these versatile platforms in a wide fleet in order to perform cooperative tasks as surveillance, search \& rescue, inspection. The coalition formation problem is a pre-condition for cooperative missions. This problem can be solved with different methodologies from biologically-inspired algorithms to parasocial consensus sampling ones. In this paper a new framework for simulation of unmanned vehicles in cooperative scenarios is first presented. Then a novel method that exploits the benefits of Model Predictive Control (MPC) for a coalition formation problem (leader-follower) is introduced. The obtained results evidence the good performance of MPC to solve the problem of coalition formation for unmanned aerial vehicles finding the optimal solution taking into account different kind of constraints. The developed framework allows also to easily change from simulated agent to real one. {\textcopyright} 2011 IEEE.}, doi = {10.1109/MED.2011.5983163}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80052355021\&partnerID=40\&md5=51acc4875190b250928b66e5b54b01a0}, author = {Benini, A. 
and Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} } @conference {Frontoni20111047, title = {UAVS safe landing using range images}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {3}, number = {PARTS A AND B}, year = {2011}, note = {cited By 0}, pages = {1047-1052}, abstract = {In this paper a mixed vision-range based approach, based on Kinect technology, for safe landing of an Unmanned Aerial Vehicle (UAV) is proposed. The guidance system allows a remote user to define target areas from an high resolution aerial or satellite image to determine the waypoints of the navigation trajectory or the landing area. The system is based on our previous work on UAV navigation and landing: a feature-based image matching algorithms finds the natural landmarks and gives feedbacks to the control system for autonomous navigation and landing. An algorithm for safe landing areas detection is proposed, based on the use of 4D RGBD (Red, Green, Blue, Distance) image analysis. The helicopter is required to navigate from an initial to a final position in a partially known environment, to locate a landing area and to land on it. Results show the appropriateness of the vision-based approach that does not require any artificial landmark (e.g., helipad) and is quite robust to occlusions, light variations and high vibrations. 
{\textcopyright} 2011 by ASME.}, doi = {10.1115/DETC2011-49012}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84863583459\&partnerID=40\&md5=4a39fb31b9b944e81de89b9c26185af7}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @article {Cesetti2011157, title = {A visual global positioning system for unmanned aerial vehicles used in photogrammetric applications}, journal = {Journal of Intelligent and Robotic Systems: Theory and Applications}, volume = {61}, number = {1-4}, year = {2011}, note = {cited By 10}, pages = {157-168}, abstract = {The combination of photogrammetric aerial and terrestrial recording methods can provide new opportunities for photogrammetric applications. A UAV (Unmanned Aerial Vehicle), in our case a helicopter system, can cover both the aerial and quasi-terrestrial image acquisition methods. A UAV can be equipped with an on-board high resolution camera and a priori knowledge of the operating area where to perform photogrammetric tasks. In this general scenario our paper proposes vision-based techniques for localizing a UAV. Only natural landmarks provided by a feature tracking algorithm will be considered, without the help of visual beacons or landmarks with known positions. The novel idea is to perform global localization, position tracking and localization failure recovery (kidnapping) based only on visual matching between current view and available georeferenced satellite images. The matching is based on SIFT features and the system estimates the position of the UAV and its altitude on the base of the reference image. The vision system replaces the GPS signal combining position information from visual odometry and georeferenced imagery. Georeferenced satellite or aerial images must be available on-board beforehand or downloaded during the flight. 
The growing availability of high resolution satellite images (e.g., provided by Google Earth or other local information sources) makes this topic very interesting and timely. Experiments with both synthetic (i.e., taken from satellites or datasets and pre elaborated) and real world images have been performed to test the accuracy and the robustness of our method. Results show sufficient performance if compared with common GPS systems and give a good performance also in the altitude estimation, even if in this last case there are only preliminary results. {\textcopyright} 2010 Springer Science+Business Media B.V.}, doi = {10.1007/s10846-010-9489-5}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79951511981\&partnerID=40\&md5=22fb7414e9a79d5c70082f7e0e8c0684}, author = {Cesetti, A. and Emanuele Frontoni and Adriano Mancini and Ascani, A. and Primo Zingaretti and Sauro Longhi} } @conference {Cesetti2010125, title = {Autonomous safe landing of a vision guided helicopter}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 2}, pages = {125-130}, abstract = {In this paper a vision-based system for safe autonomous landing of a helicopter-based Unmanned Aerial Vehicle (UAV) is presented. The remote user selects target areas from high resolution aerial or satellite images. These areas are tracked by a feature-based image matching algorithm that identifies natural landmarks and gives feedbacks for control purposes. {\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5552081}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957363095\&partnerID=40\&md5=105d5126c8256e58ab2a67b66bd8c271}, author = {Cesetti, A. 
and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Catani2010319, title = {A framework based on vision sensors for the automatic management of exchange parking areas}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 3}, pages = {319-324}, abstract = {This paper proposes a framework for the automatic management of exchange parking areas, usually located in the periphery of large cities. These parks are used for medium/long period stops of private or public vehicles and the subsequent sorting of passengers to public transportation networks. The objective of this paper is to analyze and stress the potential of a framework that exploits only vision sensors, which are very versatile and minimally invasive. Using a visual sensor network and the proposed tracking approach we are able to know and track the position of every bus in the exchange station and to send data to the planning station, which allocates slots for other busses and manages public information. Preliminary results are promising and show the feasibility of the proposed method, so that future research work is directed towards a distributed implementation of the framework with stand-alone and embedded devices under the control of a supervisor. 
{\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5552047}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957341506\&partnerID=40\&md5=accfd4f3b19a01e8662f81ce6185545b}, author = {Ludovico Catani and Emanuele Frontoni and Primo Zingaretti} } @conference {Malinverni20102836, title = {LCLU Information System for object-oriented nomenclature}, booktitle = {International Geoscience and Remote Sensing Symposium (IGARSS)}, year = {2010}, note = {cited By 0}, pages = {2836-2839}, abstract = {A Land Cover/Land Use (LCLU) Information System is proposed as a new dynamic and flexible approach to describe landscape objects. It is able to give a deeper and more realistic thematic description by storing membership land cover attributes for each polygon automatically extracted and classified by the T-MAP software. The proposed approach can overcome the traditional "hard" classification by taking directly into account "fuzzy" cover components and making the classification approach more bounded with the polygon characteristics and their changes. The LCLU Information System can be easily integrated with different databases, making it suitable for different nomenclatures and further analysis, regarding environmental indexes, class updating and classification stability assessment. 
{\textcopyright} 2010 IEEE.}, doi = {10.1109/IGARSS.2010.5651398}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78650884639\&partnerID=40\&md5=a38544ae56c5f11edee2b11e7730439c}, author = {Eva Savina Malinverni and Anna Nora Tassetti and Primo Zingaretti} } @conference {Zingaretti2010185, title = {Particle clustering to improve omnidirectional localization in outdoor environments}, booktitle = {Proceedings of the ASME International Design Engineering Technical Conferences and Computers and Information in Engineering Conference 2009, DETC2009}, volume = {3}, year = {2010}, note = {cited By 1}, pages = {185-192}, abstract = {Monte Carlo Localization (MCL) is a common method for self-localization of a mobile robot under the assumption that a map of the environment is available. In addition to laser scanners and sonar sensors, localization approaches using vision sensors have also been recently developed with good results. In this paper we present two variations to improve the standard implementation of the MCL algorithm. The first change consists in a new strategy for the generation of particles, both at the initialization and at the resampling stage, which tries to generate new particles near the position of images in the learning dataset or in the neighborhood of particles with higher weights in the previous estimate, respectively. The second variation is related to a new approach to the estimate of the robot position, now based on two steps: clustering of particles and taking as estimate of robot position the center of the cluster, computed as a weighted sum of particle weights, with higher weight. The improved MCL algorithm described in this paper is compared with the standard MCL algorithm in terms of localization accuracy. In particular, tests were performed using local feature matching of omnidirectional images implemented on a real robot system operating in large outdoor environments with high dynamic content. 
Obtained results show that the localization accuracy of the improved MCL algorithm is more than twice that of the standard MCL algorithm. Copyright {\textcopyright} 2009 by ASME.}, doi = {10.1115/DETC2009-87373}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77953702059\&partnerID=40\&md5=bf8d639ae45538fb4d049602073a0907}, author = {Primo Zingaretti and Ascani, A. and Adriano Mancini and Emanuele Frontoni} } @article {Khoshelham2010123, title = {Performance evaluation of automated approaches to building detection in multi-source aerial data}, journal = {ISPRS Journal of Photogrammetry and Remote Sensing}, volume = {65}, number = {1}, year = {2010}, note = {cited By 25}, pages = {123-133}, abstract = {Automated approaches to building detection in multi-source aerial data are important in many applications, including map updating, city modeling, urban growth analysis and monitoring of informal settlements. This paper presents a comparative analysis of different methods for automated building detection in aerial images and laser data at different spatial resolutions. Five methods are tested in two study areas using features extracted at both pixel level and object level, but with the strong prerequisite of using the same training set for all methods. The evaluation of the methods is based on error measures obtained by superimposing the results on a manually generated reference map of each area. The results in both study areas show a better performance of the Dempster-Shafer and the AdaBoost methods, although these two methods also yield a number of unclassified pixels. The method of thresholding a normalized DSM performs well in terms of the detection rate and reliability in the less vegetated Mannheim study area, but also yields a high rate of false positive errors. The Bayesian methods perform better in the Memmingen study area where buildings have more or less the same heights. 
{\textcopyright} 2009 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS).}, doi = {10.1016/j.isprsjprs.2009.09.005}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-72949110177\&partnerID=40\&md5=016e5e523686951f9fa75cccf7957c8d}, author = {Khoshelham, K. and C. Nardinocchi and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @article {Bernardini201043, title = {Pixel, object and hybrid classification comparisons}, journal = {Journal of Spatial Science}, volume = {55}, number = {1}, year = {2010}, note = {cited By 3}, pages = {43-54}, abstract = {The choice of the best classification approach for thematic map generation relies on many factors, such as image resolution and minimum mapping unit. The generalized GIS-ready products derived from the results of pixel-based approaches and the availability of higherresolution imagery have directed research towards object-based classification approaches. In this paper we present the superior performance of a hybrid methodology that combines the results of automatic segmentation with the land cover information derived from a pixel classification by means of the Winner Takes All (WTA) algorithm. Land use and land cover results obtained through this hybrid classification approach are compared with those of a One Against All (OAA) object-oriented classification approach. {\textcopyright} 2010 Surveying and Spatial Sciences Institute and Mapping Sciences Institute, Australia.}, doi = {10.1080/14498596.2010.487641}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80052137947\&partnerID=40\&md5=74b7a4fe82987b5213c27b7315b0f768}, author = {A. 
Bernardini and Emanuele Frontoni and Eva Savina Malinverni and Adriano Mancini and Anna Nora Tassetti and Primo Zingaretti} } @conference {Mancini2010448, title = {Road change detection from multi-spectral aerial data}, booktitle = {Proceedings - International Conference on Pattern Recognition}, year = {2010}, note = {cited By 2}, pages = {448-451}, abstract = {The paper presents a novel approach to automate the Change Detection (CD) problem for the specific task of road extraction. Manual approaches to CD fail in terms of the time for releasing updated maps; in the contrary, automatic approaches, based on machine learning and image processing techniques, allow to update large areas in a short time with an accuracy and precision comparable to those obtained by human operators. This work is focused on the road-graph update starting from aerial, multi-spectral data. Georeferenced, ground data, acquired by a GPS and an inertial sensor, are integrated with aerial data to speed up the change detector. After roads extraction by means of a binary AdaBoost classifier, the old road-graph is updated exploiting a particle filter. In particular this filter results very useful to link (track) parts of roads not extracted by the classifier due to the presence of occlusions (e.g., shadows, trees). 
{\textcopyright} 2010 IEEE.}, doi = {10.1109/ICPR.2010.118}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78149486517\&partnerID=40\&md5=6fcdcfdf8b76c0c7c79a8cd6f6eaef25}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti} } @conference {Frontoni2010428, title = {Robot localization in urban environments using omnidirectional vision sensors and partial heterogeneous apriori knowledge}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 3}, pages = {428-433}, abstract = {This paper addresses the problem of long term mobile robot localization in large urban environments using a partial apriori knowledge made by different kind of images. Typically, GPS is the preferred sensor for outdoor operation. However, using GPS-only localization methods leads to significant performance degradation in urban areas where tall nearby structures obstruct the view of the satellites. In our work, we use omnidirectional vision-based sensors to complement GPS and odometry and provide accurate localization. We also present some novel Monte Carlo Localization optimizations and we introduce the concept of online knowledge acquisition and integration presenting a framework able to perform long term robot localization in real environments. The vision system identifies prominent features in the scene and matches them with a database of geo-referenced features already known (with a partial coverage of the environment and using both directional and omnidirectional images and with different resolutions) or learned and integrated during the localization process (omnidirectional images only). Results of successful robot localization in the old town of Fermo are presented. 
The whole architecture behaves well also in long term experiments, showing a suitable and good system for real life robot applications with a particular focus on the integration of different knowledge sources. {\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5551994}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957367131\&partnerID=40\&md5=619cedb309b8e6e9bf546721e565a793}, author = {Emanuele Frontoni and Ascani, A. and Adriano Mancini and Primo Zingaretti} } @article {Cesetti2010233, title = {A Vision-based guidance system for UAV navigation and safe landing using natural landmarks}, journal = {Journal of Intelligent and Robotic Systems: Theory and Applications}, volume = {57}, number = {1-4}, year = {2010}, note = {cited By 45}, pages = {233-257}, abstract = {In this paper a vision-based approach for guidance and safe landing of an Unmanned Aerial Vehicle (UAV) is proposed. The UAV is required to navigate from an initial to a final position in a partially known environment. The guidance system allows a remote user to define target areas from a high resolution aerial or satellite image to determine either the waypoints of the navigation trajectory or the landing area. A feature-based image-matching algorithm finds the natural landmarks and gives feedbacks to an onboard, hierarchical, behaviour-based control system for autonomous navigation and landing. Two algorithms for safe landing area detection are also proposed, based on a feature optical flow analysis. The main novelty is in the vision-based architecture, extensively tested on a helicopter, which, in particular, does not require any artificial landmark (e.g., helipad). Results show the appropriateness of the vision-based approach, which is robust to occlusions and light variations. 
{\textcopyright} 2009 Springer Science+Business Media B.V.}, doi = {10.1007/s10846-009-9373-3}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84893415343\&partnerID=40\&md5=1af91a05c14c688e97131ab24f43e2bd}, author = {Cesetti, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Sauro Longhi} } @conference {Ascani2010415, title = {Wireless sensor network for exhausted oil collection management}, booktitle = {Proceedings of 2010 IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2010}, year = {2010}, note = {cited By 1}, pages = {415-420}, abstract = {Sensor networks have a large diffusion in several areas, using different kind of sensors, different line for data transmission and different ways for their collection and management. In this paper we present a particular typology of sensor network, created for optimally manage the collection of exhausted oil stored in particular bins located in a wide area (about 20.000 km2). A sensor is located on each bin measuring the distance from the lid of the collected oil; a transmission module communicate, once a day, the oil and the battery level to a web server via GPRS. In this way the control center (on a web server) has a complete overview of the situation in a wide zone, in way to optimize the run of collectors only in points that require an operation. {\textcopyright} 2010 IEEE.}, doi = {10.1109/MESA.2010.5551992}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957355959\&partnerID=40\&md5=623731d74fba78b45e4b811bf63ec2b9}, author = {Ascani, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @article {Mancini2009307, title = {A framework for simulation and testing of UAVs in cooperative scenarios}, journal = {Journal of Intelligent and Robotic Systems: Theory and Applications}, volume = {54}, number = {1-3 SPEC. 
ISS.}, year = {2009}, note = {cited By 18}, pages = {307-329}, abstract = {Today, Unmanned Aerial Vehicles (UAVs) have deeply modified the concepts of surveillance, Search\&Rescue, aerial photogrammetry, mapping, etc. The kinds of missions grow continuously; missions are in most cases performed by a fleet of cooperating autonomous and heterogeneous vehicles. These systems are really complex and it becomes fundamental to simulate any mission stage to exploit benefits of simulations like repeatability, modularity and low cost. In this paper a framework for simulation and testing of UAVs in cooperative scenarios is presented. The framework, based on modularity and stratification in different specialized layers, allows an easy switching from simulated to real environments, thus reducing testing and debugging times, especially in a training context. Results obtained using the proposed framework on some test cases are also reported. {\textcopyright} 2008 Springer Science+Business Media B.V.}, doi = {10.1007/s10846-008-9268-8}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-56649092912\&partnerID=40\&md5=dc161c1b0a0533432e58fcdf588aec40}, author = {Adriano Mancini and Cesetti, A. and Iual{\`e}, A. and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} } @article {Zingaretti2009500, title = {A hybrid approach to land cover classification from multi spectral images}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {5716 LNCS}, year = {2009}, note = {cited By 5}, pages = {500-508}, abstract = {This work is part of a wider project whose general objective is to develop a methodology for the automatic classification, based on CORINE land-cover (CLC) classes, of high resolution multispectral IKONOS images. The specific objective of this paper is to describe a new methodology for producing really exploitable results from automatic classification algorithms. 
Input data are basically constituted by multispectral images, integrated with textural and contextual measures. The output is constituted by an image with each pixel assigned to one out of 15 classes at the second level of the CLC legend or let unclassified (somehow a better solution than a classification error), plus a stability map that helps users to separate the regions classified with high accuracy from those whose classification result should be verified before being used. {\textcopyright} 2009 Springer Berlin Heidelberg.}, doi = {10.1007/978-3-642-04146-4_54}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-76249123810\&partnerID=40\&md5=fcaeb5e46615d9bbdf96fcbf4ffac41a}, author = {Primo Zingaretti and Emanuele Frontoni and Eva Savina Malinverni and Adriano Mancini} } @conference {Zingaretti2009185, title = {Particle clustering to improve omnidirectional localization in outdoor environments}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference}, volume = {3}, year = {2009}, note = {cited By 0}, pages = {185-192}, abstract = {Monte Carlo Localization (MCL) is a common method for self-localization of a mobile robot under the assumption that a map of the environment is available. In addition to laser scanners and sonar sensors, localization approaches using vision sensors have also been recently developed with good results. In this paper we present two variations to improve the standard implementation of the MCL algorithm. The first change consists in a new strategy for the generation of particles, both at the initialization and at the resampling stage, which tries to generate new particles near the position of images in the learning dataset or in the neighborhood of particles with higher weights in the previous estimate, respectively. 
The second variation is related to a new approach to the estimate of the robot position, now based on two steps: clustering of particles and taking as estimate of robot position the center of the cluster, computed as a weighted sum of particle weights, with higher weight. The improved MCL algorithm described in this paper is compared with the standard MCL algorithm in terms of localization accuracy. In particular, tests were performed using local feature matching of omnidirectional images implemented on a real robot system operating in large outdoor environments with high dynamic content. Obtained results show that the localization accuracy of the improved MCL algorithm is more than twice that of the standard MCL algorithm. {\textcopyright} 2009 by ASME.}, doi = {10.1115/DETC2009-87373}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-82155166811\&partnerID=40\&md5=46091a6bd1c837a8652000dffdfa0861}, author = {Primo Zingaretti and Ascani, A. and Adriano Mancini and Emanuele Frontoni} } @conference {Mancini20092544, title = {RoboBuntu: A linux distribution for mobile robotics}, booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}, year = {2009}, note = {cited By 2}, pages = {2544-2549}, abstract = {During last years Linux started to climb the market of operating systems (OSs), and Ubuntu, derived by Debian OS, has become a good alternative to common OSs like Windows XP or Vista. The mobile robotics scientific community makes use of Linux based OSs to avoid the lack of stability that affects Microsoft OSs, especially when real time conditions must be satisfied. In this paper we present the Linux distribution RoboBuntu, acronym formed by the union of ROBOt and uBUNTU, to overcome the almost totally independent robotic software platforms existing today. The key idea behind RoboBuntu is the integration of different tools for mobile robotics into an embedded Ubuntu distribution. 
Another important characteristics of RoboBuntu is that every "hard step", like installation and configuration of OS and tools, is hidden to common users. In particular, RoboBuntu can be used either by students or researchers, as LiveCd, permanent installation on standard hard drive or, more interesting, on a USB storage flash disk. {\textcopyright} 2009 IEEE.}, doi = {10.1109/ROBOT.2009.5152548}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-70350399144\&partnerID=40\&md5=7ad3e306e93a56d841f7c4fdaf250a03}, author = {Adriano Mancini and Emanuele Frontoni and Ascani, A. and Primo Zingaretti} } @conference {Cesetti2009, title = {A single-camera feature-based vision system for helicopter autonomous landing}, booktitle = {2009 International Conference on Advanced Robotics, ICAR 2009}, year = {2009}, note = {cited By 4}, abstract = {In this paper a feature based single camera vision system for the safe landing of an Unmanned Aerial Vehicle (UAV) is proposed. The autonomous helicopter used for tests is required to navigate from an initial to a final position in a partially known environment, to locate a landing area and to land on it. The algorithm proposed for the detection of safe landing areas is based on the analysis of optical flow and of mutual geometric position of different kinds of features, observed from different points of view. Vision allows estimating the position and velocity of a set of features with respect to the helicopter while the onboard, hierarchical, behavior-based control system autonomously guides the helicopter. Results, obtained using real data and a real helicopter in a outdoor scenario, show the appropriateness of the vision-based approach. 
It does not require any artificial landmark (e.g., helipad), is able to estimate correctly and autonomously safe landing areas and is quite robust to occlusions.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-70449334565\&partnerID=40\&md5=3373a2496d17d58330e7a887f552c6ba}, author = {Cesetti, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Sauro Longhi} } @conference {Frontoni2009, title = {Stability maps for really exploitable automatic classification results}, booktitle = {2009 17th International Conference on Geoinformatics, Geoinformatics 2009}, year = {2009}, note = {cited By 1}, abstract = {The paper describes a new methodology for producing really exploitable results from automatic classification algorithms. The output of these algorithms is usually constituted by an image with each region assigned to one out of n classes. If the end user, on the basis of results obtained from a control set provided with a ground truth, simply knows that classification over the whole dataset can be considered correct at, for example, 85\% (s)he cannot know where correct and erroneously classified regions are really located in the whole dataset. Obviously, the result obtained can be exploited to effectively compute global indexes over the dataset, but it cannot be used as a thematic map. Thus, in addition to the assignment of a class to each region we propose an approach that provides a stability map, a binary image that separates regions (S) classified with high accuracy from those (U) whose classification result should be verified before being used. 
Two further benefits derive from the construction of the stability map: the control set can be used to set up a good threshold for binarizing the stability map (that is, a threshold by which all regions S are effectively correctly classified); unreliable regions U can help the end user to identify principal causes of (types of regions leading to) misclassification and corresponding (fuzzy, neural, rule based, etc.) approaches to overcome them.}, doi = {10.1109/GEOINFORMATICS.2009.5293443}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-74349085598\&partnerID=40\&md5=9cde6053a2e8199444209988fccd5e2c}, author = {Emanuele Frontoni and A. Bernardini and Eva Savina Malinverni and Adriano Mancini and Primo Zingaretti} } @conference {Cesetti2009910, title = {Vision-based autonomous navigation and landing of an unmanned aerial vehicle using natural landmarks}, booktitle = {2009 17th Mediterranean Conference on Control and Automation, MED 2009}, year = {2009}, note = {cited By 13}, pages = {910-915}, abstract = {This paper presents the design and implementation of a vision-based navigation and landing algorithm for an autonomous helicopter. The vision system allows to define target areas from a high resolution aerial or satellite image to determine the waypoints of the navigation trajectory or the landing area. The helicopter is required to navigate from an initial position to a final position in a partially known environment using GPS and vision, to locate a landing target (a helipad of a known shape or a natural landmark) and to land on it. The vision system, using a feature-based image matching algorithm, finds the area and gives feedbacks to the control system for autonomous landing. Vision is used for accurate target detection, recognition and tracking. The helicopter updates its landing target parameters owing to vision and uses an on board behavior-based controller to follow a path to the landing site. 
Results show the appropriateness of the vision-based approach that does not require any artificial landmark (e.g., helipad) and is quite robust to occlusions, light variations and seasonal changes (e.g., brown or green leaves). {\textcopyright} 2009 IEEE.}, doi = {10.1109/MED.2009.5164661}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84885338997\&partnerID=40\&md5=e544d46c421bd62be9c0ad9fa4ac016b}, author = {Cesetti, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti and Sauro Longhi} } @conference {Mancini2009, title = {A winner takes all mechanism for automatic object extraction from multi-source data}, booktitle = {2009 17th International Conference on Geoinformatics, Geoinformatics 2009}, year = {2009}, note = {cited By 3}, abstract = {Automatic object extraction from multi-source aerial data is a desirable property for many activities, such as detecting 3D city model changes or updating road databases. This paper applies the Winner Takes All (WTA) mechanism, derived from other research fields, to combine the benefits of pixel and region classification. We fuse LiDAR data and multi-spectral high-resolution images to generate the set of features used by boosted classifiers to detect buildings, trees, bare land and grass. The main benefit of region based classification is that it removes the sensibility to noise of pixel based classifiers. The WTA approach is useful especially when pixel based approaches leave many pixels unclassified; typical cases are borders of building roofs or thin canopies, where LiDAR data are often noisy. 
Results in an urban environment using high-resolution LiDAR and multi-spectral data are presented comparing the performance of pixel, region and WTA approaches.}, doi = {10.1109/GEOINFORMATICS.2009.5293425}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-74349100965\&partnerID=40\&md5=d8aa4248c0d2835fde5d5a907e76ad8d}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti} } @conference {Zingaretti2008227, title = {Autonomous helicopter for surveillance and security}, booktitle = {2007 Proceedings of the ASME International Design Engineering Technical Conferences and Computers and Information in Engineering Conference, DETC2007}, volume = {4}, year = {2008}, note = {cited By 0}, pages = {227-234}, abstract = {Unmanned Aerial Vehicles represent today an advanced and complex robotics platform for novel tasks. For example, UAVs can be used in applications for traffic monitoring and surveillance, emergency services assistance, photogrammetry and surveying. Generally, an UAV must be fully autonomous; autonomy is accomplished by a complex interconnection of systems related to a wide range of topics, e.g., flight low level control, navigation and task-based planning, elaboration of sensor signals, software architecture for reactive behaviours, communication. Today the challenge is the ability to insert UAVs in a cooperative network based on autonomous agents as UAV, UGV (Unmanned Ground Vehicle) to accomplish a specific task a priori defined. In this paper we introduce a prototype of autonomous aerial vehicle, the Helibot helicopter, specifically designed for applications as surveillance and security. Copyright {\textcopyright} 2007 by ASME.}, doi = {10.1115/DETC2007-35427}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-44849089281\&partnerID=40\&md5=722c3bbead7b4107ed7b860dd3c492b3}, author = {Primo Zingaretti and Adriano Mancini and Emanuele Frontoni and Monteri{\`u}, A. 
and Sauro Longhi} } @conference {Ascani20083933, title = {Feature group matching for appearance-based localization}, booktitle = {2008 IEEE/RSJ International Conference on Intelligent Robots and Systems, IROS}, year = {2008}, note = {cited By 24}, pages = {3933-3938}, abstract = {Local feature matching has become a commonly used method to compare images. For mobile robots, a reliable method for comparing images can constitute a key component for localization tasks. In this paper, we address the issues of appearance-based topological and metric localization by introducing a novel group matching approach to select less but more robust features to match the current robot view with reference images. Feature group matching is based on the consideration that feature descriptors together with spatial relations are more robust than classical approaches. Our datasets, each consisting of a large number of omnidirectional images, have been acquired over different day times (different lighting conditions) both in indoor and outdoor environments. The feature group matching outperforms the SIFT in indoor localization showing better performances both in the case of topological and metric localization. In outdoor SURF remains the best feature extraction method, as reported in literature. {\textcopyright}2008 IEEE.}, doi = {10.1109/IROS.2008.4651023}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-69549135915\&partnerID=40\&md5=26b54613d0a4e016f6a3b8bec2face46}, author = {Ascani, A. 
and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @article {Cesetti200817, title = {From simulated to real scenarios: A framework for multi-UAVs}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {5325 LNAI}, year = {2008}, note = {cited By 2}, pages = {17-28}, abstract = {In this paper a framework for simulation of Unmanned Aerial Vehicles (UAVs), oriented to rotary wings aerial vehicles, is presented. It allows UAV simulations for stand-alone agents or multi-agents exchanging data in cooperative scenarios. The framework, based on modularity and stratification in different specialized layers, allows an easy switching from simulated to real environments, thus reducing testing and debugging times. CAD modelling supports the framework mainly with respect to extraction of geometrical parameters and virtualization. Useful applications of the framework include pilot training, testing and validation of UAVs control strategies, especially in an educational context, and simulation of complex missions. {\textcopyright} 2008 Springer Berlin Heidelberg.}, doi = {10.1007/978-3-540-89076-8_6}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-58049115678\&partnerID=40\&md5=8301ffc2a6a97aefca4df012079ad726}, author = {Cesetti, A. and Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} } @conference {Ascani2008576, title = {Robot localization using omnidirectional vision in large and dynamic outdoor environments}, booktitle = {2008 IEEE/ASME International Conference on Mechatronics and Embedded Systems and Applications, MESA 2008}, year = {2008}, note = {cited By 3}, pages = {576-581}, abstract = {Local feature matching has become a commonly used method to compare images. For mobile robots, a reliable method for comparing images can constitute a key component for localization tasks. 
In this paper we present a mobile robot localization system based on local feature matching of omnidirectional images. In particular, we address the issues of appearance-based topological localization by comparing common feature-extractor methods (SIFT and SURF) to select robust features to match the current robot view with reference images. Our datasets, each consisting of a large number of omnidirectional images, have been acquired over different day times (different lighting conditions) and dynamic content in large outdoor environments (over 80.000 m2). Two different approaches (WTA and MCL) were used to evaluate performances, which, in general, are satisfactory. In particular, the use of Monte Carlo particle filtering improves topological localization results for all datasets with all algorithms. {\textcopyright} 2008 IEEE.}, doi = {10.1109/MESA.2008.4735695}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-60749133774\&partnerID=40\&md5=71ab60c63c96799064ca0bbb23fb7656}, author = {Ascani, A. and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Frontoni2007117, title = {Adaptive and fast scale invariant feature extraction}, booktitle = {Proceedings of the 1st International Workshop on Robot Vision; In Conjunction with VISAPP 2007}, year = {2007}, note = {cited By 6}, pages = {117-125}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot vision, object recognition, motion estimation, etc. Still, the parameter settings are not fully investigated, especially when dealing with variable lighting conditions. In this work, we propose a SIFT improvement that allows feature extraction and matching between images taken under different illumination. Also an interesting approach to reduce the SIFT computational time is presented. 
Finally, results of robot vision based localization experiments using the proposed approach are presented.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-58149132302\&partnerID=40\&md5=a3a9d9ab504182d6ad9d69f9dfbe0344}, author = {Emanuele Frontoni and Primo Zingaretti} } @conference {Zingaretti2007273, title = {Automatic extraction of LIDAR data classification rules}, booktitle = {Proceedings - 14th International conference on Image Analysis and Processing, ICIAP 2007}, year = {2007}, note = {cited By 4}, pages = {273-278}, abstract = {LIDAR (Light Detection And Ranging) data are a primary data source for digital terrain model (DTM) generation and 3D city models. This paper presents an AdaBoost algorithm for the identification of rules for the classification of raw LIDAR data mainly as buildings, ground and vegetation. First raw data are filtered, interpolated over a grid and segmented. Then geometric and topological relationships among regions resulting from segmentation constitute the input to the tree-structured classification algorithm. Results obtained on data sets gathered over the town of Pavia (Italy) are compared with those obtained by a rule-based approach previously presented by the authors for the classification of the regions. {\textcopyright} 2007 IEEE.}, doi = {10.1109/ICIAP.2007.4362791}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-48149101919\&partnerID=40\&md5=6578b3da6a7d08ffa58b7e9b97f98d95}, author = {Primo Zingaretti and Emanuele Frontoni and G. Forlani and C. Nardinocchi} } @conference {Zingaretti2007, title = {Comparison and fusion of vision and range measurements for robot pose estimation}, booktitle = {2007 Mediterranean Conference on Control and Automation, MED}, year = {2007}, note = {cited By 0}, abstract = {Multiple sensor fusion for robot pose estimation has attracted a lot of interest in recent years. 
Monte Carlo Localization (MCL) is a common method for self-localization of a mobile robot under the assumption that a map of the environment is available. In this paper we first compare pure vision-based with sonar-based MCL approaches in terms of localization accuracy, and then we show how the fusion of vision and range measurements improves the overall accuracy. Experiments were performed in an environment with high perceptual aliasing like our department corridors. They demonstrated that fusing simple and computationally inexpensive sensory information, coming from omnidirectional cameras and sonar sensors, can allow a mobile robot to precisely locate itself. {\textcopyright}2007 IEEE.}, doi = {10.1109/MED.2007.4433854}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-50249102768\&partnerID=40\&md5=fbd623d1f43921d58130364b7f5c60ec}, author = {Primo Zingaretti and Emanuele Frontoni} } @conference {Frontoni2007, title = {Prototype UAV helicopter working in cooperative environments}, booktitle = {IEEE/ASME International Conference on Advanced Intelligent Mechatronics, AIM}, year = {2007}, note = {cited By 7}, abstract = {Today small autonomous helicopters offer a low budget platform for aerial applications such as surveillance (both military and civilian), land management, and earth science. In this paper we introduce a prototype of autonomous aerial vehicle, the Helibot helicopter, specifically designed for applications in a cooperative network based on autonomous agents as UAV and UGV. We present our scalable and robust architecture, focusing in particular on hardware and real time solutions. A mechanical structure for safe testing is then presented. {\textcopyright}2007 IEEE.}, doi = {10.1109/AIM.2007.4412564}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-48649092977\&partnerID=40\&md5=79d155f3eb244add87a4d8ba5d7616ed}, author = {Emanuele Frontoni and Adriano Mancini and Caponetti, F. 
and Primo Zingaretti and Sauro Longhi} } @conference {Mancini2007, title = {Safe flying for an UAV helicopter}, booktitle = {2007 Mediterranean Conference on Control and Automation, MED}, year = {2007}, note = {cited By 9}, abstract = {Today small autonomous helicopters offer a low budget platform for aerial applications such as surveillance (both military and civil), land management and earth sciences. In this paper we introduce a prototype of autonomous aerial vehicle, the Helibot helicopter, specifically designed for applications in cooperative networks. Fundamental steps in the design process of an UAV are shown. We also present work in progress in the field of failure detection and a novel idea to the problem of failure recovery using a terrain vision system. {\textcopyright}2007 IEEE.}, doi = {10.1109/MED.2007.4433946}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-50249099214\&partnerID=40\&md5=290b41c35e2f9a3766073505671d036a}, author = {Adriano Mancini and Caponetti, F. and Monteri{\`u}, A. and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} } @conference {Zingaretti2007a, title = {Vision and sonar sensor fusion for mobile robot localization in aliased environments}, booktitle = {Proceedings of the 2nd IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, MESA 2006}, year = {2007}, note = {cited By 9}, abstract = {Monte Carlo Localization (MCL) is a common method for self-localization of a mobile robot under the assumption that a map of the environment is available. Original implementations used range sensors like laser scanners and sonar sensors. Recently, localization approaches using vision sensors have been developed with good results. In this paper we compare vision-based with sonar-based MCL approaches in terms of localization accuracy. 
In particular, we show how in an environment with high perceptual aliasing like our department both approaches bear certain weaknesses while by combining vision and sonar sensors the respective localization errors decrease and overall accuracy is improved.}, doi = {10.1109/MESA.2006.296971}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-44949256550\&partnerID=40\&md5=22fa07327960746e83a4f0b3c0ce5f46}, author = {Primo Zingaretti and Emanuele Frontoni} } @conference {Frontoni2007a, title = {Vision based approach for active selection of robot{\textquoteright}s localization action}, booktitle = {2007 Mediterranean Conference on Control and Automation, MED}, year = {2007}, note = {cited By 4}, abstract = {The paper presents a mobile robot localization system that integrates Monte-Carlo Localization (MCL) with an active action-selection approach based on an aliasing map. The main novelties of the approach are: the off-line evaluation of the perceptual aliasing of the environment; the use of this knowledge to actively perform the localization processes; the use of an improved SIFT feature extractor to aliasing map evaluation and to measure image similarity. Results, obtained in a real scenario using a real robot, show improved performances in the number of steps needed to correctly localize the robot and in the localization error, compared with the classic MCL approach. Also better performances in computational time due to improvements in the vision system are shown. 
{\textcopyright}2007 IEEE.}, doi = {10.1109/MED.2007.4433677}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-50249111061\&partnerID=40\&md5=c6e81b44708accfe6472fb11aa06e510}, author = {Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} } @conference {Frontoni2007197, title = {Visual feature group matching for autonomous robot localization}, booktitle = {Proceedings - 14th International conference on Image Analysis and Processing, ICIAP 2007}, year = {2007}, note = {cited By 2}, pages = {197-202}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot vision, object recognition, motion estimation, etc. In this work, we propose a SIFT improvement that makes feature extraction and matching more robust, adding a feature group matching layer, which takes into account mutual spatial relations between features. The feature group matching is very fast to be computed and leads to interesting results, above all for the absence of outliers. Results of vision based robot localization using the proposed approach are presented. {\textcopyright} 2007 IEEE.}, doi = {10.1109/ICIAP.2007.4362779}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-48149104519\&partnerID=40\&md5=65dcfdb3e59bdf9ea8eaa2c5d0165770}, author = {Emanuele Frontoni and Primo Zingaretti} } @article {Frontoni2006855, title = {Aliasing maps for robot global localization}, journal = {Frontiers in Artificial Intelligence and Applications}, volume = {141}, year = {2006}, note = {cited By 0}, pages = {855-856}, abstract = {In this paper we present a mobile robot localization system that integrates Monte-Carlo localization with an active action-selection approach based on an aliasing map. The main novelty of the approach is in the off-line evaluation of the perceptual aliasing of the environment and in the use of this knowledge to perform localization processes faster and better. 
Preliminary results show improved performances compared with the classic Monte-Carlo localization approach. {\textcopyright} 2006 The authors.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84886063653\&partnerID=40\&md5=cd93b50250c277363610aeec65edcaec}, author = {Emanuele Frontoni and Primo Zingaretti} } @article {Zingaretti200659, title = {Appearance-based robotics}, journal = {IEEE Robotics and Automation Magazine}, volume = {13}, number = {1}, year = {2006}, note = {cited By 22}, pages = {59-68}, abstract = {A novel appearance-based framework for active robot localization in partially explored environments is introduced. It provides qualitative measurements of the position of the robot, thus monitoring the progress of the overall task. The chromatic and spatial attributes of the color sets extracted from snapshots of the environment are used together with a stochastic evaluator based on partially observable Markov decision process (POMPD). Also, robot localization is performed without using explicit object models. The robustness of the appearance-based framework is demonstrated by a long series of experiments in each of the three environments with different characteristics and with different percentages of knowledge acquired during the visual tour. 
The proposed metric, based on color sets and weighted walkthrough, easily allows several variations to the similarity of the evaluation.}, doi = {10.1109/MRA.2006.1598054}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-33644674351\&partnerID=40\&md5=0399cae997333eacc85cce4d0f987671}, author = {Primo Zingaretti and Emanuele Frontoni} } @article {Forlani2006357, title = {Complete classification of raw LIDAR data and 3D reconstruction of buildings}, journal = {Pattern Analysis and Applications}, volume = {8}, number = {4}, year = {2006}, note = {cited By 68}, pages = {357-374}, abstract = {LIDAR (LIght Detection And Ranging) data are a primary data source for digital terrain model (DTM) generation and 3D city models. This paper presents a three-stage framework for a robust automatic classification of raw LIDAR data as buildings, ground and vegetation, followed by a reconstruction of 3D models of the buildings. In the first stage the raw data are filtered and interpolated over a grid. In the second stage, first a double raw data segmentation is performed and then geometric and topological relationships among regions resulting from segmentation are computed and stored in a knowledge base. In the third stage, a rule-based scheme is applied for the classification of the regions. Finally, polyhedral building models are reconstructed by analysing the topology of building outlines, building roof slopes and eaves lines. Results obtained on data sets with different ground point density, gathered over the town of Pavia (Italy) with Toposys and Optech airborne laser scanning systems, are shown to illustrate the effectiveness of the proposed approach.}, doi = {10.1007/s10044-005-0018-2}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-31944451659\&partnerID=40\&md5=55cd07722f1f7f4286ba71b656517b77}, author = {G. Forlani and C. Nardinocchi and M. 
Scaioni and Primo Zingaretti} } @article {Zingaretti2006719, title = {Editorial}, journal = {Robotics and Autonomous Systems}, volume = {54}, number = {9}, year = {2006}, note = {cited By 0}, pages = {719-720}, doi = {10.1016/j.robot.2006.05.003}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-33746996946\&partnerID=40\&md5=ce0b91c35f45a2ec67a8e750da61b537}, author = {Primo Zingaretti} } @article {Frontoni2006750, title = {An efficient similarity metric for omnidirectional vision sensors}, journal = {Robotics and Autonomous Systems}, volume = {54}, number = {9}, year = {2006}, note = {cited By 5}, pages = {750-757}, abstract = {This paper presents an efficient metric for the computation of the similarity among omnidirectional images (image matching). The representation of image appearance is based on feature vectors that include both the chromatic attributes of color sets and their mutual spatial relationships. The proposed metric fits well to robotic navigation using omnidirectional vision sensors, because it has very important properties: it is reflexive, compositional and invariant with respect to image scaling and rotation. The robustness of the metric was repeatedly tested using omnidirectional images for a robot localization task in a real indoor environment. {\textcopyright} 2006 Elsevier Ltd. All rights reserved.}, doi = {10.1016/j.robot.2006.04.014}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-33747016764\&partnerID=40\&md5=2b43b2f1410d76936beb09d967368bee}, author = {Emanuele Frontoni and Primo Zingaretti} } @conference {Frontoni2006, title = {Fast mobile robot localization using low cost sensors}, booktitle = {IFAC Proceedings Volumes (IFAC-PapersOnline)}, volume = {8}, number = {PART 1}, year = {2006}, note = {cited By 2}, abstract = {Bayesian filtering is a well known probabilistic filtering method. 
Its applications to mobile robot localization are very popular, but an active approach to the problem of localization was never presented. An interesting question is: what is the best action that the robot should choose to localize itself in the minimum number of steps? This paper presents the Fast Particle Filtering (FPF) algorithm to select the best action that allows a fast global localization using particle filtering. The appropriateness of our approach is demonstrated empirically using a mobile robot equipped with low cost sonar sensors in a structured office environment. Comparisons with classical Bayesian filtering approaches are also presented to demonstrate the better performances and the lower computational cost of the FPF algorithm. Copyright {\textcopyright} 2006 IFAC.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80051600774\&partnerID=40\&md5=b6c6e42ff4df40128cbe9bf76d128f44}, author = {Emanuele Frontoni and Adriano Mancini and Caponetti, F. and Primo Zingaretti} } @conference {Frontoni2006a, title = {A framework for simulations and tests of mobile robotics tasks}, booktitle = {14th Mediterranean Conference on Control and Automation, MED{\textquoteright}06}, year = {2006}, note = {cited By 13}, abstract = {This paper presents an education framework, developed in Matlab, for studying and experimenting typical mobile robotics tasks such as obstacle avoidance, localization, navigation and SLAM. The most important characteristic of this framework is the ability to easily switch from a simulator to a real robot to tune and test algorithms and to evaluate results in simulated and real environments. The framework is being used with interesting results in robotic courses at the Universit{\`a} Politecnica delle Marche in Ancona, Italy. 
In the second part of the paper a test case to evaluate an optimization of a Monte Carlo Localization process with sonar sensors is presented.}, doi = {10.1109/MED.2006.328842}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-35949002522\&partnerID=40\&md5=c56db964d779bed90b4aff91801d06cf}, author = {Emanuele Frontoni and Adriano Mancini and Caponetti, F. and Primo Zingaretti} } @conference {Frontoni2006249, title = {Retrieval by visual content in image databases}, booktitle = {SEBD 2006 - Proceedings of the 14th Italian Symposium on Advanced Databases Systems}, year = {2006}, note = {cited By 0}, pages = {249-256}, abstract = {Large amounts of pictures and videos are created, published, transmitted and accessed everyday by corporations and the general public for different uses, from entertainment to mobile robotics. Multimedia content has become a vital enterprise asset and visual content management has emerged as a strategic necessity. Content-based indexing of visual databases is a key technology for representing huge databases of images. A novel image retrieval technique, based on the Polar Weighted Walktrough (PWW) representation, is presented and encouraging results of searching by content in image databases are shown. 
The proposed metric has very important properties: it is reflexive, invariant with respect to image scaling, compositional and, in particular, invariable to rotations.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84893300087\&partnerID=40\&md5=63338b15b124ee548b38cd8c308dab03}, author = {Emanuele Frontoni and Primo Zingaretti} } @conference {Frontoni2005347, title = {A vision based algorithm for active robot localization}, booktitle = {Proceedings of IEEE International Symposium on Computational Intelligence in Robotics and Automation, CIRA}, year = {2005}, note = {cited By 10}, pages = {347-352}, abstract = {This paper describes a method to localize a mobile robot in a structured indoor environment from visual information provided by omnidirectional images. In particular, an approximate algorithm for active robot localization in partially observable environments based on a POMDP model is introduced. Results from many different tests are presented to prove the setting of algorithm parameters. They show that a localization accuracy of less than 25 cm can be achieved even with a partial knowledge of the environment. Besides, the proposed approximated algorithm reduces the computational cost without degrading results. {\textcopyright} 2005 IEEE.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-28444487920\&partnerID=40\&md5=0f5de1c29fccabb1de7c4b94d2a750d0}, author = {Emanuele Frontoni and Primo Zingaretti} } @conference {Zingaretti2001113, title = {Image segmentation for appearance-based self-localisation}, booktitle = {Proceedings - 11th International Conference on Image Analysis and Processing, ICIAP 2001}, year = {2001}, note = {cited By 4}, pages = {113-118}, abstract = {The paper describes a segmentation technique that well fits to an appearance-based self-localisation. In an appearance-based approach robot positioning is performed without using explicit object models. 
The choice of the representation of image appearances is fundamental. We use image-domain features, as opposed to interpreted characteristics of the scene, and we adopt feature vectors including both the chromatic attributes of colour sets and their mutual spatial relationships. To obtain the colour sets we perform image segmentation by autothresholding the colour histograms and taking into account what the results are addressed to. The experimental results indicate that the method performs well for a variety of environments. {\textcopyright} 2001 IEEE.}, doi = {10.1109/ICIAP.2001.956994}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-28444487236\&partnerID=40\&md5=bcd769625c20a95b5d9454e37805508a}, author = {Primo Zingaretti and Bossoletti, L.} } @conference {Carbonaro1999241, title = {A comprehensive approach to image-contrast enhancement}, booktitle = {Proceedings - International Conference on Image Analysis and Processing, ICIAP 1999}, year = {1999}, note = {cited By 3}, pages = {241-246}, abstract = {The paper describes a novel comprehensive approach to image-contrast enhancement in the spatial domain. Instead of defining another transformation function our strategy consists of adopting a general functional form, able to map different transformation functions, and in using a learning technique to select the parameter values that are optimal for the image being processed. First, local measures of spatial activity are assigned to each pixel of the image. Second, the local contrast value for each pixel is computed according to a function which is based on human visual response. Third, the parameters of a comprehensive contrast-enhancement function are selected by a genetic algorithm on the basis of the spatial activity of the image resulting from the transformation. 
The validity of the proposed technique is confirmed both perceptually, that is, higher fitness values correspond to the images that have been judged better by human observers, and by comparative evaluations of our algorithm with respect to classical methods. {\textcopyright} 1999 IEEE.}, doi = {10.1109/ICIAP.1999.797602}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77956062070\&partnerID=40\&md5=bec0356d2a98fdaca54e11dd4711add0}, author = {A. Carbonaro and Primo Zingaretti} } @article {Zingaretti1998407, title = {Fast chain coding of region boundaries}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {20}, number = {4}, year = {1998}, note = {cited By 32}, pages = {407-415}, abstract = {A fast single-pass algorithm to convert a multivalued image from a raster-based representation into chain codes is presented. All chain codes are obtained in linear time with respect to the number of chain segments that are generated at each raster according to a set of templates. A formal statement and the complexity and performance analysis of the algorithm are given. {\textcopyright}1998 IEEE.}, doi = {10.1109/34.677272}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032047517\&partnerID=40\&md5=efe01ab3fa89f0ff9e24a34413091a2b}, author = {Primo Zingaretti} } @article {Zingaretti1998257, title = {Robust real-time detection of an underwater pipeline}, journal = {Engineering Applications of Artificial Intelligence}, volume = {11}, number = {2}, year = {1998}, note = {cited By 14}, pages = {257-268}, abstract = {Currently, the methods of inspection of underwater structures employ remotely operated vehicles, guided from a support vessel by human operators. The risk of losing concentration calls for the development of an intelligent vision, guidance and control system to support the human activity. The paper presents a robust system for the detection and the real-time tracking of submarine pipelines. 
An active vision system is proposed to predict changes in the scene, and to direct computational resources to confirm expectations by adapting the processing mode dynamically. The system originates from an image-processing algorithm that was previously developed by the authors to recognise the pipeline in the image plane. The accuracy of this algorithm has been enhanced by exploiting the temporal context in the image sequence. The disturbances on acquired images caused by motion are partially removed by a Kalman filter. The filter proves advantageous in supporting the guidance and control of the ROV, and in making the image-processing module itself more robust. Sequences of underwater images, acquired at a constant sampling frequency from T.V. cameras, are used together with synchronised navigation data to demonstrate the effectiveness of the system. {\textcopyright} 1998 Elsevier Science Ltd. All rights reserved.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032041493\&partnerID=40\&md5=188347e23a7078459b6de61720f8fff2}, author = {Primo Zingaretti and S.M. Zanoli} } @article {Zingaretti1998177, title = {Route following based on adaptive visual landmark matching}, journal = {Robotics and Autonomous Systems}, volume = {25}, number = {3-4}, year = {1998}, note = {cited By 3}, pages = {177-184}, abstract = {Route following based on visual landmark matching may require many models to cover all different situations. This paper describes a system that is able to adapt template{\textquoteright}s modelling parameters to environmental conditions (lighting, shadows, etc.) by a genetic learning technique. In addition, the mobile robot self-localisation is obtained by a stereo approach that uses the centres of matching in the two images to solve in a simple way the correspondence problem in the 3D position estimation. The experimental results show that the tracking robustness is improved, while using a small set of templates. 
{\textcopyright} 1998 Elsevier Science B.V. All rights reserved.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032208707\&partnerID=40\&md5=8655435896f671afeeb70009f9086bee}, author = {Primo Zingaretti and A. Carbonaro} } @conference {Tascini1998278, title = {Unauthorized access identification in restricted areas}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {3364}, year = {1998}, note = {cited By 0}, pages = {278--286}, abstract = {The paper describes a system to control vehicle accesses in restricted areas. The signalling of vehicles whose license-plates do not belong to a specific database is the aim of the system. The adaptation to different environmental conditions, and the identification of a vehicle by processing the license-plate pattern as a whole, without considering the recognition of the characters, are its two main characteristics. The system implements a recognition engine constituted by two modules. First, the system analyzes the video-recorded sequences to select a frame in which the license-plate satisfies pre-defined constraints, and extracts the license-plate template on which the matching with the model templates stored in the database will be performed. Second, vehicle identification is performed by a genetic template matching that, without requiring a high computational complexity, provides adaptation to normal environmental variations by exploiting learning capabilities. The implemented system, forced to distinguish only between authorized and unauthorized vehicles according to a threshold in the genetic fitness function, shows robust performance on Italian cars, but it is adaptable to different license-plate models, and is independent from outdoor conditions. 
{\textcopyright}2003 Copyright SPIE - The International Society for Optical Engineering.}, doi = {10.1117/12.317481}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-62249155465\&partnerID=40\&md5=c5bfb42de0062e33b5145339e0517adb}, author = {Guido Tascini and A. Carbonaro and Primo Zingaretti} } @conference {Zanoli199856, title = {Underwater imaging system to support ROV guidance}, booktitle = {Oceans Conference Record (IEEE)}, volume = {1}, year = {1998}, note = {cited By 1}, pages = {56-60}, abstract = {An underwater active vision system is proposed to support ROV guidance. The system exploits the temporal context in the image sequence to improve the precision in the computation of the pipeline contours, by adapting the processing mode dynamically. Disturbances of motion effect on acquired images are also partially removed by cascading a Kalman filter to an image processing module.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0032255064\&partnerID=40\&md5=ea30ad86202dc8ac07f8bdb5f8090de5}, author = {S.M. Zanoli and Primo Zingaretti} } @conference {Giansanti1997530, title = {Imaging system for retinal change evaluation}, booktitle = {IEE Conference Publication}, number = {443 pt 2}, year = {1997}, note = {cited By 1}, pages = {530-534}, abstract = {This paper concentrates on the results of a computerised approach to the automatic extraction of numerical indexes describing morphological details of the fundus oculi. The authors proposed an imaging software system with a strict interconnection between the segmentation and recognition phases. This paper presents new image processing techniques developed to take advantage of an improved imaging system constituted by a high resolution digital camera (Kodak DCS 420) connected on top of a standard retinal camera (Topcon TRC-50VT). 
The higher resolution (1524{\texttimes}1024 pixels) now permits a more accurate analysis of the degree of arteriolar sclerosis and vessel narrowing, both in the proximal and distal segment, and the computation of further numerical indexes, such as vessel reflectance and permeability to fluorescein.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0031339703\&partnerID=40\&md5=0a327e5d1523689f98dd675f821ff3e2}, author = {Giansanti, R. and Fumelli, P. and Passerini, G. and Primo Zingaretti} } @conference {Carbonaro1997147, title = {Landmark matching in a varying environment}, booktitle = {Proceedings of the Euromicro Workshop on Advanced Mobile Robots, EUROBOT}, year = {1997}, note = {cited By 4}, pages = {147-153}, abstract = {A system for landmark tracking by a template matching approach is described. Route following based on landmarks may require many models to cover all different situations, so a genetic algorithm learning technique is used to adapt modelling parameters to environmental conditions (lighting, shadows, reflexes, etc.) during the tracking. In addition, the mobile robot self-localization is obtained by a stereo approach that uses the centres of matching in the two images to solve in a simple way the correspondence analysis in the 3-D position estimation. The experimental results show that the tracking robustness is improved when the adaptive template matching is used for landmark tracking.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0031356261\&partnerID=40\&md5=84da6f7845bf77d41951a6da2296155c}, author = {A. Carbonaro and Primo Zingaretti} } @conference {Carbonaro1997229, title = {Object tracking in a varying environment}, booktitle = {IEE Conference Publication}, number = {443 pt 1}, year = {1997}, note = {cited By 3}, pages = {229-233}, abstract = {An object tracking system based on a template matching approach is demonstrated. 
The system identifies the target and tracks a sequence of video-recorded images without losing the object by using a genetic algorithm (GA)-based learning to adapt template matching processes to environmental conditions. The adaptive GA generates templates better than fixed, random or indexed template generation techniques in terms of both the cross-correlation score and time necessary to choose the template model. While the overload using the GA is minimal if it is not necessary to change template model, in all other cases the GA offers a better solution to the associated search and optimization problem than usual algorithms.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0031337379\&partnerID=40\&md5=91c558ca9bbaca4384ce4e7619dd71f4}, author = {A. Carbonaro and Primo Zingaretti} } @conference {Conte19961213, title = {Automatic analysis of visual data in submarine pipeline inspection}, booktitle = {Oceans Conference Record (IEEE)}, volume = {3}, year = {1996}, note = {cited By 8}, pages = {1213--1219}, abstract = {An automatic system for analysis and interpretation of visual data from submarine pipeline inspection operates using images collected by cameras mounted on an unmanned Remotely Operated Vehicle (ROV) that moves along the pipeline. The system supports the operator in navigating the ROV and in detecting structural elements of the pipeline such as anodes, gravel heaps and reference bands. The system employs a feature based technique for recognizing the pipeline{\textquoteright}s profile and for detecting the structural elements. Information gathered by analyzing and interpreting the visual data can be used for automatic guidance and inspection by integrating the system into a larger architecture.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0030385353\&partnerID=40\&md5=d5723a92ebcff100f3868adc25135935}, author = {Conte, G. and S.M. Zanoli and Perdon, A.M. 
and Guido Tascini and Primo Zingaretti} } @conference {Zingaretti1996129, title = {Imaging approach to real-time tracking of submarine pipeline}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {2661}, year = {1996}, note = {cited By 10}, pages = {129-137}, abstract = {The work presents a real-time underwater imaging system for identification and tracking of a submarine pipeline on a sequence of recorded images. The main novelty of this work relies on adopting an automatic approach that is entirely based on the analysis and interpretation of visual data, in spite of the various limitations upon the ability to image underwater objects. The analysis of the data is performed starting from image processing operations (like filtering, profile analysis, feature enhancement) implemented on a dedicated board. Then, the system employs an efficient dynamic process for recognizing the two contours of the pipeline. In each frame the system is able to determine the equations of the two straight lines corresponding to the pipeline contours. The system reaches satisfactory performances in real time operation: up to eight frames per second on a Pentium based PC. The results of this work are somewhat more meaningful as the input images were acquired by three cameras, mounted on a remotely operated vehicle travelling at one nautical mile an hour, without any attention either to illumination conditions or stability of cameras. This work is originated from the interest of Snamprogetti in enhancing the level of automation in submarine pipeline inspection.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0029754848\&partnerID=40\&md5=65001780f0cb94776ceeb1da3a6eec40}, author = {Primo Zingaretti and Guido Tascini and Paolo Puliti and S.M. 
Zanoli} } @article {Tascini1996432, title = {Real-time inspection by submarine images}, journal = {Journal of Electronic Imaging}, volume = {5}, number = {4}, year = {1996}, note = {cited By 5}, pages = {432--442}, abstract = {A real-time application of computer vision concerning tracking and inspection of a submarine pipeline is described. The objective is to develop automatic procedures for supporting human operators in the real-time analysis of images acquired by means of cameras mounted on underwater remotely operated vehicles (ROV). Implementation of such procedures gives rise to a human-machine system for underwater pipeline inspection that can automatically detect and signal the presence of the pipe, of its structural or accessory elements, and of dangerous or alien objects in its neighborhood. The possibility of modifying the image acquisition rate in the simulations performed on video-recorded images is used to prove that the system performs all necessary processing with an acceptable robustness working in real-time up to a speed of about 2.5 kn, widely greater than that the actual ROVs and the security features allow. {\textcopyright} 1996 SPIE and IS\&T.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0009091679\&partnerID=40\&md5=0be09172d776524c453eb82b24820faf}, author = {Guido Tascini and Primo Zingaretti and Conte, G.} } @conference {Tascini199518, title = {Model attraction in medical image object recognition}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {2436}, year = {1995}, note = {cited By 0}, pages = {18--29}, abstract = {This paper presents a new approach to image recognition based on a general attraction principle. A cognitive recognition is governed by a {\textquoteright}focus on attention{\textquoteright} process that concentrates on the visual data subset of task-relevant type only. 
Our model-based approach combines it with another process, focus on attraction, which concentrates on the transformations of visual data having relevance for the matching. The recognition process is characterized by an intentional evolution of the visual data. This chain of image transformations is viewed as driven by an attraction field that attempts to reduce the distance between the image-point and the model-point in the feature space. The field sources are determined during a learning phase, by supplying the system with a training set. The paper describes a medical interpretation case in the feature space, concerning human skin lesions. The samples of the training set, supplied by the dermatologists, allow the system to learn models of lesions in terms of features such as hue factor, asymmetry factor, and asperity factor. The comparison of the visual data with the model derives the trend of image transformations, allowing a better definition of the given image and its classification. The algorithms are implemented in C language on a PC equipped with Matrox Image Series IM-1280 acquisition and processing boards. The work is now in progress.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0029452584\&partnerID=40\&md5=181cb1d8753867d28066421ab8229a59}, author = {Guido Tascini and Primo Zingaretti} } @conference {Tascini19942378, title = {Attraction based recognition}, booktitle = {Proceedings of the IEEE International Conference on Systems, Man and Cybernetics}, volume = {3}, year = {1994}, note = {cited By 0}, pages = {2378-2383}, abstract = {An approach to image recognition is proposed, which is achieved through the correspondences between predicted and measured properties: points, lines, regions, color, shape, etc. Image parts are subjected to a series of transformations performed by the recognition process. 
In these transformations, called image chaining, there is a grouping of image parts or features due to a field continually perturbed by the recognition process. A fundamental role is played by the spatial correspondences together with an intentional behavior of the transformed visual data called attraction. After a definition of the attraction principle, the paper describes the attraction process, and justifies the image chaining concept. Finally, a series of processes - segmentation refinement, search space reduction, and perceptual organization - are interpreted in terms of the attraction principle.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0028742684\&partnerID=40\&md5=9925689ed71ffc413fb731f2205f4173}, author = {Guido Tascini and Primo Zingaretti} } @conference {Tascini19941722, title = {Automatic quantitative analysis of lumbar bone radiographs}, booktitle = {IEEE Nuclear Science Symposium \& Medical Imaging Conference}, number = {pt 3}, year = {1994}, note = {cited By 1}, pages = {1722--1726}, abstract = {In the current radiological diagnosis the radiographs often appear as {\textquoteleft}rough{\textquoteright} means to be understood due to the bad quality of radiographic images. The aim of the work is to guarantee a diagnostic support even in presence of radiographic means only, with a little cost increment due to a computer system, by supplying a useful primary screening tool that automatically analyzes digitized radiographs and gives important features relevant from a medical standpoint. The specific domain of image analysis concerns osteoporosis, which is a long-term disease requiring an accurate measurement of bone density and a periodic follow-up. 
The automatic image processing method described in the work goes through a series of steps: selection of the region of interest, first preprocessing, extraction of the vertebral left side, second preprocessing, extraction of upper and lower borders of the end plates, detection of the greatest uniform area inside the vertebral body, third preprocessing, correction of image disuniformity, detection of the end plates, feature detection (variance, mean-gray, concavity, body and tissue density, etc.). The automation of the process is complete and the results agree well with those obtained from human analysis.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0028277067\&partnerID=40\&md5=7871b50c44eb8cbdb7e70759b9b2eb0d}, author = {Guido Tascini and Primo Zingaretti} } @conference {Tascini1994838, title = {Image sequence recognition}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {2308}, number = {p 2}, year = {1994}, note = {cited By 1}, pages = {838--847}, abstract = {Image sequence recognition is a problem that occurs in computer vision and particularly in mobile robot vision. The feature based method has been selected to solve this problem. The method first extracts the features as corners, points of curvature, lines etc. Then the correspondence of these features is established between two successive frames, and finally motion parameters and object structure from correspondences are computed. Test results demonstrating the method are included.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0028735739\&partnerID=40\&md5=2752672a7fe70d15fb9df66474f82faf}, author = {Guido Tascini and Primo Zingaretti} } @article {Antonicelli1993125, title = {{Camerano} study on hypertension: The problem of blood pressure variability during medical visit}, journal = {Clinical and Experimental Hypertension}, volume = {15}, number = {SUPPL. 
1}, year = {1993}, note = {cited By 3}, pages = {125--138}, abstract = {The Camerano Study on Arterial Hypertension (AH) is a cross-sectional study, carried out on a wide population sample in a small town in Central Italy, and aimed at revealing the prevalence of certain characteristics of AH in the population examined. In particular, we studied some aspects of blood pressure (BP) levels during the medical visits. To evaluate the effects of the medical visit on BP levels, we divided the subjects into 3 groups: I) Hypertensive subjects, II) Treated hypertensive subjects, III) Normotensive subjects (control group). The Systolic Arterial Pressure (SAP) in normotensive subjects reached maximum levels during the first medical visit and then decreased in the following two controls (p$<$0.001). The Diastolic Arterial Pressure (DAP) did not show any significant changes during the three measurements (p=n.s.). Instead the maximum level of SAP in the hypertensive group did not appear at the First measurement but only after 5 minutes and was seen to decrease towards the end of the visit (p$<$0.001). Even DAP showed different levels compared to the normotensives: A decrease in BP levels was registered after 15 minutes with respect to earlier measurements (p$<$0.01). The levels of group II were similar to those of normotensive subjects.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0027175329\&partnerID=40\&md5=f3e21a4965a8f2d45fca099ff52dfea1}, author = {Antonicelli, R. and Gesuita, R. and Primo Zingaretti and Amadio, L. and Pagelli, P. and Cusi, D. and Paciaroni, E.} } @conference {Tascini1993126, title = {Handwritten character recognition using background analysis}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {1906}, year = {1993}, note = {cited By 0}, pages = {126--133}, abstract = {The paper describes a low-cost handwritten character recognizer. 
It is constituted by three modules: the {\textquoteleft}acquisition{\textquoteright} module, the {\textquoteleft}binarization{\textquoteright} module, and the {\textquoteleft}core{\textquoteright} module. The core module can be logically partitioned into six steps: character dilation, character circumscription, region and {\textquoteleft}profile{\textquoteright} analysis, {\textquoteleft}cut{\textquoteright} analysis, decision tree descent, and result validation. Firstly, it reduces the resolution of the binarized regions and detects the minimum rectangle (MR) which encloses the character; the MR partitions the background into regions that surround the character or are enclosed by it, and allows it to define features as {\textquoteleft}profiles{\textquoteright} and {\textquoteleft}cuts;{\textquoteright} a {\textquoteleft}profile{\textquoteright} is the set of vertical or horizontal minimum distances between a side of the MR and the character itself; a {\textquoteleft}cut{\textquoteright} is a vertical or horizontal image segment delimited by the MR. Then, the core module classifies the character by descending along the decision tree on the basis of the analysis of regions around the character, in particular of the {\textquoteleft}profiles{\textquoteright} and {\textquoteleft}cuts,{\textquoteright} and without using context information. Finally, it recognizes the character or reactivates the core module by analyzing validation test results. The recognizer is largely insensible to character discontinuity and is able to detect Arabic numerals and English alphabet capital letters. 
The recognition rate of a 32 {\texttimes} 32 pixel character is of about 97\% after the first iteration, and of over 98\% after the second iteration.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0027277522\&partnerID=40\&md5=06fd7da850b665db5896240114bab4e7}, author = {Guido Tascini and Paolo Puliti and Primo Zingaretti} } @conference {Tascini1993322, title = {Retina vascular network recognition}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {1898}, year = {1993}, note = {cited By 5}, pages = {322-329}, abstract = {The analysis of morphological and structural modifications of the retina vascular network is an interesting investigation method in the study of diabetes and hypertension. Normally this analysis is carried out by qualitative evaluations, according to standardized criteria, though medical research attaches great importance to quantitative analysis of vessel color, shape and dimensions. The paper describes a system which automatically segments and recognizes the ocular fundus circulation and micro circulation network, and extracts a set of features related to morphometric aspects of vessels. For this class of images the classical segmentation methods seem weak. We propose a computer vision system in which segmentation and recognition phases are strictly connected. The system is hierarchically organized in four modules. Firstly the Image Enhancement Module (IEM) operates a set of custom image enhancements to remove blur and to prepare data for subsequent segmentation and recognition processes. Secondly the Papilla Border Analysis Module (PBAM) automatically recognizes number, position and local diameter of blood vessels departing from optical papilla. Then the Vessel Tracking Module (VTM) analyses vessels comparing the results of body and edge tracking and detects branches and crossings. 
Finally the Feature Extraction Module evaluates PBAM and VTM output data and extracts some numerical indexes. Used algorithms appear to be robust and have been successfully tested on various ocular fundus images.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0027873308\&partnerID=40\&md5=fe51e921b3d6ecbbbd0483a977269acd}, author = {Guido Tascini and Passerini, G. and Paolo Puliti and Primo Zingaretti} } @article {Antonicelli199217, title = {The {Camerano} study on hypertension: The problem of arterial hypertension in the elderly}, journal = {Archives of Gerontology and Geriatrics}, volume = {15}, number = {SUPPL. 1}, year = {1992}, note = {cited By 3}, pages = {17--26}, abstract = {The Camerano study on arterial hypertension (AH) was a cross-sectional study, carried out on a large population sample in a small town in central Italy. The main goal was to reveal both the prevalence and certain characteristics of AH in the population examined. The main results can be summarized as follows: (i) The occurrence of AH in the old (65-74 years) and very old ($\geq$75 years) groups was 43.3 and 57.4\%, respectively. (ii) isolated systolic hypertension (ISH) was found in 1.7, 23.6 and 3.9\% in the adult, old and very old subjects, respectively. (iii) The association of AH with some of the more common cardiovascular risk factors (dyslipidemia, hyperglycemia, obesity, etc.) was significant for all the risk factors in the adult group, while in the old group there was a significant association only with the body mass index. (iv) Blood pressure (BP) values during the medical visits were evaluated, and adult versus old subjects were compared, but no significant differences were found. {\textcopyright} 1992 Elsevier Science Publishers B.V. All rights reserved.}, doi = {10.1016/S0167-4943(05)80003-2}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0026662064\&partnerID=40\&md5=32b7c6611b71b6ca2f62c62434cf5ae9}, author = {Antonicelli, R. and Gesuita, R. 
and Primo Zingaretti and Amadio, L. and Pacelli, P. and Cusi, D. and Paciaroni, E.} } @conference {Tascini1991178, title = {Decision support system for capillaroscopic images}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {1450}, year = {1991}, note = {cited By 1}, pages = {178-185}, abstract = {The aim of the paper is to describe a decision support system operating in the area of capillaroscopic images. The system automatically sites the capillaroscopic analyzed image into one of the following classes: normal, diabetic and sclerodermic. The automatic morphometric analysis attempts to imitate physician behaviour and requires the introduction of some particular features connected with the specific domain. These features allow achieving a symbolic representation of the capillary partitioning it into three components: apex, arteriolar and venular side. The system is hierarchically organized in two levels. The system has been successfully used for obtaining images of nailfold capillaries of human finger.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0025798390\&partnerID=40\&md5=05b1aa9225d95f1ed2cecdc0a5268c2a}, author = {Guido Tascini and Paolo Puliti and Primo Zingaretti} } @article {Gaggiotti1991203, title = {Nutritional parameters in subjects aged 60 years and over [PARAMETRI NUTRIZIONALI IN SOGGETTI CON ETA SUPERIORE A 60 ANNI]}, journal = {Rivista Italiana di Nutrizione Parenterale ed Enterale}, volume = {9}, number = {3}, year = {1991}, note = {cited By 1}, pages = {203-210}, abstract = {Nutritional assessment of the elderly presents some difficulties because of the lack of reference values. Moreover chronic age-related diseases can interfere with the physiological nutritional values. 
Anthropometric (triceps skinfold, arm muscle area, total body muscle mass, fat mass and Body Mass Index = BMI), biochemical (prealbumin, transferrin, ceruloplasmin, total protein, albumin) and immunological (total lymphocytes) parameters were examined in 583 subjects of over 60 years, selected following specific criteria and with BMI < 30. The t-test evaluation of all anthropometric parameters (except BMI) showed a significant difference in both age (p < 0.05) and sex (p < 0.05) values; there were also significant differences in prealbumin and ceruloplasmin levels for both sex (p < 0.05) and age (p < 0.05) and for sex only (p < 0.05) respectively. Comparing our biochemical mean values using the t-test with those from our Analysis Laboratory, we found that the ceruloplasmin and prealbumin mean values for our sample were higher (p < 0.05) than from our Laboratory, while both protein and albumin levels were lower (p < 0.05).}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0026322716\&partnerID=40\&md5=4ab2812d4bc5a00f2ce4f7bf55d073de}, author = {Gaggiotti, G. and Orlandoni, P. and Ambrosi, S. and Bartolacci, T. and Onorato, G. and Piloni, G. and Amadio, L. and Spazzafumo, L. and Primo Zingaretti and Fabris, N.} } @article {Antonicelli1989155, title = {Evaluation of efficacy and tolerability of the association Captopril 50 mg + hydrochlorothiazide 15 mg in the elderly [VALUTAZIONE DELL{\textquoteright}EFFICACIA E TOLLERABILITA DELL{\textquoteright}ASSOCIAZIONE A DOSE FISSA DI CAPTOPRIL (CPT) 50 MG E IDROCLOROTIAZIDE (HCTZ) 15 MG IN SO}, journal = {European Review for Medical and Pharmacological Sciences}, volume = {11}, number = {2}, year = {1989}, note = {cited By 0}, pages = {155-161}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0024956512\&partnerID=40\&md5=83288f643078d8c4d318448d3a1b48fc}, author = {Antonicelli, R. and Raffaelli, S. and Bossini, A. and Paciaroni, E. 
and Primo Zingaretti and Botta, G.F.} } @article {Palareti1988307, title = {{Prolog} approach to image segmentation}, journal = {Applied Artificial Intelligence}, volume = {2}, number = {3-4}, year = {1988}, note = {cited By 0}, pages = {307--331}, abstract = {Segmentation is a problem in computer vision. It attempts to supply primitives for higher-level processes of interpretation. A multithreshold approach has already given acceptable results. This paper presents a Prolog implementation of a multithreshold system. The algorithms implemented concern image representation, connected regions, identification, and contour detection. The improvement of algorithms in a logic language rather than in a procedural one appeared to be of particular interest. The main reasons for this are implementation facility and the natural use of logic programming in the field of knowledge-based systems. In fact, a knowledge-based approach seems a reasonable solution to problems of assisted image understanding processes.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0024019798\&partnerID=40\&md5=bbf1d6d0aad82eee5540c0841df3bfad}, author = {Palareti, Aldopaolo and Paolo Puliti and Guido Tascini and Primo Zingaretti} }