@article {antognoli2020heartbeat, title = {Heartbeat detection by Laser Doppler Vibrometry and Machine Learning}, journal = {Sensors}, year = {2020}, doi = {10.3390/s20185362}, author = {Luca Antognoli and Sara Moccia and Lucia Migliorelli and Sara Casaccia and Lorenzo Scalise and Emanuele Frontoni} }

@conference {liciotti2017hmm, title = {HMM-based activity recognition with a ceiling RGB-D camera}, booktitle = {ICPRAM (International Conference on Pattern Recognition Applications and Methods)}, year = {2017}, month = {02/2017}, abstract = {

Automated recognition of Activities of Daily Living (ADLs) makes it possible to identify potential health problems and apply corrective strategies in Ambient Assisted Living (AAL). ADL analysis can provide very useful information for elder care and long-term care services. This paper presents an automated RGB-D video analysis system that recognises human ADLs related to common daily actions. The main goal is to predict the probability of the action performed by the analysed subject, so that abnormal behaviour can be detected. Activity detection and recognition are performed using an affordable RGB-D camera. Human activities, despite their unstructured nature, tend to have a natural hierarchical structure; for instance, making a coffee generally involves a three-step process of turning on the coffee machine, putting sugar in the cup and opening the fridge for the milk. Action sequence recognition is then handled using a discriminative Hidden Markov Model (HMM). RADiaL, a dataset containing RGB-D images and the 3D position of each person for training and evaluating the HMM, has been built and made publicly available.

}, author = {Daniele Liciotti and Emanuele Frontoni and Primo Zingaretti and Nicola Bellotto and Tom Duckett} }

@conference {Mancini2015534, title = {High-resolution mapping of river and estuary areas by using unmanned aerial and surface platforms}, booktitle = {2015 International Conference on Unmanned Aircraft Systems, ICUAS 2015}, year = {2015}, note = {cited By 0}, pages = {534-542}, doi = {10.1109/ICUAS.2015.7152333}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84941111909\&partnerID=40\&md5=994c13558511f25e91843621dde17a5c}, author = {Adriano Mancini and Emanuele Frontoni and Primo Zingaretti and Sauro Longhi} }

@conference {liciotti2015human, title = {Human activity analysis for in-home fall risk assessment}, booktitle = {2015 IEEE International Conference on Communication Workshop (ICCW)}, year = {2015}, pages = {284{\textendash}289}, publisher = {IEEE}, organization = {IEEE}, author = {Daniele Liciotti and Gionata Massi and Emanuele Frontoni and Adriano Mancini and Primo Zingaretti} }

@conference {Frontoni2014, title = {A heuristic approach to evaluate occurrences of products for the planogram maintenance}, booktitle = {MESA 2014 - 10th IEEE/ASME International Conference on Mechatronic and Embedded Systems and Applications, Conference Proceedings}, year = {2014}, note = {cited By 0}, abstract = {A planogram is a detailed visual map of the products in a retail store; it establishes the position of each product in order to increase sales and to supply the best location for suppliers. The aims of a {\textquoteleft}correct{\textquoteright} planogram are therefore several: increasing sales, increasing profits, introducing a new item, supporting an innovative merchandising approach, and managing the shelves better. Deviating from the planogram defeats the purpose of any of these goals. A fundamental aspect of retail operations is maintaining the integrity of the planogram. This work provides a solution to this problem, proposing a system that identifies the presence of a specific product in the image of a shelf, even when the product is moved, rotated, misplaced, or photographed in poor lighting conditions. The paper presents a method to find and count multiple instances of the same object occurring in the image of a store shelf without using classifiers. The procedure described here is based on a heuristic algorithm that involves morphological operations, template matching and histogram comparison. Experimental results are presented to verify the effectiveness of the proposed approach; they demonstrate that the algorithm provides satisfactory results when the user manually chooses the most significant label of the product to find in the shelf image.}, doi = {10.1109/MESA.2014.6935615}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84911965097\&partnerID=40\&md5=0de9992bbb2157ad432a05fee65d6dfc}, author = {Emanuele Frontoni and Marco Contigiani and G. Ribighini} }

@article {DeGiovanni2013627, title = {A heuristic and an exact method for the gate matrix connection cost minimization problem}, journal = {International Transactions in Operational Research}, volume = {20}, number = {5}, year = {2013}, note = {cited By 1}, pages = {627-643}, abstract = {In many applications, a sequencing of patterns (electronic circuit nodes, cutting patterns, product orders, etc.) has to be found in order to optimize some given objective function, giving rise to the so-called open stack problems.
We focus on a problem related to the optimization of gate matrix layouts: electronic circuits are obtained by connecting gates, and one seeks a gate layout permutation that minimizes connection costs under restrictions on the circuit area. In the literature, the connection costs and the circuit area are also known as the time of open stacks and the maximum number of open stacks, respectively. We propose a genetic algorithm providing heuristic solutions and a branch-and-cut algorithm based on a new linear integer programming formulation that represents, to the best of our knowledge, the first exact method proposed in the literature. The algorithms have been tested on real instances and on data sets from the literature. The computational results give evidence that the proposed methods provide solutions that improve on those found by the approaches presented in the literature. {\textcopyright} 2013 International Federation of Operational Research Societies.}, doi = {10.1111/itor.12025}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84881022755\&partnerID=40\&md5=243ec854395721c11c14eec4036e7a7b}, author = {De Giovanni, L. and Gionata Massi and F. Pezzella and Pfetsch, M.E. and Rinaldi, G. and Ventura, P.} }

@article {Malinverni20111025, title = {Hybrid object-based approach for land use/land cover mapping using high spatial resolution imagery}, journal = {International Journal of Geographical Information Science}, volume = {25}, number = {6}, year = {2011}, note = {cited By 13}, pages = {1025-1043}, abstract = {Traditionally, remote sensing has employed pixel-based classification techniques to deal with land use/land cover (LULC) studies. Generally, pixel-based approaches have been proven to work well with low spatial resolution imagery (e.g. Landsat or System Pour L{\textquoteright}Observation de la Terre sensors). Now, however, commercially available high spatial resolution images (e.g. aerial Leica ADS40 and Vexcel UltraCam sensors, and satellite IKONOS, Quickbird, GeoEye and WorldView sensors) can be problematic for pixel-based analysis due to their tendency to oversample the scene. This is driving research towards object-based approaches. This article proposes a hybrid classification method with the aim of incorporating the advantages of supervised pixel-based classification into object-based approaches. The method has been developed for medium-scale (1:10,000) LULC mapping using ADS40 imagery with 1 m ground sampling distance. First, spatial information is incorporated into a pixel-based classification (AdaBoost classifier) by means of additional texture features (Haralick, Gabor and Laws features), which can be selected {\textquoteleft}ad hoc{\textquoteright} according to optimal training samples ({\textquoteleft}Relief-F{\textquoteright} approach, Mahalanobis distances). Then a rule-based approach sorts segmented regions into thematic CORINE Land Cover classes in terms of membership class percentages (a modified Winner-Takes-All approach) and shape parameters. Finally, ancillary data (roads, rivers, etc.) are exploited to increase classification accuracy. The experimental results show that the proposed hybrid approach allows the extraction of more LULC classes than conventional pixel-based methods, while improving classification accuracy considerably. A second contribution of this article is the assessment of classification reliability by implementing a stability map, in addition to confusion matrices.
{\textcopyright} 2011 Taylor \& Francis.}, doi = {10.1080/13658816.2011.566569}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960685342\&partnerID=40\&md5=18767c8a88bf2abff53ef96ec138f0aa}, author = {Eva Savina Malinverni and Anna Nora Tassetti and Adriano Mancini and Primo Zingaretti and Emanuele Frontoni and A. Bernardini} }

@conference {Dragoni2010185, title = {Hybrid System for a never-ending unsupervised learning}, booktitle = {2010 10th International Conference on Hybrid Intelligent Systems, HIS 2010}, year = {2010}, note = {cited By 0}, pages = {185-190}, abstract = {We propose a Hybrid System for dynamic environments, in which a {\textquoteleft}Multiple Neural Networks{\textquoteright} system works together with the Bayes Rule. One or more neural nets may no longer be able to operate properly, due to partial changes in some of the characteristics of the individuals. We assume that each expert network has a reliability factor that can be dynamically re-evaluated on the basis of the global recognition operated by the overall group. Since a net{\textquoteright}s degree of reliability is defined as the probability that the net is giving the desired output, in case of conflicts between the outputs of the various nets the re-evaluation of their degrees of reliability can simply be performed on the basis of the Bayes Rule. The new vector of reliabilities is then used to make the final choice, by applying two algorithms, the {\textquoteleft}Inclusion based{\textquoteright} and the {\textquoteleft}Weighted{\textquoteright} one, over all the maximally consistent subsets of the global outcome. {\textcopyright} 2010 IEEE.}, doi = {10.1109/HIS.2010.5601070}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78650122885\&partnerID=40\&md5=c1f15dbf9914a0b618932ba197795fa9}, author = {Dragoni, A.F. and Vallesi, G. and Paola Baldassarri} }

@article {Dragoni2010296, title = {An hybrid system for continuous learning}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {6077 LNAI}, number = {PART 2}, year = {2010}, note = {cited By 0}, pages = {296-303}, abstract = {We propose a Multiple Neural Networks system for dynamic environments, where one or more neural nets could no longer be able to operate properly, due to partial changes in some of the characteristics of the individuals. We assume that each expert network has a reliability factor that can be dynamically re-evaluated on the basis of the global recognition operated by the overall group. Since a net{\textquoteright}s degree of reliability is defined as the probability that the net is giving the desired output, in case of conflicts between the outputs of the various nets the re-evaluation of their degrees of reliability can simply be performed on the basis of the Bayes Rule. The new vector of reliabilities is then used to make the final choice, by applying two algorithms, the Inclusion based and the Weighted one, over all the maximally consistent subsets of the global outcome. {\textcopyright} 2010 Springer-Verlag.}, doi = {10.1007/978-3-642-13803-4_37}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77954587081\&partnerID=40\&md5=623ab72ce6e0ef83d2087d387550b607}, author = {Dragoni, A.F. and Vallesi, G. and Paola Baldassarri and Mazzieri, M.} }

@article {Zingaretti2009500, title = {A hybrid approach to land cover classification from multi spectral images}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {5716 LNCS}, year = {2009}, note = {cited By 5}, pages = {500-508}, abstract = {This work is part of a wider project whose general objective is to develop a methodology for the automatic classification, based on CORINE land-cover (CLC) classes, of high resolution multispectral IKONOS images. The specific objective of this paper is to describe a new methodology for producing really exploitable results from automatic classification algorithms. The input data basically consist of multispectral images, integrated with textural and contextual measures. The output consists of an image with each pixel assigned to one out of 15 classes at the second level of the CLC legend or left unclassified (somehow a better solution than a classification error), plus a stability map that helps users to separate the regions classified with high accuracy from those whose classification result should be verified before being used. {\textcopyright} 2009 Springer Berlin Heidelberg.}, doi = {10.1007/978-3-642-04146-4_54}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-76249123810\&partnerID=40\&md5=fcaeb5e46615d9bbdf96fcbf4ffac41a}, author = {Primo Zingaretti and Emanuele Frontoni and Eva Savina Malinverni and Adriano Mancini} }

@conference {Tascini1993126, title = {Handwritten character recognition using background analysis}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, volume = {1906}, year = {1993}, note = {cited By 0}, pages = {126-133}, abstract = {The paper describes a low-cost handwritten character recognizer. It consists of three modules: the {\textquoteleft}acquisition{\textquoteright} module, the {\textquoteleft}binarization{\textquoteright} module, and the {\textquoteleft}core{\textquoteright} module. The core module can be logically partitioned into six steps: character dilation, character circumscription, region and {\textquoteleft}profile{\textquoteright} analysis, {\textquoteleft}cut{\textquoteright} analysis, decision tree descent, and result validation. Firstly, it reduces the resolution of the binarized regions and detects the minimum rectangle (MR) that encloses the character; the MR partitions the background into regions that surround the character or are enclosed by it, and allows features such as {\textquoteleft}profiles{\textquoteright} and {\textquoteleft}cuts{\textquoteright} to be defined: a {\textquoteleft}profile{\textquoteright} is the set of vertical or horizontal minimum distances between a side of the MR and the character itself; a {\textquoteleft}cut{\textquoteright} is a vertical or horizontal image segment delimited by the MR. Then, the core module classifies the character by descending the decision tree on the basis of the analysis of the regions around the character, in particular of the {\textquoteleft}profiles{\textquoteright} and {\textquoteleft}cuts{\textquoteright}, without using context information. Finally, it either recognizes the character or reactivates the core module after analyzing the validation test results. The recognizer is largely insensitive to character discontinuity and is able to recognize Arabic numerals and English alphabet capital letters.
The recognition rate for 32 {\texttimes} 32 pixel characters is about 97\% after the first iteration, and over 98\% after the second iteration.}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0027277522\&partnerID=40\&md5=06fd7da850b665db5896240114bab4e7}, author = {Guido Tascini and Paolo Puliti and Primo Zingaretti} }